code: string (3–1.05M chars) | repo_name: string (5–104 chars) | path: string (4–251 chars) | language: 1 class | license: 15 classes | size: int64 (3–1.05M)
---|---|---|---|---|---
import uuid
import abc
from .lammps_particles import LammpsParticles
class ABCDataManager(object):
""" Class managing Lammps data information
The class communicates data to and from LAMMPS. It manages the data
existing in LAMMPS and allows this data to be queried and changed.
The class maintains and provides LammpsParticles (which implements
the ABCParticles class). Queries and changes to LammpsParticles
occur through the abstract methods in this class. See subclasses
to understand how the communication occurs.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
# map from name to unique name
self._unames = {}
# map from unique name to names
self._names = {}
# dictionary of lammps_particle
# where the key is the unique name
self._lpcs = {}
def get_name(self, uname):
"""
Get the name of a particle container
Parameters
----------
uname : string
unique name of particle container
Returns
-------
string
name of particle container
"""
return self._names[uname]
def rename(self, uname, new_name):
""" Rename a particle container
Parameters
----------
uname : string
unique name of the particle container to be renamed
new_name : string
new name of the particle container
"""
del self._unames[self._names[uname]]
self._unames[new_name] = uname
self._names[uname] = new_name
def __iter__(self):
""" Iter over names of particle containers
"""
for name in self._unames:
yield name
def __contains__(self, name):
""" Checks if particle container with this name exists
"""
return name in self._unames
def __getitem__(self, name):
""" Returns particle container with this name
"""
return self._lpcs[self._unames[name]]
def __delitem__(self, name):
"""Deletes lammps particle container and associated cache
"""
self._handle_delete_particles(self._unames[name])
del self._lpcs[self._unames[name]]
del self._unames[name]
def new_particles(self, particles):
"""Add new particle container to this manager.
Parameters
----------
particles : ABCParticles
particle container to be added
Returns
-------
LammpsParticles
"""
# generate a unique name for this particle container
# that will not change over the lifetime of the wrapper.
uname = uuid.uuid4()
self._unames[particles.name] = uname
self._names[uname] = particles.name
lammps_pc = LammpsParticles(self, uname)
self._lpcs[uname] = lammps_pc
self._handle_new_particles(uname, particles)
return lammps_pc
@abc.abstractmethod
def _handle_delete_particles(self, uname):
"""Handle when a Particles is deleted
Parameters
----------
uname : string
non-changing unique name of particles to be deleted
"""
@abc.abstractmethod
def _handle_new_particles(self, uname, particles):
"""Handle when new particles are added
Parameters
----------
uname : string
non-changing unique name associated with particles to be added
particles : ABCParticles
particle container to be added
"""
@abc.abstractmethod
def get_data(self, uname):
"""Returns data container associated with particle container
Parameters
----------
uname : string
non-changing unique name of particles
"""
@abc.abstractmethod
def set_data(self, data, uname):
"""Sets data container associated with particle container
Parameters
----------
data :
data container to be associated with the particle container
uname : string
non-changing unique name of particles
"""
@abc.abstractmethod
def get_particle(self, uid, uname):
"""Get particle
Parameters
----------
uid :
uid of particle
uname : string
non-changing unique name of particles
"""
@abc.abstractmethod
def update_particles(self, iterable, uname):
"""Update particle
Parameters
----------
iterable : iterable of Particle objects
the particles that will be updated.
uname : string
non-changing unique name of particles
Raises
------
ValueError :
If any particle inside the iterable does not exist.
"""
@abc.abstractmethod
def add_particles(self, iterable, uname):
"""Add particles
Parameters
----------
iterable : iterable of Particle objects
the particles that will be added.
uname : string
non-changing unique name of particles
Raises
------
ValueError :
when there is a particle with a uid that already exists
in the container.
"""
@abc.abstractmethod
def remove_particle(self, uid, uname):
"""Remove particle
Parameters
----------
uid :
uid of particle
uname : string
non-changing unique name of particles
"""
@abc.abstractmethod
def has_particle(self, uid, uname):
"""Has particle
Parameters
----------
uid :
uid of particle
uname : string
non-changing unique name of particles
"""
@abc.abstractmethod
def iter_particles(self, uname, uids=None):
"""Iterate over the particles of a certain type
Parameters
----------
uids : list of particle uids
sequence of uids of particles that should be iterated over. If
uids is None then all particles will be iterated over.
uname : string
non-changing unique name of particles
"""
@abc.abstractmethod
def number_of_particles(self, uname):
"""Get number of particles in a container
Parameters
----------
uname : string
non-changing unique name of particles
"""
@abc.abstractmethod
def flush(self, input_data_filename=None):
"""flush to file
Parameters
----------
input_data_filename : string, optional
name of the data file where information is written (i.e. LAMMPS input).
"""
@abc.abstractmethod
def read(self, output_data_filename=None):
"""read from file
Parameters
----------
output_data_filename : string, optional
name of the data file where information is read from (i.e. LAMMPS output).
"""
| simphony/simphony-lammps-md | simlammps/abc_data_manager.py | Python | bsd-2-clause | 6,973 |
def main(request, response):
response.headers.set("Content-Type", "text/html")
response.headers.set("Custom", "\0")
return "<!doctype html><b>This is a document.</b>"
| UK992/servo | tests/wpt/web-platform-tests/fetch/h1-parsing/resources/document-with-0x00-in-header.py | Python | mpl-2.0 | 179 |
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
"""
A bouncer that combines other bouncers.
"""
from twisted.internet import defer
from twisted.python import util
from flumotion.common import keycards, watched
from flumotion.common import messages, errors, documentation
from flumotion.common.i18n import N_, gettexter
from flumotion.component.bouncers import base, component, combinator
T_ = gettexter()
class MultiBouncer(component.Bouncer):
logCategory = 'multibouncer'
# FIXME random classes for now, they're going away anyway
keycardClasses = (keycards.KeycardHTTPGetArguments,
keycards.KeycardGeneric, keycards.KeycardUACPCC,
keycards.KeycardUACPP, keycards.KeycardToken)
def init(self):
self.watchable_keycards = watched.WatchedDict() # keycard id -> Keycard
self.contexts = {} # keycard id -> {algorithm name -> bool result}
self.algorithms = util.OrderedDict() # name -> algorithm instance
self.combinator = None
def do_setup(self):
def add_entry(entry, algorithm):
name = entry['type']
if name in self.algorithms:
suffix = 1
while ('%s-%d' % (name, suffix)) in self.algorithms:
suffix += 1
name = '%s-%d' % (name, suffix)
assert name not in self.algorithms
self.algorithms[name] = algorithm
return name
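# Illustrative note (not in the original file): if two plugs of the same
# hypothetical type 'token-check' are configured, add_entry stores them
# under the keys 'token-check' and 'token-check-1', so every key in
# self.algorithms stays unique.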
# get all algorithm plugs this component has, put them into
# self.algorithms with unique names
entries = self.config['plugs'].get(base.BOUNCER_ALGORITHM_SOCKET, [])
algorithms = self.plugs.get(base.BOUNCER_ALGORITHM_SOCKET, [])
# check if there's at least one algorithm plug in a separate method, so
# subclasses can override it
self.check_algorithms(algorithms)
for entry, algorithm in zip(entries, algorithms):
# add the algorithm to the algorithms dictionary
name = add_entry(entry, algorithm)
# provide the algorithm with the keycard store
algorithm.set_keycard_store(self.watchable_keycards)
# provide the algorithm with an expiry function crafted especially
# for it (containing its unique name)
expire = lambda ids: self.algorithm_expire_keycard_ids(ids, name)
algorithm.set_expire_function(expire)
# we don't have any algorithms, stop here (see StaticMultiBouncer to
# see why we need this)
if not self.algorithms:
return
self.debug("configured with algorithms %r", self.algorithms.keys())
# create the algorithm combinator
props = self.config['properties']
self.combinator = combinator.AlgorithmCombinator(self.algorithms)
if 'combination' in props and combinator.pyparsing is None:
m = messages.Error(T_(N_(
"To use the 'combination' property you need to "
"have the 'pyparsing' module installed.\n")),
mid='missing-pyparsing')
documentation.messageAddPythonInstall(m, 'pyparsing')
self.addMessage(m)
raise errors.ComponentSetupHandledError()
# get the combination specification, defaulting to implicit AND
spec = props.get('combination', ' and '.join(self.algorithms.keys()))
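# Illustrative example (hypothetical algorithm names): with two algorithms
# registered as 'ip-filter' and 'token-check', the implicit default spec is
#     'ip-filter and token-check'
# while a 'combination' property could instead request something like
#     'ip-filter or token-check'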
self.debug("using combination %s", spec)
try:
self.combinator.create_combination(spec)
except combinator.ParseException, e:
m = messages.Error(T_(N_(
"Invalid algorithms combination: %s"), str(e)),
mid='wrong-combination')
self.addMessage(m)
raise errors.ComponentSetupHandledError()
def check_algorithms(self, algorithms):
if not algorithms:
m = messages.Error(T_(N_(
"The multibouncer requires at least one bouncer "
"algorithm plug to be present")), mid='no-algorithm')
self.addMessage(m)
raise errors.ComponentSetupHandledError()
def do_authenticate(self, keycard):
# create a context for this request
context = {}
# ask the combinator for an answer
d = self.combinator.evaluate(keycard, context)
def authenticated(res, keycard):
# the answer is True/False
if not res:
# False, return None as per the bouncer protocol
return None
if hasattr(keycard, 'ttl') and keycard.ttl <= 0:
# keycard was invalid on input
self.log('immediately expiring keycard %r', keycard)
return None
if self.addKeycard(keycard):
# keycard added, set state to AUTHENTICATED, keep the context,
# return to caller
keycard.state = keycards.AUTHENTICATED
self.contexts[keycard.id] = context
self.watchable_keycards[keycard.id] = keycard
return keycard
return d.addCallback(authenticated, keycard)
def on_keycardRemoved(self, keycard):
# clear our references to the keycard
del self.contexts[keycard.id]
del self.watchable_keycards[keycard.id]
def algorithm_expire_keycard_ids(self, keycard_ids, name):
# this gets called by a particular algorithm when it wants to expire a
# keycard
to_expire = []
self.debug("algorithm %r requested expiration of keycards %r",
name, keycard_ids)
for keycard_id in keycard_ids:
# change the result in the context
context = self.contexts[keycard_id]
context[name] = False
# Reevaluate in the combinator. Because we already got an answer
# for that context, it should contain all necessary info, so we
# should never call any algorithm method: just do synchronous
# evaluation.
if not self.combinator.synchronous_evaluate(context):
self.log("keycard with id %r will be expired", keycard_id)
to_expire.append(keycard_id)
return self.expireKeycardIds(to_expire)
class StaticMultiBouncer(MultiBouncer):
"""A multibouncer that has a static list of bouncer algorithm plugs"""
algorithmClasses = None
def get_main_algorithm(self):
for algorithm in self.algorithms.itervalues():
return algorithm
def setMedium(self, medium):
MultiBouncer.setMedium(self, medium)
for algorithm in self.algorithms.itervalues():
self._export_plug_interface(algorithm, medium)
def do_setup(self):
if self.algorithmClasses is None:
raise NotImplementedError("Subclass did not choose algorithm")
def start_algorithm(d, algorithm, name):
self.algorithms[name] = algorithm
d.addCallback(lambda _: defer.maybeDeferred(algorithm.start, self))
d.addCallback(algorithm_started, algorithm, name)
def algorithm_started(_, algorithm, name):
algorithm.set_keycard_store(self.watchable_keycards)
expire = lambda ids: self.algorithm_expire_keycard_ids(ids, name)
algorithm.set_expire_function(expire)
try:
klasses = iter(self.algorithmClasses)
except TypeError:
klasses = iter((self.algorithmClasses, ))
d = defer.Deferred()
for klass in klasses:
name = klass.__name__
algorithm = klass({'properties': self.config['properties']})
start_algorithm(d, algorithm, name)
def create_combinator(_):
self.combinator = combinator.AlgorithmCombinator(self.algorithms)
spec = ' and '.join(self.algorithms.keys())
self.combinator.create_combination(spec)
d.addCallback(create_combinator)
d.callback(None)
return d
def check_algorithms(self, algorithms):
pass
def do_stop(self):
d = defer.Deferred()
for algorithm in self.algorithms.values():
d.addCallback(lambda _: defer.maybeDeferred(algorithm.stop, self))
d.callback(None)
return d
| flumotion-mirror/flumotion | flumotion/component/bouncers/multibouncer.py | Python | lgpl-2.1 | 8,965 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
import mock
import unittest
import google.cloud.monitoring_v3
import google.auth
import google.datalab
import google.datalab.stackdriver.monitoring as gcm
DEFAULT_PROJECT = 'test'
PROJECT = 'my-project'
METRIC_TYPES = ['compute.googleapis.com/instances/cpu/utilization',
'compute.googleapis.com/instances/cpu/usage_time']
DISPLAY_NAMES = ['CPU Utilization', 'CPU Usage']
METRIC_KIND = 'GAUGE'
VALUE_TYPE = 'DOUBLE'
UNIT = '1'
LABELS = [dict(key='instance_name', value_type='STRING',
description='VM instance'),
dict(key='device_name', value_type='STRING',
description='Device name')]
FILTER_STRING = 'metric.type:"cpu"'
TYPE_PREFIX = 'compute'
class TestCases(unittest.TestCase):
def setUp(self):
self.context = self._create_context(DEFAULT_PROJECT)
self.descriptors = gcm.MetricDescriptors(context=self.context)
@mock.patch('google.datalab.Context.default')
def test_constructor_minimal(self, mock_context_default):
mock_context_default.return_value = self.context
descriptors = gcm.MetricDescriptors()
self.assertEqual(descriptors._client.project, DEFAULT_PROJECT)
self.assertIsNone(descriptors._filter_string)
self.assertIsNone(descriptors._type_prefix)
self.assertIsNone(descriptors._descriptors)
def test_constructor_maximal(self):
context = self._create_context(PROJECT)
descriptors = gcm.MetricDescriptors(
filter_string=FILTER_STRING, type_prefix=TYPE_PREFIX,
context=context)
self.assertEqual(descriptors._client.project, PROJECT)
self.assertEqual(descriptors._filter_string, FILTER_STRING)
self.assertEqual(descriptors._type_prefix, TYPE_PREFIX)
self.assertIsNone(descriptors._descriptors)
@mock.patch('google.cloud.monitoring_v3.MetricServiceClient.list_metric_descriptors')
def test_list(self, mock_gcloud_list_descriptors):
mock_gcloud_list_descriptors.return_value = self._list_metrics_get_result(
context=self.context)
metric_descriptor_list = self.descriptors.list()
mock_gcloud_list_descriptors.assert_called_once_with(
DEFAULT_PROJECT, filter_='')
self.assertEqual(len(metric_descriptor_list), 2)
self.assertEqual(metric_descriptor_list[0].type, METRIC_TYPES[0])
self.assertEqual(metric_descriptor_list[1].type, METRIC_TYPES[1])
@mock.patch('google.cloud.monitoring_v3.MetricServiceClient.list_metric_descriptors')
def test_list_w_api_filter(self, mock_gcloud_list_descriptors):
mock_gcloud_list_descriptors.return_value = self._list_metrics_get_result(
context=self.context)
descriptors = gcm.MetricDescriptors(
filter_string=FILTER_STRING, type_prefix=TYPE_PREFIX,
context=self.context)
metric_descriptor_list = descriptors.list()
expected_filter = '{} AND metric.type = starts_with("{}")'.format(
FILTER_STRING, TYPE_PREFIX)
mock_gcloud_list_descriptors.assert_called_once_with(
DEFAULT_PROJECT, filter_=expected_filter)
self.assertEqual(len(metric_descriptor_list), 2)
self.assertEqual(metric_descriptor_list[0].type, METRIC_TYPES[0])
self.assertEqual(metric_descriptor_list[1].type, METRIC_TYPES[1])
@mock.patch('google.cloud.monitoring_v3.MetricServiceClient.list_metric_descriptors')
def test_list_w_pattern_match(self, mock_gcloud_list_descriptors):
mock_gcloud_list_descriptors.return_value = self._list_metrics_get_result(
context=self.context)
metric_descriptor_list = self.descriptors.list(pattern='*usage_time')
mock_gcloud_list_descriptors.assert_called_once_with(
DEFAULT_PROJECT, filter_='')
self.assertEqual(len(metric_descriptor_list), 1)
self.assertEqual(metric_descriptor_list[0].type, METRIC_TYPES[1])
@mock.patch('google.cloud.monitoring_v3.MetricServiceClient.list_metric_descriptors')
def test_list_caching(self, mock_gcloud_list_descriptors):
mock_gcloud_list_descriptors.return_value = self._list_metrics_get_result(
context=self.context)
actual_list1 = self.descriptors.list()
actual_list2 = self.descriptors.list()
mock_gcloud_list_descriptors.assert_called_once_with(
DEFAULT_PROJECT, filter_='')
self.assertEqual(actual_list1, actual_list2)
@mock.patch('google.datalab.stackdriver.monitoring.MetricDescriptors.list')
def test_as_dataframe(self, mock_datalab_list_descriptors):
mock_datalab_list_descriptors.return_value = self._list_metrics_get_result(
context=self.context)
dataframe = self.descriptors.as_dataframe()
mock_datalab_list_descriptors.assert_called_once_with('*')
expected_headers = list(gcm.MetricDescriptors._DISPLAY_HEADERS)
self.assertEqual(dataframe.columns.tolist(), expected_headers)
self.assertEqual(dataframe.columns.names, [None])
self.assertEqual(dataframe.index.tolist(), list(range(len(METRIC_TYPES))))
self.assertEqual(dataframe.index.names, [None])
expected_labels = 'instance_name, device_name'
expected_values = [
[metric_type, display_name, METRIC_KIND, VALUE_TYPE, UNIT,
expected_labels]
for metric_type, display_name in zip(METRIC_TYPES, DISPLAY_NAMES)]
self.assertEqual(dataframe.values.tolist(), expected_values)
@mock.patch('google.datalab.stackdriver.monitoring.MetricDescriptors.list')
def test_as_dataframe_w_all_args(self, mock_datalab_list_descriptors):
mock_datalab_list_descriptors.return_value = self._list_metrics_get_result(
context=self.context)
dataframe = self.descriptors.as_dataframe(pattern='*cpu*', max_rows=1)
mock_datalab_list_descriptors.assert_called_once_with('*cpu*')
expected_headers = list(gcm.MetricDescriptors._DISPLAY_HEADERS)
self.assertEqual(dataframe.columns.tolist(), expected_headers)
self.assertEqual(dataframe.index.tolist(), [0])
self.assertEqual(dataframe.iloc[0, 0], METRIC_TYPES[0])
@staticmethod
def _create_context(project_id):
creds = mock.Mock(spec=google.auth.credentials.Credentials)
return google.datalab.Context(project_id, creds)
@staticmethod
def _list_metrics_get_result(context):
all_labels = [google.cloud.monitoring_v3.types.LabelDescriptor(**labels)
for labels in LABELS]
descriptors = [
google.cloud.monitoring_v3.types.MetricDescriptor(
type=metric_type, metric_kind=METRIC_KIND, value_type=VALUE_TYPE,
unit=UNIT, display_name=display_name, labels=all_labels,
)
for metric_type, display_name in zip(METRIC_TYPES, DISPLAY_NAMES)]
return descriptors
| googledatalab/pydatalab | tests/stackdriver/monitoring/metric_tests.py | Python | apache-2.0 | 7,203 |
# Setup file for package macro_pie
from setuptools import setup
setup(name="macro_pie",
version="0.0.1",
install_requires=["quark==0.0.1"],
py_modules=['macro_pie'],
packages=['macro_pie', 'macro_pie_md'])
| bozzzzo/quark | quarkc/test/emit/expected/py/macro_pie/setup.py | Python | apache-2.0 | 232 |
# Generated by Django 2.0.8 on 2018-10-28 04:50
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Treedj',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('statusid', models.BigIntegerField(unique=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='conversation.Treedj')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Tweetdj',
fields=[
('statusid', models.BigIntegerField(primary_key=True, serialize=False, unique=True)),
('userid', models.BigIntegerField()),
('json', django.contrib.postgres.fields.jsonb.JSONField()),
('created_at', models.DateTimeField()),
('reply', models.PositiveIntegerField()),
('like', models.PositiveIntegerField()),
('retweet', models.PositiveIntegerField()),
('parentid', models.BigIntegerField(null=True)),
],
),
]
| jeromecc/doctoctocbot | src/conversation/migrations/0001_initial.py | Python | mpl-2.0 | 1,819 |
from classes.LinkedList import *
# Iterative approach
def isPalindrome_iter(linkedlist):
if linkedlist.head == None:
return None
fast = linkedlist.head
slow = linkedlist.head
firsthalf = []
while fast != None and fast.next != None:
firsthalf.append(slow.value)
slow = slow.next
fast = fast.next.next
if fast != None:
slow = slow.next
while slow != None:
if firsthalf.pop() != slow.value:
return False
else:
slow = slow.next
return True
# Recursive approach
def isPalindrome_recu(linkedlist):
length = lengthOfLinkedlist(linkedlist)
current = linkedlist.head
result = isPalindrome_recu_helper(current, length)
return result[1]
def isPalindrome_recu_helper(current, length):
if current == None:
return [None, True]
elif length == 1:
return [current.next, True]
elif length == 2:
return [current.next.next, current.value == current.next.value]
# result is a Python list that stores two values: [next node to compare, is-palindrome flag]
result = isPalindrome_recu_helper(current.next, length - 2)
if (result[0] == None) or (not result[1]):
return result
else:
result[1] = current.value == result[0].value
result[0] = result[0].next
return result
def lengthOfLinkedlist(linkedlist):
length = 0
current = linkedlist.head
while current != None:
length += 1
current = current.next
return length
# -------------------test------------------
L1 = randomLinkedList(3, 3, 4)
print "L2:", L1
print "isPalindrome_iter: ", isPalindrome_iter(L1)
print "isPalindrome_recu: ", isPalindrome_recu(L1)
L2 = LinkedList()
for i in range(1,4):
L2.addNode(i)
for i in range(3, 0, -1):
L2.addNode(i)
print "L3:", L2
print "isPalindrome_iter: ", isPalindrome_iter(L2)
print "isPalindrome_recu: ", isPalindrome_recu(L2)
# Another method: reverse the list and check if they are the same
def isPalindrome(L1):
reverseL1 = reverseList(L1)
return isEqual(L1, reverseL1)
def reverseList(L1):
reverseL1 = LinkedList()
current = L1.head
while current != None:
reverseL1.addNode(current.value)
current = current.next
return reverseL1
def isEqual(L1,L2):
curr1 = L1.head
curr2 = L2.head
while curr1 != None and curr2 != None:
if curr1.value != curr2.value:
return False
curr1 = curr1.next
curr2 = curr2.next
if curr1 != None or curr2 != None:
return False
else:
return True
for i in range(27):
L1 = randomLinkedList(3, 3, 5)
print L1
print isPalindrome(L1)
| aattaran/Machine-Learning-with-Python | CTCI/Chapter 2/Question2_7.py | Python | bsd-3-clause | 2,591 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils import translation
from ...utils import setup
class GetLanguageInfoListTests(SimpleTestCase):
libraries = {
'custom': 'template_tests.templatetags.custom',
'i18n': 'django.templatetags.i18n',
}
@setup({'i18n30': '{% load i18n %}'
'{% get_language_info_list for langcodes as langs %}'
'{% for l in langs %}{{ l.code }}: {{ l.name }}/'
'{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}'})
def test_i18n30(self):
output = self.engine.render_to_string('i18n30', {'langcodes': ['it', 'no']})
self.assertEqual(output, 'it: Italian/italiano bidi=False; no: Norwegian/norsk bidi=False; ')
@setup({'i18n31': '{% load i18n %}'
'{% get_language_info_list for langcodes as langs %}'
'{% for l in langs %}{{ l.code }}: {{ l.name }}/'
'{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}'})
def test_i18n31(self):
output = self.engine.render_to_string('i18n31', {'langcodes': (('sl', 'Slovenian'), ('fa', 'Persian'))})
self.assertEqual(
output,
'sl: Slovenian/Sloven\u0161\u010dina bidi=False; '
'fa: Persian/\u0641\u0627\u0631\u0633\u06cc bidi=True; '
)
@setup({'i18n38_2': '{% load i18n custom %}'
'{% get_language_info_list for langcodes|noop:"x y" as langs %}'
'{% for l in langs %}{{ l.code }}: {{ l.name }}/'
'{{ l.name_local }}/{{ l.name_translated }} '
'bidi={{ l.bidi }}; {% endfor %}'})
def test_i18n38_2(self):
with translation.override('cs'):
output = self.engine.render_to_string('i18n38_2', {'langcodes': ['it', 'fr']})
self.assertEqual(
output,
'it: Italian/italiano/italsky bidi=False; '
'fr: French/français/francouzsky bidi=False; '
)
| kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/template_tests/syntax_tests/i18n/test_get_language_info_list.py | Python | apache-2.0 | 2,093 |
"""Regresssion tests for urllib"""
import collections
import urllib
import httplib
import io
import unittest
import os
import sys
import mimetools
import tempfile
from test import test_support
from base64 import b64encode
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
def fakehttp(fakedata):
class FakeSocket(io.BytesIO):
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = ""
def connect(self):
self.sock = FakeSocket(self.fakedata)
self.__class__.fakesock = self.sock
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = fakehttp(fakedata)
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), mimetools.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertEqual(self.returned_obj.getcode(), None)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = test_support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in os.environ.keys():
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.getproxies_environment()
# getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions, where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urlopen() opening a fake http connection."""
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp('Hello!')
try:
fp = urllib.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp("""HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file:README
Connection: close
Content-Type: text/html; charset=iso-8859-1
""")
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegexp(IOError, msg):
urllib.urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(urllib.FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''')
try:
self.assertRaises(IOError, urllib.urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp('')
try:
self.assertRaises(IOError, urllib.urlopen, 'http://something')
finally:
self.unfakehttp()
def test_missing_localfile(self):
self.assertRaises(IOError, urllib.urlopen,
'file://localhost/a/missing/file.py')
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
self.assertTrue(os.path.exists(tmp_file))
try:
fp = urllib.urlopen(tmp_fileurl)
fp.close()
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
self.assertRaises(IOError, urllib.urlopen, tmp_fileurl)
def test_ftp_nonexisting(self):
self.assertRaises(IOError, urllib.urlopen,
'ftp://localhost/not/existing/file.py')
def test_userpass_inurl(self):
self.fakehttp('Hello!')
try:
fakehttp_wrapper = httplib.HTTP._connection_class
fp = urllib.urlopen("http://user:pass@python.org/")
authorization = ("Authorization: Basic %s\r\n" %
b64encode('user:pass'))
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf)
self.assertEqual(fp.readline(), "Hello!")
self.assertEqual(fp.readline(), "")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_with_spaces_inurl(self):
self.fakehttp('Hello!')
try:
url = "http://a b:c d@python.org/"
fakehttp_wrapper = httplib.HTTP._connection_class
authorization = ("Authorization: Basic %s\r\n" %
b64encode('a b:c d'))
fp = urllib.urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf)
self.assertEqual(fp.readline(), "Hello!")
self.assertEqual(fp.readline(), "")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(test_support.TESTFN)
self.text = 'testing urllib.urlretrieve'
try:
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
def createNewTempFile(self, data=""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assertIsInstance(result[1], mimetools.Message,
"did not get a mimetools.Message instance as "
"second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.urlretrieve(self.constructLocalFileUrl(
test_support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertIsInstance(count, int)
self.assertIsInstance(block_size, int)
self.assertIsInstance(total_size, int)
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 5)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 8193)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp('''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
try:
self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve,
'http://example.com', reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp('''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
try:
self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve, 'http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are quoted by default,
# except for space (there is a separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
result = urllib.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %s != %s" % (expected, result))
self.assertRaises(TypeError, urllib.quote, None)
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using quote(): not all characters escaped; %s" %
result)
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = '\xab\xea'
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquote_with_unicode(self):
r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the nturl2path library')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.url2pathname(url)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
# In Python 3 this test class is moved to test_urlparse.
def test_splittype(self):
splittype = urllib.splittype
self.assertEqual(splittype('type:opaquestring'), ('type', 'opaquestring'))
self.assertEqual(splittype('opaquestring'), (None, 'opaquestring'))
self.assertEqual(splittype(':opaquestring'), (None, ':opaquestring'))
self.assertEqual(splittype('type:'), ('type', ''))
self.assertEqual(splittype('type:opaque:string'), ('type', 'opaque:string'))
def test_splithost(self):
splithost = urllib.splithost
self.assertEqual(splithost('//www.example.org:80/foo/bar/baz.html'),
('www.example.org:80', '/foo/bar/baz.html'))
self.assertEqual(splithost('//www.example.org:80'),
('www.example.org:80', ''))
self.assertEqual(splithost('/foo/bar/baz.html'),
(None, '/foo/bar/baz.html'))
def test_splituser(self):
splituser = urllib.splituser
self.assertEqual(splituser('User:Pass@www.python.org:080'),
('User:Pass', 'www.python.org:080'))
self.assertEqual(splituser('@www.python.org:080'),
('', 'www.python.org:080'))
self.assertEqual(splituser('www.python.org:080'),
(None, 'www.python.org:080'))
self.assertEqual(splituser('User:Pass@'),
('User:Pass', ''))
self.assertEqual(splituser('User@example.com:Pass@www.python.org:080'),
('User@example.com:Pass', 'www.python.org:080'))
def test_splitpasswd(self):
# Some of the password examples are not sensible, but they are added
# to conform to RFC 2617 and address issue #4675.
splitpasswd = urllib.splitpasswd
self.assertEqual(splitpasswd('user:ab'), ('user', 'ab'))
self.assertEqual(splitpasswd('user:a\nb'), ('user', 'a\nb'))
self.assertEqual(splitpasswd('user:a\tb'), ('user', 'a\tb'))
self.assertEqual(splitpasswd('user:a\rb'), ('user', 'a\rb'))
self.assertEqual(splitpasswd('user:a\fb'), ('user', 'a\fb'))
self.assertEqual(splitpasswd('user:a\vb'), ('user', 'a\vb'))
self.assertEqual(splitpasswd('user:a:b'), ('user', 'a:b'))
self.assertEqual(splitpasswd('user:a b'), ('user', 'a b'))
self.assertEqual(splitpasswd('user 2:ab'), ('user 2', 'ab'))
self.assertEqual(splitpasswd('user+1:a+b'), ('user+1', 'a+b'))
self.assertEqual(splitpasswd('user:'), ('user', ''))
self.assertEqual(splitpasswd('user'), ('user', None))
self.assertEqual(splitpasswd(':ab'), ('', 'ab'))
def test_splitport(self):
splitport = urllib.splitport
self.assertEqual(splitport('parrot:88'), ('parrot', '88'))
self.assertEqual(splitport('parrot'), ('parrot', None))
self.assertEqual(splitport('parrot:'), ('parrot', None))
self.assertEqual(splitport('127.0.0.1'), ('127.0.0.1', None))
self.assertEqual(splitport('parrot:cheese'), ('parrot:cheese', None))
self.assertEqual(splitport('[::1]:88'), ('[::1]', '88'))
self.assertEqual(splitport('[::1]'), ('[::1]', None))
self.assertEqual(splitport(':88'), ('', '88'))
def test_splitnport(self):
splitnport = urllib.splitnport
self.assertEqual(splitnport('parrot:88'), ('parrot', 88))
self.assertEqual(splitnport('parrot'), ('parrot', -1))
self.assertEqual(splitnport('parrot', 55), ('parrot', 55))
self.assertEqual(splitnport('parrot:'), ('parrot', -1))
self.assertEqual(splitnport('parrot:', 55), ('parrot', 55))
self.assertEqual(splitnport('127.0.0.1'), ('127.0.0.1', -1))
self.assertEqual(splitnport('127.0.0.1', 55), ('127.0.0.1', 55))
self.assertEqual(splitnport('parrot:cheese'), ('parrot', None))
self.assertEqual(splitnport('parrot:cheese', 55), ('parrot', None))
def test_splitquery(self):
# Normal cases are exercised by other tests; ensure that we also
# catch cases with no port specified (testcase ensuring coverage)
splitquery = urllib.splitquery
self.assertEqual(splitquery('http://python.org/fake?foo=bar'),
('http://python.org/fake', 'foo=bar'))
self.assertEqual(splitquery('http://python.org/fake?foo=bar?'),
('http://python.org/fake?foo=bar', ''))
self.assertEqual(splitquery('http://python.org/fake'),
('http://python.org/fake', None))
self.assertEqual(splitquery('?foo=bar'), ('', 'foo=bar'))
def test_splittag(self):
splittag = urllib.splittag
self.assertEqual(splittag('http://example.com?foo=bar#baz'),
('http://example.com?foo=bar', 'baz'))
self.assertEqual(splittag('http://example.com?foo=bar#'),
('http://example.com?foo=bar', ''))
self.assertEqual(splittag('#baz'), ('', 'baz'))
self.assertEqual(splittag('http://example.com?foo=bar'),
('http://example.com?foo=bar', None))
self.assertEqual(splittag('http://example.com?foo=bar#baz#boo'),
('http://example.com?foo=bar#baz', 'boo'))
def test_splitattr(self):
splitattr = urllib.splitattr
self.assertEqual(splitattr('/path;attr1=value1;attr2=value2'),
('/path', ['attr1=value1', 'attr2=value2']))
self.assertEqual(splitattr('/path;'), ('/path', ['']))
self.assertEqual(splitattr(';attr1=value1;attr2=value2'),
('', ['attr1=value1', 'attr2=value2']))
self.assertEqual(splitattr('/path'), ('/path', []))
def test_splitvalue(self):
# Normal cases are exercised by other tests; test pathological cases
# with no key/value pairs. (testcase ensuring coverage)
splitvalue = urllib.splitvalue
self.assertEqual(splitvalue('foo=bar'), ('foo', 'bar'))
self.assertEqual(splitvalue('foo='), ('foo', ''))
self.assertEqual(splitvalue('=bar'), ('', 'bar'))
self.assertEqual(splitvalue('foobar'), ('foobar', None))
self.assertEqual(splitvalue('foo=bar=baz'), ('foo', 'bar=baz'))
def test_toBytes(self):
result = urllib.toBytes(u'http://www.python.org')
self.assertEqual(result, 'http://www.python.org')
self.assertRaises(UnicodeError, urllib.toBytes,
test_support.u(r'http://www.python.org/medi\u00e6val'))
def test_unwrap(self):
url = urllib.unwrap('<URL:type://host/path>')
self.assertEqual(url, 'type://host/path')
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
        # test that safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# The FTP tests below are commented out.
# It is not clear why they keep failing on Windows and SPARC:
# everywhere else they pass, but on those machines they fail intermittently,
# sometimes in one test and sometimes in another.  On Linux the tests pass.
# If anybody has access to one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
DeprecationWarning)
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
urlretrieve_HttpTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
ProxyTests,
ProxyTests_withOrderedEnv,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
| wang1352083/pythontool | python-2.7.12-lib/test/test_urllib.py | Python | mit | 44,628 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains functions to help with caching.
"""
# Django's cache module
from django.core.cache import cache
# Cached models
from core.spaces.models import Space
# Response types
from django.shortcuts import get_object_or_404
# get_or_insert_object_in_cache() below first tries the cache, then falls
# back to the database, and finally raises a 404 if the object does not
# exist.
def _get_cache_key_for_model(model, key):
"""
Returns a unique key for the given model.
We prefix the given `key` with the name of the `model` to provide a further
degree of uniqueness of keys across the cache.
"""
if not isinstance(key, basestring):
raise TypeError('key must be str or a unicode string')
return model.__name__ + '_' + key
def get_or_insert_object_in_cache(model, key, *args, **kwargs):
"""
Returns an instance of the `model` stored in the cache with the given key.
If the object is not found in the cache, it is retrieved from the database
and set in the cache.
"""
actual_key = _get_cache_key_for_model(model, key)
return_object = cache.get(actual_key)
if not return_object:
return_object = get_object_or_404(model, *args, **kwargs)
cache.set(actual_key, return_object)
return return_object
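# A minimal usage sketch (added for illustration, not part of the original
# module): any lookup arguments accepted by get_object_or_404 can be
# forwarded, so a view could cache a Space by primary key as below.
# `space_id` is a hypothetical argument.
def _example_cached_space_lookup(space_id):
    """Illustrative only: fetch a Space through the cache helper above."""
    return get_or_insert_object_in_cache(Space, str(space_id), pk=space_id)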
| cidadania/e-cidadania | src/helpers/cache.py | Python | apache-2.0 | 1,903 |
# Copyright (c) 2016 AT&T
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from unittest import mock
try:
from mistralclient.api import client as mistralcli
except ImportError:
mistralcli = None
from oslo_config import cfg
from murano.dsl import murano_method
from murano.dsl import murano_type
from murano.engine.system import workflowclient
from murano.tests.unit import base
CONF = cfg.CONF
def rand_name(name='murano'):
"""Generates random string.
:param name: Basic name
:return:
"""
return name + str(random.randint(1, 0x7fffffff))
class TestMistralClient(base.MuranoTestCase):
def setUp(self):
super(TestMistralClient, self).setUp()
self.mistral_client_mock = mock.Mock()
self.mistral_client_mock.client = mock.MagicMock(
spec=mistralcli.client)
self._patch_client()
self.mock_class = mock.MagicMock(spec=murano_type.MuranoClass)
self.mock_method = mock.MagicMock(spec=murano_method.MuranoMethod)
self._this = mock.MagicMock()
self._this.owner = None
self.addCleanup(mock.patch.stopall)
def _patch_client(self):
self.mock_client = mock.Mock(return_value=self.mistral_client_mock)
self.client_patcher = mock.patch.object(workflowclient.MistralClient,
'_client', self.mock_client)
self.client_patcher.start()
self.mock_create_client = mock.Mock(
return_value=self.mistral_client_mock)
self.create_client_patcher = mock.patch.object(
workflowclient.MistralClient, '_create_client',
self.mock_create_client)
self.create_client_patcher.start()
def _unpatch_client(self):
self.client_patcher.stop()
self.create_client_patcher.stop()
def test_run_with_execution_success_state(self):
test_output = '{"openstack": "foo", "__execution": "bar", "task":'\
' "baz"}'
mock_execution = mock.MagicMock(
id='123', state='SUCCESS', output=test_output)
self.mock_client.executions.create.return_value = mock_execution
self.mock_client.executions.get.return_value = mock_execution
run_name = rand_name('test')
timeout = 1
mc = workflowclient.MistralClient(self._this, 'regionOne')
output = mc.run(run_name, timeout)
for prop in ['openstack', '__execution', 'task']:
self.assertFalse(hasattr(output, prop))
self.assertEqual({}, output)
def test_run_with_execution_error_state(self):
mock_execution = mock.MagicMock(
id='123', state='ERROR', output="{'test_attr': 'test_val'}")
self.mock_client.executions.create.return_value = mock_execution
self.mock_client.executions.get.return_value = mock_execution
run_name = rand_name('test')
timeout = 1
mc = workflowclient.MistralClient(self._this, 'regionOne')
expected_error_msg = 'Mistral execution completed with ERROR.'\
' Execution id: {0}. Output: {1}'\
.format(mock_execution.id, mock_execution.output)
with self.assertRaisesRegex(workflowclient.MistralError,
expected_error_msg):
mc.run(run_name, timeout)
def test_run_except_timeout_error(self):
mock_execution = mock.MagicMock(
id='123', state='TEST_STATE', output="{'test_attr': 'test_val'}")
self.mock_client.executions.create.return_value = mock_execution
self.mock_client.executions.get.return_value = mock_execution
run_name = rand_name('test')
timeout = 1
mc = workflowclient.MistralClient(self._this, 'regionOne')
expected_error_msg = 'Mistral run timed out. Execution id: {0}.'\
.format(mock_execution.id)
with self.assertRaisesRegex(workflowclient.MistralError,
expected_error_msg):
mc.run(run_name, timeout)
def test_run_with_immediate_timeout(self):
mock_execution = mock.MagicMock(
id='123', state='ERROR', output="{'test_attr': 'test_val'}")
self.mock_client.executions.create.return_value = mock_execution
run_name = rand_name('test')
timeout = 0
mc = workflowclient.MistralClient(self._this, 'regionOne')
self.assertEqual(mock_execution.id, mc.run(run_name, timeout))
def test_upload(self):
mc = workflowclient.MistralClient(self._this, 'regionOne')
definition = rand_name('test')
self.assertIsNone(mc.upload(definition))
self.assertTrue(workflowclient.MistralClient.
_client.workflows.create.called)
@mock.patch('murano.engine.system.workflowclient.auth_utils')
def test_client_property(self, _):
self._unpatch_client()
test_mistral_settings = {
'url': rand_name('test_mistral_url'),
'project_id': rand_name('test_project_id'),
'endpoint_type': rand_name('test_endpoint_type'),
'auth_token': rand_name('test_auth_token'),
'user_id': rand_name('test_user_id'),
'insecure': rand_name('test_insecure'),
'cacert': rand_name('test_ca_cert')
}
with mock.patch('murano.engine.system.workflowclient.CONF')\
as mock_conf:
mock_conf.mistral = mock.MagicMock(**test_mistral_settings)
region_name = rand_name('test_region_name')
mc = workflowclient.MistralClient(self._this, region_name)
mistral_client = mc._client
self.assertIsNotNone(mistral_client)
| openstack/murano | murano/tests/unit/engine/system/test_workflowclient.py | Python | apache-2.0 | 6,285 |
from client import clib
from engine import character
from client import serialize
mario = character.Character(10, 20)
pseudo = input("What's your name?")
characters = {pseudo: mario}
ip = "localhost"#input("Server's IP: ")
port = 9876#int(input("Server's port: "))
clib = clib.Clib(ip, port)
clib.sendMessage(serialize.gen_handshake_msg(pseudo))# TODO: Send this reliably
print(mario)
a = 0
while True:
clib.pool(characters, mario)
a += 1
if a == 100000:
a = 0
clib.sendMessage(serialize.gen_input_msg(21))
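        # (added comment) The counter above crudely rate-limits outgoing
        # input messages: one gen_input_msg(21) packet is sent roughly every
        # 100000 iterations of the polling loop instead of on every pass.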
print("le", mario.x)
| Getkey/mario-kombat | tests/micro-client.py | Python | mpl-2.0 | 541 |
import socket
import re
import config
def get_word(data):
word = None
word_regexp = re.compile(r'[^Score:\s\d{1,}]([a-zA-Z0-9]+)')
found = word_regexp.search(data)
if found:
word = found.group(1)
else:
pass
return word
def get_score(data):
score = None
score_regexp = re.compile(r'Score:\s(\d{1,})')
found = score_regexp.search(data)
if found:
score = int(found.group(1))
else:
pass
return score
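# Illustrative sketch (added, not part of the original script): assuming the
# game server sends a banner such as "Score: 42", get_score() extracts the
# integer score from it.  The sample string below is made up.
def _example_get_score():
    """Illustrative only: returns 42 for the hypothetical banner line."""
    return get_score('Score: 42')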
def main():
playing = True
is_game_over = False
lastScore = 0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (config.HOST, config.PORT)
print 'connecting to %s port %s' % server_address
sock.connect(server_address)
while True:
data = sock.recv(1024)
if '=====Magic Type Menu=====' in data and playing:
print "[*] Play a game!"
sock.sendall('1\r\n')
if 'Choose the speed level' in data and playing:
print "[*] Choose speed level at " + str(config.LEVEL) + '!'
sock.sendall(str(config.LEVEL) + '\r\n')
if 'Game over' in data:
print '[*] Game over!'
is_game_over = True
if '|' in data and playing:
score = get_score(data)
word = get_word(data)
if score is not None:
if score >= config.STOP_THRESHOLD_SCORE:
playing = False
else:
if lastScore != score:
print 'Score:', score
lastScore = score
if word is not None:
print 'Found word: ', word
sock.sendall(word + '\r\n')
if is_game_over:
data = sock.recv(1024)
print data
break
print 'Close the socket!'
sock.close()
if __name__ == '__main__':
main()
| Plummy-Panda/python-magictype | auto_typing_game.py | Python | mit | 1,905 |
from datetime import datetime, timedelta
import logging
import traceback
try:
from django.contrib.gis.utils import GeoIP, GeoIPException
HAS_GEOIP = True
except ImportError:
HAS_GEOIP = False
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from tracking import utils
USE_GEOIP = getattr(settings, 'TRACKING_USE_GEOIP', False)
CACHE_TYPE = getattr(settings, 'GEOIP_CACHE_TYPE', 4)
log = logging.getLogger('tracking.models')
class VisitorManager(models.Manager):
def active(self, timeout=None):
"""
Retrieves only visitors who have been active within the timeout
period.
"""
if not timeout:
timeout = utils.get_timeout()
now = datetime.now()
cutoff = now - timedelta(minutes=timeout)
return self.get_query_set().filter(last_update__gte=cutoff)
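        # Illustrative note (added comment): with a timeout of, say, 10
        # minutes, only visitors whose last_update falls inside the last 10
        # minutes are returned; anything older is treated as inactive.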
class Visitor(models.Model):
session_key = models.CharField(max_length=40)
ip_address = models.CharField(max_length=20)
user = models.ForeignKey(User, null=True)
user_agent = models.CharField(max_length=255)
referrer = models.CharField(max_length=255)
url = models.CharField(max_length=255)
page_views = models.PositiveIntegerField(default=0)
session_start = models.DateTimeField()
last_update = models.DateTimeField()
objects = VisitorManager()
def _time_on_site(self):
"""
Attempts to determine the amount of time a visitor has spent on the
site based upon their information that's in the database.
"""
if self.session_start:
seconds = (self.last_update - self.session_start).seconds
hours = seconds / 3600
seconds -= hours * 3600
minutes = seconds / 60
seconds -= minutes * 60
return u'%i:%02i:%02i' % (hours, minutes, seconds)
else:
return ugettext(u'unknown')
time_on_site = property(_time_on_site)
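    # Worked example (added comment): a session lasting 3725 seconds splits
    # into 3725 // 3600 = 1 hour with 125 seconds left, i.e. 2 minutes and
    # 5 seconds, so time_on_site renders as u'1:02:05'.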
def _get_geoip_data(self):
"""
Attempts to retrieve MaxMind GeoIP data based upon the visitor's IP
"""
if not HAS_GEOIP or not USE_GEOIP:
# go no further when we don't need to
log.debug('Bailing out. HAS_GEOIP: %s; TRACKING_USE_GEOIP: %s' % (HAS_GEOIP, USE_GEOIP))
return None
if not hasattr(self, '_geoip_data'):
self._geoip_data = None
try:
gip = GeoIP(cache=CACHE_TYPE)
self._geoip_data = gip.city(self.ip_address)
except GeoIPException:
# don't even bother...
log.error('Error getting GeoIP data for IP "%s": %s' % (self.ip_address, traceback.format_exc()))
return self._geoip_data
geoip_data = property(_get_geoip_data)
def _get_geoip_data_json(self):
"""
Cleans out any dirty unicode characters to make the geoip data safe for
JSON encoding.
"""
clean = {}
if not self.geoip_data: return {}
for key,value in self.geoip_data.items():
clean[key] = utils.u_clean(value)
return clean
geoip_data_json = property(_get_geoip_data_json)
class Meta:
ordering = ('-last_update',)
unique_together = ('session_key', 'ip_address',)
class UntrackedUserAgent(models.Model):
keyword = models.CharField(_('keyword'), max_length=100, help_text=_('Part or all of a user-agent string. For example, "Googlebot" here will be found in "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" and that visitor will not be tracked.'))
def __unicode__(self):
return self.keyword
class Meta:
ordering = ('keyword',)
verbose_name = _('Untracked User-Agent')
verbose_name_plural = _('Untracked User-Agents')
class BannedIP(models.Model):
ip_address = models.IPAddressField('IP Address', help_text=_('The IP address that should be banned'))
def __unicode__(self):
return self.ip_address
class Meta:
ordering = ('ip_address',)
verbose_name = _('Banned IP')
verbose_name_plural = _('Banned IPs')
| MontmereLimited/django-tracking | tracking/models.py | Python | mit | 4,243 |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.timezone import utc
from oioioi.base.tests import fake_time
from oioioi.contests.models import Contest
from oioioi.statistics.plotfunctions import histogram, \
points_to_source_length_problem, test_scores
from oioioi.contests.models import ProblemInstance
from oioioi.statistics.controllers import statistics_categories, \
statistics_plot_kinds
from oioioi.statistics.models import StatisticsConfig
class TestStatisticsPlotFunctions(TestCase):
fixtures = ['test_users', 'test_contest', 'test_full_package',
'test_problem_instance', 'test_submission']
def setUp(self):
self.request = RequestFactory().request()
self.request.user = User.objects.get(username='test_user')
self.request.contest = Contest.objects.get()
self.request.timestamp = datetime.now().replace(tzinfo=utc)
def assertSizes(self, data, dims):
"""Assert that ``data`` is a ``len(dims)``-dimensional rectangular
matrix, represented as a list, with sizes in consecutive dimensions
as specified in ``dims``"""
if dims == []:
self.assertTrue(not isinstance(data, list) or data == [])
else:
self.assertEqual(len(data), dims[0])
for sub in data:
self.assertSizes(sub, dims[1:])
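    # Illustrative note (added comment): for example,
    # self.assertSizes([[1, 2, 3]], [1, 3]) passes -- the outer list has one
    # row, each row has three entries, and scalars terminate the recursion.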
def test_histogram(self):
test1 = [0, 0, 50, 50, 100, 100]
result1 = [[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
[2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2]]
self.assertEqual(histogram(test1), result1)
test2 = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
result2 = [[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
self.assertEqual(histogram(test2), result2)
test3 = [34]
result3 = [[0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]
self.assertEqual(histogram(test3), result3)
test4 = [0]
result4 = [[0], [1]]
self.assertEqual(histogram(test4), result4)
def test_points_to_source_length(self):
pi = ProblemInstance.objects.get(short_name='zad1')
plot = points_to_source_length_problem(self.request, pi)
self.assertEqual(len(plot['series']), 1)
self.assertSizes(plot['data'], [1, 1, 3])
def test_test_scores(self):
pi = ProblemInstance.objects.get(short_name='zad1')
plot = test_scores(self.request, pi)
self.assertEqual(len(plot['series']), 3)
self.assertEqual(len(plot['series']), len(plot['data']))
self.assertEqual(len(plot['keys']), 4)
self.assertIn('OK', plot['series'])
self.assertIn('WA', plot['series'])
class TestHighchartsOptions(TestCase):
fixtures = ['test_users', 'test_contest', 'test_full_package',
'test_problem_instance', 'test_submission', 'test_extra_rounds']
def setUp(self):
self.request = RequestFactory().request()
self.request.user = User.objects.get(username='test_user')
self.request.contest = Contest.objects.get()
self.request.timestamp = datetime.now().replace(tzinfo=utc)
def test_scatter(self):
plot_function, plot_type = \
statistics_plot_kinds['POINTS_TO_SOURCE_LENGTH_PROBLEM']
plot = plot_type.highcharts_options(plot_function(self.request,
ProblemInstance.objects.filter(short_name='zad2')[0]))
self.assertIsInstance(plot, dict)
self.assertIn('xAxis', plot)
self.assertIn('title', plot['xAxis'])
self.assertIn('min', plot['xAxis'])
self.assertIn('scatter', plot['plotOptions'])
def test_results_histogram(self):
plot_function, plot_type = \
statistics_plot_kinds['POINTS_HISTOGRAM_PROBLEM']
plot = plot_type.highcharts_options(plot_function(self.request,
ProblemInstance.objects.filter(short_name='zad2')[0]))
self.assertIsInstance(plot, dict)
self.assertIn('yAxis', plot)
self.assertIn('title', plot['yAxis'])
self.assertIn('min', plot['yAxis'])
self.assertIn('column', plot['plotOptions'])
self.assertIn(';∞)', plot['xAxis']['categories'][-1])
def test_submission_histogram(self):
contest = Contest.objects.get()
plot_function, plot_type = \
statistics_plot_kinds['SUBMISSIONS_HISTOGRAM_CONTEST']
plot = plot_type.highcharts_options(plot_function(self.request,
contest))
self.assertIsInstance(plot, dict)
self.assertIn('yAxis', plot)
self.assertIn('title', plot['yAxis'])
self.assertIn('min', plot['yAxis'])
self.assertIn('column', plot['plotOptions'])
self.assertIn('OK', [s['name'] for s in plot['series']])
class TestStatisticsViews(TestCase):
fixtures = ['test_users', 'test_contest', 'test_full_package',
'test_problem_instance', 'test_submission', 'test_extra_rounds']
def test_statistics_view(self):
contest = Contest.objects.get()
url = reverse('statistics_main', kwargs={'contest_id': contest.id})
# Without StatisticsConfig model
self.client.login(username='test_admin')
with fake_time(datetime(2015, 8, 5, tzinfo=utc)):
response = self.client.get(url)
self.assertContains(response, 'Results histogram')
self.client.login(username='test_user')
with fake_time(datetime(2015, 8, 5, tzinfo=utc)):
response = self.client.get(url)
self.assertEquals(403, response.status_code)
cfg = StatisticsConfig(contest=contest, visible_to_users=True,
visibility_date=datetime(2014, 2, 3, tzinfo=utc))
cfg.save()
self.client.login(username='test_admin')
with fake_time(datetime(2015, 8, 5, tzinfo=utc)):
response = self.client.get(url)
self.assertContains(response, 'Results histogram')
self.client.login(username='test_user')
with fake_time(datetime(2015, 8, 5, tzinfo=utc)):
response = self.client.get(url)
self.assertContains(response, 'Results histogram')
self.assertContains(response, 'zad4')
self.assertContains(response, 'zad2')
self.assertContains(response, 'zad3')
self.assertContains(response, 'zad1')
self.assertContains(response,
"[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]")
url = reverse('statistics_view', kwargs={'contest_id': contest.id,
'category': statistics_categories['PROBLEM'][1],
'object_name': 'zad2'})
self.assertContains(response, url)
| papedaniel/oioioi | oioioi/statistics/tests.py | Python | gpl-3.0 | 7,040 |
"""
Support for particulate matter sensors connected to a serial port.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.serial_pm/
"""
import logging
import voluptuous as vol
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
REQUIREMENTS = ['pmsensor==0.4']
_LOGGER = logging.getLogger(__name__)
CONF_SERIAL_DEVICE = 'serial_device'
CONF_BRAND = 'brand'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_BRAND): cv.string,
vol.Required(CONF_SERIAL_DEVICE): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
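# Illustrative configuration sketch (added): with the schema above, a
# configuration.yaml entry for this platform might look like the block below.
# The serial device path and brand key are hypothetical; valid brands are the
# keys of pmsensor.serial_pm.SUPPORTED_SENSORS.
#
#   sensor:
#     - platform: serial_pm
#       serial_device: /dev/ttyUSB0
#       brand: oneair,s3
#       name: Particulate matter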
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available PM sensors."""
from pmsensor import serial_pm as pm
try:
coll = pm.PMDataCollector(
config.get(CONF_SERIAL_DEVICE),
pm.SUPPORTED_SENSORS[config.get(CONF_BRAND)]
)
except KeyError:
_LOGGER.error("Brand %s not supported\n supported brands: %s",
config.get(CONF_BRAND), pm.SUPPORTED_SENSORS.keys())
return
except OSError as err:
_LOGGER.error("Could not open serial connection to %s (%s)",
config.get(CONF_SERIAL_DEVICE), err)
return
dev = []
for pmname in coll.supported_values():
if config.get(CONF_NAME) is not None:
name = '{} PM{}'.format(config.get(CONF_NAME), pmname)
else:
name = 'PM{}'.format(pmname)
dev.append(ParticulateMatterSensor(coll, name, pmname))
add_entities(dev)
class ParticulateMatterSensor(Entity):
"""Representation of an Particulate matter sensor."""
def __init__(self, pmDataCollector, name, pmname):
"""Initialize a new PM sensor."""
self._name = name
self._pmname = pmname
self._state = None
self._collector = pmDataCollector
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return "µg/m³"
def update(self):
"""Read from sensor and update the state."""
_LOGGER.debug("Reading data from PM sensor")
try:
self._state = self._collector.read_data()[self._pmname]
except KeyError:
_LOGGER.error("Could not read PM%s value", self._pmname)
def should_poll(self):
"""Sensor needs polling."""
return True
| jamespcole/home-assistant | homeassistant/components/serial_pm/sensor.py | Python | apache-2.0 | 2,805 |
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible # only if you need to support Python 2
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def approved_comments(self):
return self.comments.filter(approved_comment=True)
def __str__(self):
return self.title
@python_2_unicode_compatible # only if you need to support Python 2
class Comment(models.Model):
post = models.ForeignKey('djtransverbisblog.Post',
related_name='comments')
author = models.CharField(max_length=200)
text = models.TextField()
email_address = models.EmailField()
created_date = models.DateTimeField(default=timezone.now)
approved_comment = models.BooleanField(default=False)
def approve(self):
self.approved_comment = True
self.save()
def __str__(self):
return self.text
@python_2_unicode_compatible # only if you need to support Python 2
class Page(models.Model):
title = models.CharField(max_length=200)
en_content = models.TextField()
page_order = models.IntegerField()
sidebar = models.BooleanField(default=False)
def __str__(self):
return self.title
@python_2_unicode_compatible # only if you need to support Python 2
class ApprovedEmail(models.Model):
email_address = models.EmailField()
def __str__(self):
return self.email_address
| filipok/django-transverbis-blog | djtransverbisblog/models.py | Python | mit | 1,821 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-153.026, 10151.9, 5450.89), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((249.406, 8469.25, 5119.64), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1391.31, 7665.44, 6419.85), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-596.402, 8629.51, 7098.43), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((-218.784, 8501.14, 8644.69), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2002.56, 7554.45, 9170.98), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((3703.25, 7615.17, 9460.75), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2976.01, 7956.24, 9827.02), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((5440.55, 7570.14, 8891.85), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((6496.76, 8872.63, 9055.24), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((7747.25, 7942.01, 7994.32), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((7236.1, 7888.5, 6956.47), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((7335.77, 7662.01, 5434.06), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((6722.56, 8638.52, 5365.53), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((7453.05, 9409.88, 3360.42), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((7110.39, 8205.02, 524.343), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((6085.64, 6596.12, 997.108), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((6239.47, 7129.63, 1235.73), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((6826.97, 7382.9, 2779.2), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((8125.75, 7914.63, 3291.54), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((7348.01, 6934.22, 5359.95), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((7715.31, 7303.86, 3405.92), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((7247.78, 6468.67, 3107.23), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((6830.33, 6029.15, 1981.19), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5714.83, 6677.62, 1511.14), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((5241.49, 7400.53, 278.427), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((6061.86, 7181.59, 1601.77), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((5834.16, 7412.37, 3767), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((6976.18, 6416.53, 3853.08), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((7579.47, 6384.78, 5009.37), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((7206.06, 5666.64, 4762.14), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((7255.64, 6958.6, 5915.55), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((8332.73, 6254.74, 4614.03), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((7618.45, 5292.32, 3704.15), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((7381.1, 6238.07, 2827.96), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((7828.47, 6891.84, 1649.9), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((7337.56, 6585.46, 4045.65), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((7727.67, 5939.02, 2337.47), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((6482.82, 5948.2, 2675.16), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((8110.34, 5833.53, 2645.82), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8221.21, 5204.89, 4309.52), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((9489.35, 5160.64, 5775.73), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((11118.9, 3198.95, 4896.35), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((10506.7, 3803.34, 6639.55), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((9570.06, 4087.09, 5273.73), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7991.88, 4832.68, 6086.16), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7432.88, 3053.97, 5507.71), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((9289.05, 2608.11, 4740.44), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((7840.58, 2582.72, 5924.58), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((6658.22, 3559.83, 7067.26), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((7974.24, 3091.3, 7568.91), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((7068.35, 4530.01, 7709.58), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5948.22, 5904.65, 7559.85), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6064.31, 6240.78, 9067.13), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((6347.86, 5548.64, 9391.72), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((6103.87, 4323.44, 7706.01), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((3977.85, 3819.48, 7638.04), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2423.2, 2168.06, 6504.65), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2102.64, 1789.75, 6181.05), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((1708.24, 1869.88, 6904.01), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2702.97, 1985.56, 6712.15), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2437.12, 1376.06, 7372.92), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((3284.83, 3034.37, 7112.77), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((2772.73, 2906.48, 5283.32), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((2217.31, 2483.1, 3493.16), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((3939.31, 2069.94, 4014.47), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((3524.71, 353.277, 3924.82), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((3847.71, 1579.2, 6009.37), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((2522.4, -164.997, 6533.08), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((2696.23, -1131.95, 5288.54), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((3559.06, 94.7442, 5489.99), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| batxes/4Cin | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/mtx1_models/SHH_WT_models10306.py | Python | gpl-3.0 | 17,587 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew, core, scope, workspace
from caffe2.python.modeling.parameter_info import ParameterTags
from caffe2.python.model_helper import ModelHelper
from caffe2.python.cnn import CNNModelHelper
import unittest
import numpy as np
class BrewTest(unittest.TestCase):
def setUp(self):
def myhelper(model, val=-1):
return val
if not brew.has_helper(myhelper):
brew.Register(myhelper)
self.myhelper = myhelper
def myhelper2(model, val=-1):
return val
if not brew.has_helper(myhelper2):
brew.Register(myhelper2)
self.myhelper2 = myhelper2
self.model = ModelHelper(name="test_model")
def test_dropout(self):
p = 0.2
X = np.ones((100, 100)).astype(np.float32) - p
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.dropout(model, "x", "out", is_test=False)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertLess(abs(out.mean() - (1 - p)), 0.05)
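        # Worked check (added comment): X is filled with 1 - p = 0.8 and,
        # assuming inverted-dropout scaling (survivors multiplied by
        # 1 / (1 - p)), the expected mean of "out" stays at 1 - p, so the
        # assertion above allows 0.05 of sampling slack.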
def test_fc(self):
m, n, k = (15, 15, 15)
X = np.random.rand(m, k).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.fc(model, "x", "out_1", k, n)
model.Validate()
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
def test_relu(self):
Xpos = np.ones((5, 5)).astype(np.float32) - 0.5
Xneg = np.ones((5, 5)).astype(np.float32) - 1.5
workspace.FeedBlob("xpos", Xpos)
workspace.FeedBlob("xneg", Xneg)
model = ModelHelper(name="test_model")
brew.relu(model, "xpos", "out_xpos")
brew.relu(model, "xneg", "out_xneg")
model.Validate()
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
pos = workspace.FetchBlob("out_xpos")
self.assertAlmostEqual(pos.mean(), 0.5)
neg = workspace.FetchBlob("out_xneg")
self.assertAlmostEqual(neg.mean(), 0)
def test_tanh(self):
X = np.ones((5, 5)).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.tanh(model, "x", "out_tanh")
model.Validate()
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out_tanh")
self.assertAlmostEqual(out.mean(), np.tanh(0.5), places=5)
def test_validate(self):
model = ModelHelper(name="test_model")
model.params.append("aaa")
model.params.append("bbb")
self.assertEqual(model._Validate(), [])
model.params.append("xxx")
model.params.append("bbb")
self.assertEqual(model._Validate(), ["bbb"])
def test_arg_scope(self):
myhelper = self.myhelper
myhelper2 = self.myhelper2
n = 15
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
with brew.arg_scope([myhelper, myhelper2], val=n):
res1 = brew.myhelper(self.model)
res2 = brew.myhelper2(self.model)
self.assertEqual([n, n], [res1, res2])
def test_arg_scope_single(self):
X = np.random.rand(64, 3, 32, 32).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
with brew.arg_scope(
brew.conv,
stride=2,
pad=2,
weight_init=('XavierFill', {}),
bias_init=('ConstantFill', {})
):
brew.conv(
model=model,
blob_in="x",
blob_out="out",
dim_in=3,
dim_out=64,
kernel=3,
)
model.Validate()
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertEqual(out.shape, (64, 64, 17, 17))
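        # Worked check (added comment): with input width 32, pad 2, kernel 3
        # and stride 2 the spatial size is (32 + 2*2 - 3) // 2 + 1 = 17,
        # which matches the (64, 64, 17, 17) shape asserted above.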
def test_arg_scope_nested(self):
myhelper = self.myhelper
n = 16
with brew.arg_scope([myhelper], val=-3), \
brew.arg_scope([myhelper], val=-2):
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
res = brew.myhelper(self.model)
self.assertEqual(res, -2)
res = brew.myhelper(self.model, val=15)
self.model.Validate()
self.assertEqual(res, 15)
def test_double_register(self):
myhelper = self.myhelper
with self.assertRaises(AttributeError):
brew.Register(myhelper)
def test_has_helper(self):
self.assertTrue(brew.has_helper(brew.conv))
self.assertTrue(brew.has_helper("conv"))
def myhelper3():
pass
self.assertFalse(brew.has_helper(myhelper3))
def test_model_helper(self):
X = np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
my_arg_scope = {'order': 'NHWC'}
model = ModelHelper(name="test_model", arg_scope=my_arg_scope)
with brew.arg_scope(
brew.conv,
stride=2,
pad=2,
weight_init=('XavierFill', {}),
bias_init=('ConstantFill', {})
):
brew.conv(
model=model,
blob_in="x",
blob_out="out",
dim_in=3,
dim_out=64,
kernel=[8, 3]
)
model.Validate()
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertEqual(out.shape, (64, 15, 17, 64))
def test_cnn_model_helper_deprecated(self):
X = np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
# CNNModelHelper is going to be deprecated soon. This test is only
# covering some CNNModelHelper logic
model = CNNModelHelper(name="test_model", order='NHWC')
self.assertEqual(model.arg_scope['order'], 'NHWC')
def test_get_params(self):
def param(x):
return core.ScopedBlobReference(x)
def to_str_list(x):
return sorted([str(p) for p in x])
model = ModelHelper(name="test_model")
model.AddParameter(param("a"))
model.AddParameter(param("b"), tags=ParameterTags.COMPUTED_PARAM)
with scope.NameScope("c"):
model.AddParameter(param("a"))
model.AddParameter(param("d"), tags=ParameterTags.COMPUTED_PARAM)
self.assertEqual(to_str_list(model.GetParams()), ['c/a'])
self.assertEqual(to_str_list(model.GetComputedParams()), ['c/d'])
self.assertEqual(to_str_list(model.GetAllParams()), ['c/a', 'c/d'])
# Get AllParams from the global Scope
self.assertEqual(to_str_list(model.GetAllParams('')), [
'a', 'b', 'c/a', 'c/d'])
self.assertEqual(to_str_list(model.GetParams()), ['a', 'c/a'])
self.assertEqual(to_str_list(model.GetComputedParams()), ['b', 'c/d'])
self.assertEqual(to_str_list(model.GetAllParams()),
['a', 'b', 'c/a', 'c/d'])
self.assertEqual(to_str_list(model.GetAllParams('')),
['a', 'b', 'c/a', 'c/d'])
# Get AllParams from the scope 'c'
self.assertEqual(to_str_list(model.GetAllParams('c')), ['c/a', 'c/d'])
self.assertEqual(to_str_list(model.GetAllParams('c/')), ['c/a', 'c/d'])
def test_param_consistence(self):
model = ModelHelper(name='test_mode')
cnv = brew.conv(model, 'data', 'cnv', 32, 32, 4)
step_model = ModelHelper(name='step_model', param_model=model)
a = brew.fc(step_model, cnv, 'a', 100, 200)
brew.fc(model, a, 'b', 200, 5)
# test the _parameters_info is shared between model and step_model
self.assertEqual(model._parameters_info, step_model._parameters_info)
def test_cond(self):
workspace.FeedBlob("cond", np.array(True))
workspace.FeedBlob("then_value", np.array(1))
workspace.FeedBlob("else_value", np.array(2))
then_model = ModelHelper(name="then_test_model")
then_model.net.Copy("then_value", "output_blob")
else_model = ModelHelper(name="else_test_model")
else_model.net.Copy("else_value", "output_blob")
model = ModelHelper(name="test_model")
brew.cond(
model=model,
cond_blob="cond",
external_blobs=["then_value", "else_value", "output_blob"],
then_model=then_model,
else_model=else_model)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
output_value = workspace.FetchBlob("output_blob")
self.assertEqual(output_value, 1)
workspace.FeedBlob("cond", np.array(False))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
output_value = workspace.FetchBlob("output_blob")
self.assertEqual(output_value, 2)
def test_loop(self):
workspace.FeedBlob("cond", np.array(True))
workspace.FeedBlob("ONE", np.array(1))
workspace.FeedBlob("TWO", np.array(2))
workspace.FeedBlob("TEN", np.array(10))
workspace.FeedBlob("counter", np.array(0))
workspace.FeedBlob("output_blob", np.array(0))
loop_model = ModelHelper(name="loop_test_model")
loop_model.net.Add(["output_blob", "TWO"], "output_blob")
cond_model = ModelHelper(name="cond_test_model")
cond_model.net.Add(["counter", "ONE"], "counter")
comp_res = cond_model.net.LT(["counter", "TEN"])
cond_model.net.Copy(comp_res, "cond")
model = ModelHelper(name="test_model")
brew.loop(
model=model,
cond_blob="cond",
external_blobs=["cond", "ONE", "TWO", "TEN", "counter", "output_blob"],
loop_model=loop_model,
cond_model=cond_model)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
output_value = workspace.FetchBlob("output_blob")
self.assertEqual(output_value, 18)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
class BrewGPUTest(unittest.TestCase):
def test_relu(self):
Xpos = np.ones((5, 5)).astype(np.float32) - 0.5
Xneg = np.ones((5, 5)).astype(np.float32) - 1.5
workspace.FeedBlob("xpos", Xpos)
workspace.FeedBlob("xneg", Xneg)
model = ModelHelper(name="test_model")
brew.relu(model, "xpos", "out_xpos", use_cudnn=True)
brew.relu(model, "xneg", "out_xneg", use_cudnn=True)
model.Validate()
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
pos = workspace.FetchBlob("out_xpos")
self.assertAlmostEqual(pos.mean(), 0.5)
neg = workspace.FetchBlob("out_xneg")
self.assertAlmostEqual(neg.mean(), 0)
def test_tanh(self):
X = np.ones((5, 5)).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.tanh(model, "x", "out_tanh", use_cudnn=True)
model.Validate()
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out_tanh")
self.assertAlmostEqual(out.mean(), np.tanh(0.5), places=5)
| ryfeus/lambda-packs | pytorch/source/caffe2/python/brew_test.py | Python | mit | 11,884 |
# coding: utf-8
from django.core.paginator import Paginator
from django.forms.models import model_to_dict
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.utils import timesince
from .models import Category, CategoryTag, Post, Reply, Attachment
from accounts.templatetags.users_tags import gravatar
from actstream.models import Action
import random
import datetime
import json
import bleach
def post_list(request, category_id):
category = Category.objects.get(pk=category_id)
tags = CategoryTag.objects.filter(category=category)
page_size = 50
posts = Post.objects.approved().filter(category=category).order_by('-created_at')
if request.GET.get('tag_id'):
posts = posts.filter(tag_id=int(request.GET.get('tag_id')))
paginator = Paginator(posts, page_size)
page = request.GET.get('page', 1)
try:
page = int(page)
    except (TypeError, ValueError):
page = 1
posts = paginator.page(page)
ctx = {
'paginator': paginator,
'tags': tags,
'category': category,
'posts': posts,
}
return TemplateResponse(request, 'posts/category.html', ctx)
def post_detail(request, post_id):
post = get_object_or_404(Post, pk=post_id)
post.pageviews = post.pageviews + 1
post.save()
replies = Reply.objects.filter(post=post)
# 暂时以修改verb的方式实现清除回帖提醒
if request.user:
notices = Action.objects.filter(actor_object_id=request.user.id, target_object_id=post.id).exclude(verb='read')
notices.update(verb='read')
if request.is_ajax():
response = []
for reply in replies:
data = model_to_dict(reply)
data['user'] = _serialize_user(data['author'])
data['content'] = bleach.clean(data['content'], ['a',], strip=True)
data['created_at'] = timesince.timesince(reply.created_at)
response.append(data)
return HttpResponse(json.dumps(response), content_type='application/json')
ctx = {
'category': post.category,
'post': post,
'tags': CategoryTag.objects.filter(category=post.category),
'post_replies': replies,
}
return TemplateResponse(request, 'posts/detail.html', ctx)
@login_required
def create(request):
if request.method == 'POST':
category_id = request.POST.get('category_id')
if not category_id:
category_id = 1
category = Category.objects.get(pk=category_id)
tag_id = request.POST.get('tag')
title = request.POST.get('title')
content = request.POST.get('content', '')
post, is_create = Post.objects.get_or_create(title=title, content=content, category=category, tag_id=tag_id, author=request.user)
post.save()
return HttpResponseRedirect('/posts/%s/' % post.id)
return HttpResponseRedirect('/')
@login_required
def edit(request, post_id):
if request.method == 'POST':
post = get_object_or_404(Post, pk=post_id)
if request.user == post.author:
post.title = request.POST.get('title')
post.content = request.POST.get('content')
post.save()
return HttpResponse(post.id)
return HttpResponseRedirect('/')
@csrf_exempt
@login_required
def reply(request, post_id):
if request.method == 'POST':
reply = Reply()
reply.post = Post.objects.get(pk=post_id)
reply.author = request.user
reply.content = request.POST.get('content')
reply.save()
response = model_to_dict(reply)
response['user'] = _serialize_user(response['author'])
return HttpResponse(json.dumps(response), content_type='application/json')
else:
try:
reply_id = int(request.GET.get('reply_id'))
        except (TypeError, ValueError):
return HttpResponse(json.dumps({'errorMessage': '获取回复内容失败,reply_id错误'}), content_type='application/json')
reply = Reply.objects.get(pk=reply_id)
response = model_to_dict(reply)
response['user'] = _serialize_user(reply.author.id)
return HttpResponse(json.dumps(response), content_type='application/json')
def _serialize_user(user_id):
user = User.objects.get(pk=user_id)
return {
'username': user.username,
'id': user.id,
'gravatar': gravatar(user.email),
}
def delete(request):
object_id = request.GET.get('object_id')
if request.GET.get('type').lower() == 'post':
model = Post
else:
model = Reply
row = model.objects.get(pk=object_id)
response = {}
if request.user.id == row.author.id:
row.delete()
response['status'] = 'ok'
else:
response['errorMessage'] = '没有删除权限'
return HttpResponse(json.dumps(response), content_type='application/json')
def upload(request):
today = datetime.datetime.today()
upfile = request.FILES.get('upfile', '')
file_type = str(upfile.name).split('.')[-1].lower()
file_name = str(today.strftime("%Y%m%d%H-%f")) + '.' + file_type
upfile.name = file_name
attachment = Attachment.objects.create(user=request.user, src=upfile)
return HttpResponse(attachment.src.url)
| libchaos/erya | posts/views.py | Python | gpl-3.0 | 5,491 |
# -*- coding: utf-8 -*-
from data_type.classes.static.staticclass3 import Pool
def print_connection():
    print(Pool.conn)
 | xmnlab/minilab | data_type/classes/static/staticclass2.py | Python | gpl-3.0 | 124 |
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
if sys.version < '2.6' or sys.version >= '2.8':
print >> sys.stderr, "Unsupported Python version: WebKit only supports 2.6.x - 2.7.x, and you're running %s." % sys.version.split()[0]
sys.exit(1)
| leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/common/version_check.py | Python | bsd-3-clause | 1,767 |
#!/usr/bin/env python
""" Tests for pycazy module
test_getfamily.py
(c) The James Hutton Institute 2014
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD6 9LH,
Scotland,
UK
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Builtins
import os
import requests
import unittest
# Do we have BeautifulSoup4
try:
from bs4 import BeautifulSoup
except ImportError:
    raise ImportError(
"Install BeautifulSoup4 if you want to use pyCAZy.")
# pyCAZy import
import pycazy
# Test ability to get all sequence info for a CAZy family
class GetClassSeqTest(unittest.TestCase):
def test_get_all_family_seqinfo(self):
""" Get all sequence info for a single CAZy family.
"""
seqinfo = pycazy.get_family_seqinfo('GH1', filter='all')
# Run tests
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| widdowquinn/pycazy | tests/test_getfamily.py | Python | mit | 1,614 |
"""
Methods for exporting course data to XML
"""
import logging
from abc import abstractmethod
from six import text_type
import lxml.etree
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore import EdxJSONEncoder, ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.store_utilities import draft_node_constructor, get_draft_subtree_roots
from xmodule.modulestore import LIBRARY_ROOT
from fs.osfs import OSFS
from json import dumps
import os
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES
from opaque_keys.edx.locator import CourseLocator, LibraryLocator
DRAFT_DIR = "drafts"
PUBLISHED_DIR = "published"
DEFAULT_CONTENT_FIELDS = ['metadata', 'data']
def _export_drafts(modulestore, course_key, export_fs, xml_centric_course_key):
"""
Exports course drafts.
"""
# NOTE: we need to explicitly implement the logic for setting the vertical's parent
# and index here since the XML modulestore cannot load draft modules
with modulestore.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
draft_modules = modulestore.get_items(
course_key,
qualifiers={'category': {'$nin': DIRECT_ONLY_CATEGORIES}},
revision=ModuleStoreEnum.RevisionOption.draft_only
)
# Check to see if the returned draft modules have changes w.r.t. the published module.
# Only modules with changes will be exported into the /drafts directory.
draft_modules = [module for module in draft_modules if modulestore.has_changes(module)]
if draft_modules:
draft_course_dir = export_fs.makedir(DRAFT_DIR, recreate=True)
# accumulate tuples of draft_modules and their parents in
# this list:
draft_node_list = []
for draft_module in draft_modules:
parent_loc = modulestore.get_parent_location(
draft_module.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
# if module has no parent, set its parent_url to `None`
parent_url = None
if parent_loc is not None:
parent_url = text_type(parent_loc)
draft_node = draft_node_constructor(
draft_module,
location=draft_module.location,
url=text_type(draft_module.location),
parent_location=parent_loc,
parent_url=parent_url,
)
draft_node_list.append(draft_node)
for draft_node in get_draft_subtree_roots(draft_node_list):
# only export the roots of the draft subtrees
# since export_from_xml (called by `add_xml_to_node`)
# exports a whole tree
# ensure module has "xml_attributes" attr
if not hasattr(draft_node.module, 'xml_attributes'):
draft_node.module.xml_attributes = {}
# Don't try to export orphaned items
# and their descendents
if draft_node.parent_location is None:
continue
logging.debug('parent_loc = %s', draft_node.parent_location)
draft_node.module.xml_attributes['parent_url'] = draft_node.parent_url
parent = modulestore.get_item(draft_node.parent_location)
# Don't try to export orphaned items
if draft_node.module.location not in parent.children:
continue
index = parent.children.index(draft_node.module.location)
draft_node.module.xml_attributes['index_in_children_list'] = str(index)
draft_node.module.runtime.export_fs = draft_course_dir
adapt_references(draft_node.module, xml_centric_course_key, draft_course_dir)
node = lxml.etree.Element('unknown')
draft_node.module.add_xml_to_node(node)
class ExportManager(object):
"""
Manages XML exporting for courselike objects.
"""
def __init__(self, modulestore, contentstore, courselike_key, root_dir, target_dir):
"""
Export all modules from `modulestore` and content from `contentstore` as xml to `root_dir`.
`modulestore`: A `ModuleStore` object that is the source of the modules to export
`contentstore`: A `ContentStore` object that is the source of the content to export, can be None
`courselike_key`: The Locator of the Descriptor to export
`root_dir`: The directory to write the exported xml to
`target_dir`: The name of the directory inside `root_dir` to write the content to
"""
self.modulestore = modulestore
self.contentstore = contentstore
self.courselike_key = courselike_key
self.root_dir = root_dir
self.target_dir = text_type(target_dir)
@abstractmethod
def get_key(self):
"""
Get the courselike locator key
"""
raise NotImplementedError
def process_root(self, root, export_fs):
"""
Perform any additional tasks to the root XML node.
"""
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
"""
Process additional content, like static assets.
"""
def post_process(self, root, export_fs):
"""
Perform any final processing after the other export tasks are done.
"""
@abstractmethod
def get_courselike(self):
"""
Get the target courselike object for this export.
"""
def export(self):
"""
Perform the export given the parameters handed to this class at init.
"""
with self.modulestore.bulk_operations(self.courselike_key):
fsm = OSFS(self.root_dir)
root = lxml.etree.Element('unknown')
# export only the published content
with self.modulestore.branch_setting(ModuleStoreEnum.Branch.published_only, self.courselike_key):
courselike = self.get_courselike()
export_fs = courselike.runtime.export_fs = fsm.makedir(self.target_dir, recreate=True)
# change all of the references inside the course to use the xml expected key type w/o version & branch
xml_centric_courselike_key = self.get_key()
adapt_references(courselike, xml_centric_courselike_key, export_fs)
root.set('url_name', self.courselike_key.run)
courselike.add_xml_to_node(root)
# Make any needed adjustments to the root node.
self.process_root(root, export_fs)
# Process extra items-- drafts, assets, etc
root_courselike_dir = self.root_dir + '/' + self.target_dir
self.process_extra(root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs)
# Any last pass adjustments
self.post_process(root, export_fs)
class CourseExportManager(ExportManager):
"""
Export manager for courses.
"""
def get_key(self):
return CourseLocator(
self.courselike_key.org, self.courselike_key.course, self.courselike_key.run, deprecated=True
)
def get_courselike(self):
# depth = None: Traverses down the entire course structure.
# lazy = False: Loads and caches all block definitions during traversal for fast access later
# -and- to eliminate many round-trips to read individual definitions.
# Why these parameters? Because a course export needs to access all the course block information
# eventually. Accessing it all now at the beginning increases performance of the export.
return self.modulestore.get_course(self.courselike_key, depth=None, lazy=False)
def process_root(self, root, export_fs):
with export_fs.open(u'course.xml', 'wb') as course_xml:
lxml.etree.ElementTree(root).write(course_xml, encoding='utf-8')
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
# Export the modulestore's asset metadata.
asset_dir = root_courselike_dir + '/' + AssetMetadata.EXPORTED_ASSET_DIR + '/'
if not os.path.isdir(asset_dir):
os.makedirs(asset_dir)
asset_root = lxml.etree.Element(AssetMetadata.ALL_ASSETS_XML_TAG)
course_assets = self.modulestore.get_all_asset_metadata(self.courselike_key, None)
for asset_md in course_assets:
# All asset types are exported using the "asset" tag - but their asset type is specified in each asset key.
asset = lxml.etree.SubElement(asset_root, AssetMetadata.ASSET_XML_TAG)
asset_md.to_xml(asset)
with OSFS(asset_dir).open(AssetMetadata.EXPORTED_ASSET_FILENAME, 'wb') as asset_xml_file:
lxml.etree.ElementTree(asset_root).write(asset_xml_file, encoding='utf-8')
# export the static assets
policies_dir = export_fs.makedir('policies', recreate=True)
if self.contentstore:
self.contentstore.export_all_for_course(
self.courselike_key,
root_courselike_dir + '/static/',
root_courselike_dir + '/policies/assets.json',
)
# If we are using the default course image, export it to the
# legacy location to support backwards compatibility.
if courselike.course_image == courselike.fields['course_image'].default:
try:
course_image = self.contentstore.find(
StaticContent.compute_location(
courselike.id,
courselike.course_image
),
)
except NotFoundError:
pass
else:
output_dir = root_courselike_dir + '/static/images/'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
with OSFS(output_dir).open(u'course_image.jpg', 'wb') as course_image_file:
course_image_file.write(course_image.data)
# export the static tabs
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'static_tab', 'tabs', '.html'
)
# export the custom tags
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'custom_tag_template', 'custom_tags'
)
# export the course updates
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'course_info', 'info', '.html'
)
# export the 'about' data (e.g. overview, etc.)
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'about', 'about', '.html'
)
course_policy_dir_name = courselike.location.run
course_run_policy_dir = policies_dir.makedir(course_policy_dir_name, recreate=True)
# export the grading policy
with course_run_policy_dir.open(u'grading_policy.json', 'wb') as grading_policy:
grading_policy.write(dumps(courselike.grading_policy, cls=EdxJSONEncoder,
sort_keys=True, indent=4).encode('utf-8'))
# export all of the course metadata in policy.json
with course_run_policy_dir.open(u'policy.json', 'wb') as course_policy:
policy = {'course/' + courselike.location.run: own_metadata(courselike)}
course_policy.write(dumps(policy, cls=EdxJSONEncoder, sort_keys=True, indent=4).encode('utf-8'))
_export_drafts(self.modulestore, self.courselike_key, export_fs, xml_centric_courselike_key)
class LibraryExportManager(ExportManager):
"""
Export manager for Libraries
"""
def get_key(self):
"""
Get the library locator for the current library key.
"""
return LibraryLocator(
self.courselike_key.org, self.courselike_key.library
)
def get_courselike(self):
"""
Get the library from the modulestore.
"""
return self.modulestore.get_library(self.courselike_key, depth=None, lazy=False)
def process_root(self, root, export_fs):
"""
Add extra attributes to the root XML file.
"""
root.set('org', self.courselike_key.org)
root.set('library', self.courselike_key.library)
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
"""
Notionally, libraries may have assets. This is currently unsupported, but the structure is here
to ease in duck typing during import. This may be expanded as a useful feature eventually.
"""
# export the static assets
export_fs.makedir('policies', recreate=True)
if self.contentstore:
self.contentstore.export_all_for_course(
self.courselike_key,
self.root_dir + '/' + self.target_dir + '/static/',
self.root_dir + '/' + self.target_dir + '/policies/assets.json',
)
def post_process(self, root, export_fs):
"""
Because Libraries are XBlocks, they aren't exported in the same way Course Modules
are, but instead use the standard XBlock serializers. Accordingly, we need to
create our own index file to act as the equivalent to the root course.xml file,
called library.xml.
"""
# Create the Library.xml file, which acts as the index of all library contents.
xml_file = export_fs.open(LIBRARY_ROOT, 'wb')
xml_file.write(lxml.etree.tostring(root, pretty_print=True, encoding='utf-8'))
xml_file.close()
def export_course_to_xml(modulestore, contentstore, course_key, root_dir, course_dir):
"""
Thin wrapper for the Course Export Manager. See ExportManager for details.
"""
CourseExportManager(modulestore, contentstore, course_key, root_dir, course_dir).export()
def export_library_to_xml(modulestore, contentstore, library_key, root_dir, library_dir):
"""
Thin wrapper for the Library Export Manager. See ExportManager for details.
"""
LibraryExportManager(modulestore, contentstore, library_key, root_dir, library_dir).export()
def adapt_references(subtree, destination_course_key, export_fs):
"""
Map every reference in the subtree into destination_course_key and set it back into the xblock fields
"""
subtree.runtime.export_fs = export_fs # ensure everything knows where it's going!
for field_name, field in subtree.fields.iteritems():
if field.is_set_on(subtree):
if isinstance(field, Reference):
value = field.read_from(subtree)
if value is not None:
field.write_to(subtree, field.read_from(subtree).map_into_course(destination_course_key))
elif field_name == 'children':
# don't change the children field but do recurse over the children
[adapt_references(child, destination_course_key, export_fs) for child in subtree.get_children()]
elif isinstance(field, ReferenceList):
field.write_to(
subtree,
[ele.map_into_course(destination_course_key) for ele in field.read_from(subtree)]
)
elif isinstance(field, ReferenceValueDict):
field.write_to(
subtree, {
key: ele.map_into_course(destination_course_key) for key, ele in field.read_from(subtree).iteritems()
}
)
def _export_field_content(xblock_item, item_dir):
"""
Export all fields related to 'xblock_item' other than 'metadata' and 'data' to json file in provided directory
"""
module_data = xblock_item.get_explicitly_set_fields_by_scope(Scope.content)
if isinstance(module_data, dict):
for field_name in module_data:
if field_name not in DEFAULT_CONTENT_FIELDS:
# filename format: {dirname}.{field_name}.json
with item_dir.open(u'{0}.{1}.{2}'.format(xblock_item.location.block_id, field_name, 'json'),
'wb') as field_content_file:
field_content_file.write(dumps(module_data.get(field_name, {}), cls=EdxJSONEncoder,
sort_keys=True, indent=4).encode('utf-8'))
def export_extra_content(export_fs, modulestore, source_course_key, dest_course_key, category_type, dirname, file_suffix=''):
items = modulestore.get_items(source_course_key, qualifiers={'category': category_type})
if len(items) > 0:
item_dir = export_fs.makedir(dirname, recreate=True)
for item in items:
adapt_references(item, dest_course_key, export_fs)
with item_dir.open(item.location.block_id + file_suffix, 'wb') as item_file:
item_file.write(item.data.encode('utf8'))
            # export content fields other than metadata and data in json format in the current directory
_export_field_content(item, item_dir)
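# Illustrative sketch only (not part of this module): how the thin wrapper
# functions above might be invoked from a Django-aware context such as a
# management command. The course id and output paths are placeholders, and the
# modulestore()/contentstore() helpers assume a configured edx-platform
# environment.
if __name__ == "__main__":
    from xmodule.modulestore.django import modulestore
    from xmodule.contentstore.django import contentstore
    from opaque_keys.edx.keys import CourseKey
    demo_course_key = CourseKey.from_string('course-v1:Org+Course+Run')
    export_course_to_xml(modulestore(), contentstore(), demo_course_key,
                         '/tmp/exported_courses', 'demo_course')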
| ahmedaljazzar/edx-platform | common/lib/xmodule/xmodule/modulestore/xml_exporter.py | Python | agpl-3.0 | 17,842 |
"""
MolML
=====
An interface between molecules and machine learning
MolML is a python module to use to map molecules into representations that
are usable with machine learning. This is done using an API similar to
scikit-learn to keep things simple and straightforward. For documentation,
look at the docstrings.
"""
__version__ = "0.9.0"
| crcollins/molml | molml/__init__.py | Python | mit | 340 |
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: test_LD_X.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the LD_X opcode.
"""
import base_test
from registers import Reg, SREG
class LD_X_TestFail(base_test.TestFail): pass
class base_LD_X(base_test.opcode_test):
"""Generic test case for testing LD_X opcode.
LD_X - Load Indirect from data space to Register using index X
Operation: Rd <- (X)
opcode is '1001 000d dddd 1100' where 0 <= d <= 31
Only registers PC and Rd should be changed.
"""
def setup(self):
# Set the register values
self.setup_regs[self.Rd] = 0
self.setup_regs[Reg.R26] = (self.X & 0xff)
self.setup_regs[Reg.R27] = (self.X >> 8)
# set up the val in memory
self.mem_byte_write( self.X, self.Vd )
# Return the raw opcode
return 0x900C | (self.Rd << 4)
def analyze_results(self):
self.reg_changed.extend( [self.Rd] )
# check that result is correct
expect = self.Vd
got = self.anal_regs[self.Rd]
if expect != got:
self.fail('LD_X: expect=%02x, got=%02x' % (expect, got))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class LD_X_r%02d_X%04x_v%02x_TestFail(LD_X_TestFail): pass
class test_LD_X_r%02d_X%04x_v%02x(base_LD_X):
Rd = %d
X = 0x%x
Vd = 0x%x
def fail(self,s):
raise LD_X_r%02d_X%04x_v%02x_TestFail, s
"""
#
# automagically generate the test_LD_X_rNN_vXX class definitions.
#
code = ''
for d in range(0,32):
for x in (0x10f, 0x1ff):
for v in (0xaa, 0x55):
args = (d,x,v)*4
code += template % args
exec code
| zouppen/simulavr | regress/test_opcodes/test_LD_X.py | Python | gpl-2.0 | 2,530 |
import socket
from src.nlp import NLP
from src.channel import Channel
class PyIRC:
def __init__(self, hostname, port, channel, nick):
self.hostname = hostname
self.port = port
self.channel = channel
self.nick = nick
self.nlp = NLP()
"""
Sends a message.
"""
def send(self, message):
print("SEND: %s" % message)
self.ircsock.send(message.encode())
"""
Sends a private message.
"""
def privmsg(self, channel, message):
self.send("PRIVMSG %s :%s\n" % (channel, message))
"""
Returns the next available message on the socket.
"""
def get_message(self):
message = self.ircsock.recv(2048).decode()
message = message.strip('\n\r')
print("RECV: %s" % message)
return message
"""
Change the bot's nickname
"""
def change_nick(self, nick):
self.send("USER %s 8 * :Skylar\'s Bot\n" % nick)
self.send("NICK %s\n" % nick)
# Make sure this is okay.
while 1:
message = self.get_message()
if message.find('004') != -1:
break
"""
Join a channel
"""
def join(self, channel):
self.send("JOIN " + channel + "\n")
"""
Run the bot
"""
def run(self):
self.ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ircsock.connect((self.hostname, self.port))
self.change_nick(self.nick)
self.join(self.channel)
while 1:
message = self.get_message()
if message.find("PING :") != -1:
self.send("PONG :Pong\n")
continue
if message.find(' PRIVMSG ') !=-1:
nick = message.split('!')[0][1:]
person = Channel(self, nick)
channel = message.split(' PRIVMSG ')[-1].split(' :')[0]
channel = Channel(self, channel)
message = message.split(" :", 1)[1]
message = message.lower()
botname = self.nick.lower()
if not self.nlp.is_subject(botname, message):
print("DET: Not the subject.")
continue
# Extract name.
                # str.lstrip/rstrip remove any of the given characters, not a
                # prefix or suffix, so slice the bot name off instead.
                if message.startswith(botname):
                    message = message[len(botname):]
                elif message.endswith(botname):
                    message = message[:-len(botname)]
if nick == "sky" and self.nlp.match_any_ends(message, "shutdown"):
break
(module, arguments) = self.nlp.parse(message.strip(" "))
module.recv(channel, person, arguments)
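# Minimal usage sketch: the server, channel and nick values below are
# placeholders rather than settings shipped with this project.
if __name__ == "__main__":
    bot = PyIRC("irc.example.net", 6667, "#example-channel", "pybot")
    bot.run()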
| SkylarKelty/pyirc | src/bot.py | Python | mit | 2,178 |
"""
Helper functions and classes for discussion tests.
"""
from uuid import uuid4
import json
from ...fixtures import LMS_BASE_URL
from ...fixtures.course import CourseFixture
from ...fixtures.discussion import (
SingleThreadViewFixture,
Thread,
Response,
)
from ...pages.lms.discussion import DiscussionTabSingleThreadPage
from ...tests.helpers import UniqueCourseTest
class BaseDiscussionMixin(object):
"""
A mixin containing methods common to discussion tests.
"""
def setup_thread(self, num_responses, **thread_kwargs):
"""
Create a test thread with the given number of responses, passing all
keyword arguments through to the Thread fixture, then invoke
setup_thread_page.
"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, **thread_kwargs)
)
for i in range(num_responses):
thread_fixture.addResponse(Response(id=str(i), body=str(i)))
thread_fixture.push()
self.setup_thread_page(thread_id)
return thread_id
class CohortTestMixin(object):
"""
Mixin for tests of cohorted courses
"""
def setup_cohort_config(self, course_fixture, auto_cohort_groups=None):
"""
Sets up the course to use cohorting with the given list of auto_cohort_groups.
If auto_cohort_groups is None, no auto cohorts are set.
"""
course_fixture._update_xblock(course_fixture._course_location, {
"metadata": {
u"cohort_config": {
"auto_cohort_groups": auto_cohort_groups or [],
"cohorted_discussions": [],
"cohorted": True,
},
},
})
def disable_cohorting(self, course_fixture):
"""
Disables cohorting for the current course fixture.
"""
course_fixture._update_xblock(course_fixture._course_location, {
"metadata": {
u"cohort_config": {
"cohorted": False
},
},
})
def add_manual_cohort(self, course_fixture, cohort_name):
"""
Adds a cohort by name, returning its ID.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/'
data = json.dumps({"name": cohort_name})
response = course_fixture.session.post(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to create cohort")
return response.json()['id']
def add_user_to_cohort(self, course_fixture, username, cohort_id):
"""
Adds a user to the specified cohort.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + "/cohorts/{}/add".format(cohort_id)
data = {"users": username}
response = course_fixture.session.post(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to add user to cohort")
class BaseDiscussionTestCase(UniqueCourseTest):
def setUp(self):
super(BaseDiscussionTestCase, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fixture = CourseFixture(**self.course_info)
self.course_fixture.add_advanced_settings(
{'discussion_topics': {'value': {'Test Discussion Topic': {'id': self.discussion_id}}}}
)
self.course_fixture.install()
def create_single_thread_page(self, thread_id):
"""
Sets up a `DiscussionTabSingleThreadPage` for a given
`thread_id`.
"""
return DiscussionTabSingleThreadPage(self.browser, self.course_id, self.discussion_id, thread_id)
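# A minimal, hypothetical sketch of how the mixins above are meant to be
# combined in an acceptance test; the cohort names below are placeholders and
# this class is illustrative only, not used by the real test suite.
class ExampleCohortedDiscussionTest(BaseDiscussionTestCase, CohortTestMixin):
    """Illustrative only: discussion test case with cohorting enabled."""
    def setUp(self):
        super(ExampleCohortedDiscussionTest, self).setUp()
        self.setup_cohort_config(self.course_fixture, auto_cohort_groups=['Auto Group'])
        self.manual_cohort_id = self.add_manual_cohort(self.course_fixture, 'Manual Cohort')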
| sameetb-cuelogic/edx-platform-test | common/test/acceptance/tests/discussion/helpers.py | Python | agpl-3.0 | 3,825 |
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
import os
from werkzeug import exceptions
from scoreboard import attachments
from scoreboard import main
from scoreboard import models
app = main.get_app()
_VIEW_CACHE = {}
@app.errorhandler(404)
def handle_404(ex):
"""Handle 404s, sending index.html for unhandled paths."""
path = flask.request.path[1:]
try:
return app.send_static_file(path)
except (exceptions.NotFound, UnicodeEncodeError):
if '.' not in path and not path.startswith('api/'):
app.logger.info('%s -> index.html', path)
return render_index()
return '404 Not Found', 404
# Needed because emails with a "." in them prevent 404 handler from working
@app.route('/pwreset/<path:unused>')
def render_pwreset(unused):
return render_index()
@app.route('/')
@app.route('/index.html')
def render_index():
"""Render index.
Do not include any user-controlled content to avoid XSS!
"""
try:
tmpl = _VIEW_CACHE['index']
except KeyError:
minify = not app.debug and os.path.exists(
os.path.join(app.static_folder, 'js/app.min.js'))
tmpl = flask.render_template('index.html', minify=minify)
_VIEW_CACHE['index'] = tmpl
resp = flask.make_response(tmpl, 200)
if flask.request.path.startswith('/scoreboard'):
resp.headers.add('X-FRAME-OPTIONS', 'ALLOW')
return resp
@app.route('/attachment/<filename>')
def download(filename):
"""Download an attachment."""
attachment = models.Attachment.query.get_or_404(filename)
cuser = models.User.current()
valid = cuser and cuser.admin
for ch in attachment.challenges:
if ch.unlocked:
valid = True
break
if not valid:
flask.abort(404)
app.logger.info('Download of %s by %r.', attachment, cuser or "Anonymous")
return attachments.backend.send(attachment)
@app.route('/createdb')
def createdb():
"""Create database schema without CLI access.
Useful for AppEngine and other container environments.
Should be safe to be exposed, as operation is idempotent and does not
clear any data.
"""
try:
models.db.create_all()
return 'Tables created.'
except Exception as ex:
app.logger.exception('Failed creating tables: %s', str(ex))
return 'Failed creating tables: see log.'
| google/ctfscoreboard | scoreboard/views.py | Python | apache-2.0 | 2,965 |
#!/usr/bin/env python
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# Stop a running glideinFactory
#
# Arguments:
# $1 = glidein submit_dir (i.e. factory dir)
#
# Author:
# Igor Sfiligoi May 6th 2008
#
import signal
import sys
import os
import os.path
import fcntl
import string
import time
import subprocess
sys.path.append(os.path.join(sys.path[0],"../../"))
from glideinwms.factory import glideFactoryPidLib
from glideinwms.factory import glideFactoryConfig
def all_pids_in_pgid_dead(pgid):
# return 1 if there are no pids in the pgid still alive
devnull = os.open(os.devnull, os.O_RDWR)
return subprocess.call(["pgrep", "-g", "%s" % pgid],
stdout=devnull,
stderr=devnull)
def kill_and_check_pgid(pgid, signr=signal.SIGTERM,
retries=100, retry_interval=0.5):
# return 0 if all pids in pgid are dead
try:
os.killpg(pgid, signr)
except OSError:
pass
time.sleep(.2)
for retries in range(retries):
if not all_pids_in_pgid_dead(pgid):
try:
os.killpg(pgid, signr)
except OSError:
# already dead
pass
time.sleep(retry_interval)
else:
return 0
return 1
def main(startup_dir,force=True):
# get the pids
try:
factory_pid=glideFactoryPidLib.get_factory_pid(startup_dir)
except RuntimeError, e:
print e
return 1
#print factory_pid
    if not glideFactoryPidLib.pidSupport.check_pid(factory_pid):
        # Factory already dead
        return 0
    factory_pgid = os.getpgid(factory_pid)
# kill processes
# first soft kill the factoryprocess group (20s timeout)
if (kill_and_check_pgid(factory_pgid) == 0):
return 0
if not force:
print "Factory did not die within the timeout"
return 1
# retry soft kill the factory... should exit now (5s timeout)
if (kill_and_check_pgid(factory_pgid, retries=25) == 0):
return 0
print "Factory or children still alive... sending hard kill"
try:
os.killpg(factory_pgid, signal.SIGKILL)
except OSError:
# in case they died between the last check and now
pass
return 0
if __name__ == '__main__':
if len(sys.argv)<2:
print "Usage: stopFactory.py submit_dir"
sys.exit(1)
if len(sys.argv)>2:
if sys.argv[1]=='-force':
sys.exit(main(sys.argv[2],True))
else:
print "Usage: stopFactory.py submit_dir"
sys.exit(1)
else:
sys.exit(main(sys.argv[1]))
| holzman/glideinwms-old | factory/stopFactory.py | Python | bsd-3-clause | 2,678 |
# -*- coding: utf-8 -*-
"""
.. _tut-filter-resample:
=============================
Filtering and resampling data
=============================
This tutorial covers filtering and resampling, and gives examples of how
filtering can be used for artifact repair.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. We'll also crop the data to 60 seconds
(to save memory on the documentation server):
"""
# %%
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
# use just 60 seconds of data and mag channels, to save memory
raw.crop(0, 60).pick_types(meg='mag', stim=True).load_data()
# %%
# Background on filtering
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# A filter removes or attenuates parts of a signal. Usually, filters act on
# specific *frequency ranges* of a signal — for example, suppressing all
# frequency components above or below a certain cutoff value. There are *many*
# ways of designing digital filters; see :ref:`disc-filtering` for a longer
# discussion of the various approaches to filtering physiological signals in
# MNE-Python.
#
#
# Repairing artifacts by filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Artifacts that are restricted to a narrow frequency range can sometimes
# be repaired by filtering the data. Two examples of frequency-restricted
# artifacts are slow drifts and power line noise. Here we illustrate how each
# of these can be repaired by filtering.
#
#
# Slow drifts
# ~~~~~~~~~~~
#
# Low-frequency drifts in raw data can usually be spotted by plotting a fairly
# long span of data with the :meth:`~mne.io.Raw.plot` method, though it is
# helpful to disable channel-wise DC shift correction to make slow drifts
# more readily visible. Here we plot 60 seconds, showing all the magnetometer
# channels:
raw.plot(duration=60, proj=False, n_channels=len(raw.ch_names),
remove_dc=False)
# %%
# A half-period of this slow drift appears to last around 10 seconds, so a full
# period would be 20 seconds, i.e., :math:`\frac{1}{20} \mathrm{Hz}`. To be
# sure those components are excluded, we want our highpass to be *higher* than
# that, so let's try :math:`\frac{1}{10} \mathrm{Hz}` and :math:`\frac{1}{5}
# \mathrm{Hz}` filters to see which works best:
for cutoff in (0.1, 0.2):
raw_highpass = raw.copy().filter(l_freq=cutoff, h_freq=None)
with mne.viz.use_browser_backend('matplotlib'):
fig = raw_highpass.plot(duration=60, proj=False,
n_channels=len(raw.ch_names), remove_dc=False)
fig.subplots_adjust(top=0.9)
fig.suptitle('High-pass filtered at {} Hz'.format(cutoff), size='xx-large',
weight='bold')
# %%
# Looks like 0.1 Hz was not quite high enough to fully remove the slow drifts.
# Notice that the text output summarizes the relevant characteristics of the
# filter that was created. If you want to visualize the filter, you can pass
# the same arguments used in the call to :meth:`raw.filter()
# <mne.io.Raw.filter>` above to the function :func:`mne.filter.create_filter`
# to get the filter parameters, and then pass the filter parameters to
# :func:`mne.viz.plot_filter`. :func:`~mne.filter.create_filter` also requires
# parameters ``data`` (a :class:`NumPy array <numpy.ndarray>`) and ``sfreq``
# (the sampling frequency of the data), so we'll extract those from our
# :class:`~mne.io.Raw` object:
filter_params = mne.filter.create_filter(raw.get_data(), raw.info['sfreq'],
l_freq=0.2, h_freq=None)
# %%
# Notice that the output is the same as when we applied this filter to the data
# using :meth:`raw.filter() <mne.io.Raw.filter>`. You can now pass the filter
# parameters (and the sampling frequency) to :func:`~mne.viz.plot_filter` to
# plot the filter:
mne.viz.plot_filter(filter_params, raw.info['sfreq'], flim=(0.01, 5))
# %%
# .. _tut-section-line-noise:
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line noise is an environmental artifact that manifests as persistent
# oscillations centered around the `AC power line frequency`_. Power line
# artifacts are easiest to see on plots of the spectrum, so we'll use
# :meth:`~mne.io.Raw.plot_psd` to illustrate. We'll also write a little
# function that adds arrows to the spectrum plot to highlight the artifacts:
def add_arrows(axes):
# add some arrows at 60 Hz and its harmonics
for ax in axes:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
# get ymax of a small region around the freq. of interest
y = psds[(idx - 4):(idx + 5)].max()
ax.arrow(x=freqs[idx], y=y + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
fig = raw.plot_psd(fmax=250, average=True)
add_arrows(fig.axes[:2])
# %%
# It should be evident that MEG channels are more susceptible to this kind of
# interference than EEG channels, which are recorded inside the magnetically
# shielded room.
# Removing power-line noise can be done with a notch filter,
# applied directly to the :class:`~mne.io.Raw` object, specifying an array of
# frequencies to be attenuated. Since the EEG channels are relatively
# unaffected by the power line noise, we'll also specify a ``picks`` argument
# so that only the magnetometers and gradiometers get filtered:
meg_picks = mne.pick_types(raw.info, meg=True)
freqs = (60, 120, 180, 240)
raw_notch = raw.copy().notch_filter(freqs=freqs, picks=meg_picks)
for title, data in zip(['Un', 'Notch '], [raw, raw_notch]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
# %%
# :meth:`~mne.io.Raw.notch_filter` also has parameters to control the notch
# width, transition bandwidth and other aspects of the filter. See the
# docstring for details.
#
# It's also possible to try to use a spectrum fitting routine to notch filter.
# In principle it can automatically detect the frequencies to notch, but our
# implementation generally does not do so reliably, so we specify the
# frequencies to remove instead, and it does a good job of removing the
# line noise at those frequencies:
raw_notch_fit = raw.copy().notch_filter(
freqs=freqs, picks=meg_picks, method='spectrum_fit', filter_length='10s')
for title, data in zip(['Un', 'spectrum_fit '], [raw, raw_notch_fit]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
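# %%
# For reference, here is a brief sketch of the notch-width and
# transition-bandwidth parameters mentioned above; the values are arbitrary and
# chosen only to illustrate the call signature:
raw_notch_narrow = raw.copy().notch_filter(
    freqs=freqs, picks=meg_picks, notch_widths=2, trans_bandwidth=0.5)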
# %%
# Resampling
# ^^^^^^^^^^
#
# EEG and MEG recordings are notable for their high temporal precision, and are
# often recorded with sampling rates around 1000 Hz or higher. This is good
# when precise timing of events is important to the experimental design or
# analysis plan, but also consumes more memory and computational resources when
# processing the data. In cases where high-frequency components of the signal
# are not of interest and precise timing is not needed (e.g., computing EOG or
# ECG projectors on a long recording), downsampling the signal can be a useful
# time-saver.
#
# In MNE-Python, the resampling methods (:meth:`raw.resample()
# <mne.io.Raw.resample>`, :meth:`epochs.resample() <mne.Epochs.resample>` and
# :meth:`evoked.resample() <mne.Evoked.resample>`) apply a low-pass filter to
# the signal to avoid `aliasing`_, so you don't need to explicitly filter it
# yourself first. This built-in filtering that happens when using
# :meth:`raw.resample() <mne.io.Raw.resample>`, :meth:`epochs.resample()
# <mne.Epochs.resample>`, or :meth:`evoked.resample() <mne.Evoked.resample>` is
# a brick-wall filter applied in the frequency domain at the `Nyquist
# frequency`_ of the desired new sampling rate. This can be clearly seen in the
# PSD plot, where a dashed vertical line indicates the filter cutoff; the
# original data had an existing lowpass at around 172 Hz (see
# ``raw.info['lowpass']``), and the data resampled from 600 Hz to 200 Hz gets
# automatically lowpass filtered at 100 Hz (the `Nyquist frequency`_ for a
# target rate of 200 Hz):
raw_downsampled = raw.copy().resample(sfreq=200)
for data, title in zip([raw, raw_downsampled], ['Original', 'Downsampled']):
fig = data.plot_psd(average=True)
fig.subplots_adjust(top=0.9)
fig.suptitle(title)
plt.setp(fig.axes, xlim=(0, 300))
# %%
# Because resampling involves filtering, there are some pitfalls to resampling
# at different points in the analysis stream:
#
# - Performing resampling on :class:`~mne.io.Raw` data (*before* epoching) will
# negatively affect the temporal precision of Event arrays, by causing
# `jitter`_ in the event timing. This reduced temporal precision will
# propagate to subsequent epoching operations.
#
# - Performing resampling *after* epoching can introduce edge artifacts *on
# every epoch*, whereas filtering the :class:`~mne.io.Raw` object will only
# introduce artifacts at the start and end of the recording (which is often
#   far enough from the first and last epochs to have no effect on the
# analysis).
#
# The following section suggests best practices to mitigate both of these
# issues.
#
#
# Best practices
# ~~~~~~~~~~~~~~
#
# To avoid the reduction in temporal precision of events that comes with
# resampling a :class:`~mne.io.Raw` object, and also avoid the edge artifacts
# that come with filtering an :class:`~mne.Epochs` or :class:`~mne.Evoked`
# object, the best practice is to:
#
# 1. low-pass filter the :class:`~mne.io.Raw` data at or below
# :math:`\frac{1}{3}` of the desired sample rate, then
#
# 2. decimate the data after epoching, by either passing the ``decim``
# parameter to the :class:`~mne.Epochs` constructor, or using the
# :meth:`~mne.Epochs.decimate` method after the :class:`~mne.Epochs` have
# been created.
#
# .. warning::
# The recommendation for setting the low-pass corner frequency at
# :math:`\frac{1}{3}` of the desired sample rate is a fairly safe rule of
# thumb based on the default settings in :meth:`raw.filter()
# <mne.io.Raw.filter>` (which are different from the filter settings used
# inside the :meth:`raw.resample() <mne.io.Raw.resample>` method). If you
# use a customized lowpass filter (specifically, if your transition
# bandwidth is wider than 0.5× the lowpass cutoff), downsampling to 3× the
# lowpass cutoff may still not be enough to avoid `aliasing`_, and
# MNE-Python will not warn you about it (because the :class:`raw.info
# <mne.Info>` object only keeps track of the lowpass cutoff, not the
# transition bandwidth). Conversely, if you use a steeper filter, the
# warning may be too sensitive. If you are unsure, plot the PSD of your
# filtered data *before decimating* and ensure that there is no content in
# the frequencies above the `Nyquist frequency`_ of the sample rate you'll
# end up with *after* decimation.
#
# Note that this method of manually filtering and decimating is exact only when
# the original sampling frequency is an integer multiple of the desired new
# sampling frequency. Since the sampling frequency of our example data is
# 600.614990234375 Hz, ending up with a specific sampling frequency like (say)
# 90 Hz will not be possible:
current_sfreq = raw.info['sfreq']
desired_sfreq = 90 # Hz
decim = np.round(current_sfreq / desired_sfreq).astype(int)
obtained_sfreq = current_sfreq / decim
lowpass_freq = obtained_sfreq / 3.
raw_filtered = raw.copy().filter(l_freq=None, h_freq=lowpass_freq)
events = mne.find_events(raw_filtered)
epochs = mne.Epochs(raw_filtered, events, decim=decim)
print('desired sampling frequency was {} Hz; decim factor of {} yielded an '
'actual sampling frequency of {} Hz.'
.format(desired_sfreq, decim, epochs.info['sfreq']))
# %%
# If for some reason you cannot follow the above-recommended best practices,
# you should at the very least either:
#
# 1. resample the data *after* epoching, and make your epochs long enough that
# edge effects from the filtering do not affect the temporal span of the
# epoch that you hope to analyze / interpret; or
#
# 2. perform resampling on the :class:`~mne.io.Raw` object and its
# corresponding Events array *simultaneously* so that they stay more or less
# in synch. This can be done by passing the Events array as the
# ``events`` parameter to :meth:`raw.resample() <mne.io.Raw.resample>`.
#
#
# .. LINKS
#
# .. _`AC power line frequency`:
# https://en.wikipedia.org/wiki/Mains_electricity
# .. _`aliasing`: https://en.wikipedia.org/wiki/Anti-aliasing_filter
# .. _`jitter`: https://en.wikipedia.org/wiki/Jitter
# .. _`Nyquist frequency`: https://en.wikipedia.org/wiki/Nyquist_frequency
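# %%
# As a short sketch of the second fallback above, the events array can be
# passed directly to :meth:`raw.resample() <mne.io.Raw.resample>`, which then
# returns both the resampled data and the correspondingly adjusted events, so
# the two stay aligned. The target rate of 200 Hz is arbitrary and only for
# illustration.
events = mne.find_events(raw)
raw_resampled, events_resampled = raw.copy().resample(sfreq=200, events=events)
print(events[:3])
print(events_resampled[:3])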
| mne-tools/mne-python | tutorials/preprocessing/30_filtering_resampling.py | Python | bsd-3-clause | 13,159 |
sorteados = []
sorteados = [73,84,49,97,24]
print(sorteados[1])
| ronas/PythonGNF | Fabulao/Array.py | Python | gpl-3.0 | 67 |
from chatterbot.logic import LogicAdapter
from chatterbot.conversation import Statement
from chatterbot import languages
from chatterbot import parsing
from mathparse import mathparse
import re
class UnitConversion(LogicAdapter):
"""
    The UnitConversion logic adapter parses inputs to convert values
    between several metric units.
For example:
User: 'How many meters are in one kilometer?'
Bot: '1000.0'
:kwargs:
* *language* (``object``) --
The language is set to ``chatterbot.languages.ENG`` for English by default.
"""
def __init__(self, chatbot, **kwargs):
super().__init__(chatbot, **kwargs)
from pint import UnitRegistry
self.language = kwargs.get('language', languages.ENG)
self.cache = {}
self.patterns = [
(
re.compile(r'''
(([Hh]ow\s+many)\s+
(?P<target>\S+)\s+ # meter, celsius, hours
((are)*\s*in)\s+
(?P<number>([+-]?\d+(?:\.\d+)?)|(a|an)|(%s[-\s]?)+)\s+
(?P<from>\S+)\s*) # meter, celsius, hours
''' % (parsing.numbers),
(re.VERBOSE | re.IGNORECASE)
),
lambda m: self.handle_matches(m)
),
(
re.compile(r'''
((?P<number>([+-]?\d+(?:\.\d+)?)|(%s[-\s]?)+)\s+
(?P<from>\S+)\s+ # meter, celsius, hours
(to)\s+
(?P<target>\S+)\s*) # meter, celsius, hours
''' % (parsing.numbers),
(re.VERBOSE | re.IGNORECASE)
),
lambda m: self.handle_matches(m)
),
(
re.compile(r'''
((?P<number>([+-]?\d+(?:\.\d+)?)|(a|an)|(%s[-\s]?)+)\s+
(?P<from>\S+)\s+ # meter, celsius, hours
(is|are)\s+
(how\s+many)*\s+
(?P<target>\S+)\s*) # meter, celsius, hours
''' % (parsing.numbers),
(re.VERBOSE | re.IGNORECASE)
),
lambda m: self.handle_matches(m)
)
]
self.unit_registry = UnitRegistry()
def get_unit(self, unit_variations):
"""
        Get the first matching unit object supported by the pint library,
        given a list of unit name variations (e.g. ['HOUR', 'hour']).
        :param unit_variations: A list of strings with names of units
        :type unit_variations: list
"""
for unit in unit_variations:
try:
return getattr(self.unit_registry, unit)
except Exception:
continue
return None
def get_valid_units(self, from_unit, target_unit):
"""
        Returns the first matching `pint.unit.Unit` objects for the from_unit
        and target_unit strings, trying the possible variations of metric unit
        names supported by the pint library.
        :param from_unit: source metric unit
        :type from_unit: str
        :param target_unit: target metric unit
        :type target_unit: str
"""
from_unit_variations = [from_unit.lower(), from_unit.upper()]
target_unit_variations = [target_unit.lower(), target_unit.upper()]
from_unit = self.get_unit(from_unit_variations)
target_unit = self.get_unit(target_unit_variations)
return from_unit, target_unit
def handle_matches(self, match):
"""
Returns a response statement from a matched input statement.
        :param match: A valid matched pattern from the input statement
        :type match: `_sre.SRE_Match`
"""
response = Statement(text='')
from_parsed = match.group("from")
target_parsed = match.group("target")
n_statement = match.group("number")
if n_statement == 'a' or n_statement == 'an':
n_statement = '1.0'
n = mathparse.parse(n_statement, self.language.ISO_639.upper())
from_parsed, target_parsed = self.get_valid_units(from_parsed, target_parsed)
if from_parsed is None or target_parsed is None:
response.confidence = 0.0
else:
from_value = self.unit_registry.Quantity(float(n), from_parsed)
target_value = from_value.to(target_parsed)
response.confidence = 1.0
response.text = str(target_value.magnitude)
return response
def can_process(self, statement):
response = self.process(statement)
self.cache[statement.text] = response
return response.confidence == 1.0
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='')
input_text = statement.text
try:
# Use the result cached by the process method if it exists
if input_text in self.cache:
response = self.cache[input_text]
self.cache = {}
return response
for pattern, func in self.patterns:
p = pattern.match(input_text)
if p is not None:
response = func(p)
if response.confidence == 1.0:
break
except Exception:
response.confidence = 0.0
finally:
return response
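# Minimal usage sketch (not part of this module): wiring the adapter into a
# ChatBot instance. The bot name is a placeholder, and the example assumes the
# optional pint and mathparse dependencies are installed.
if __name__ == "__main__":
    from chatterbot import ChatBot
    demo_bot = ChatBot(
        'UnitBot',
        logic_adapters=['chatterbot.logic.UnitConversion'],
    )
    print(demo_bot.get_response('How many meters are in one kilometer?'))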
| vkosuri/ChatterBot | chatterbot/logic/unit_conversion.py | Python | bsd-3-clause | 5,449 |
# -*- coding: utf-8 -*-
# Copyright 2015 Antonio Espinosa <antonio.espinosa@tecnativa.com>
# Copyright 2015 Jairo Llopis <jairo.llopis@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class ResPartnerTurnoverRange(models.Model):
_name = 'res.partner.turnover_range'
_description = "Turnover range"
name = fields.Char(required=True, translate=True)
| sergiocorato/partner-contact | partner_capital/models/res_partner_turnover_range.py | Python | agpl-3.0 | 431 |
try:
from shutil import which # Python >= 3.3
except ImportError:
import os, sys
# This is copied from Python 3.4.1
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
class PtyProcessError(Exception):
"""Generic error class for this package."""
| endlessm/chromium-browser | third_party/llvm/lldb/third_party/Python/module/ptyprocess-0.6.0/ptyprocess/util.py | Python | bsd-3-clause | 2,785 |
from rabbitcredentials_SECRET import PREFIX, RABBIT_EXCHANGE
import esgfpid
import time
import datetime
today = datetime.datetime.now().strftime('%Y-%m-%d')
filename_for_logging_handles = 'handles_created_during_tests_%s.txt' % today
def init_connector(list_of_nodes, exch=RABBIT_EXCHANGE):
print('Init connector (%s)' % list_of_nodes)
connector = esgfpid.Connector(
handle_prefix=PREFIX,
messaging_service_credentials=list_of_nodes,
messaging_service_exchange_name=exch,
data_node='data.dkrz.foo.de',
test_publication=True
)
connector.start_messaging_thread()
return connector
def data_cart(num, connector):
foo = 'foo'+str(num)
pid = connector.create_data_cart_pid({foo:'hdl:123/345','bar':None,'baz':'hdl:123/678'})
return pid
#print('Data cart %i: hdl.handle.net/%s?noredirect' % (num, pid))
def send_one_test_message(num, connector):
return data_cart(num, connector)
def send_messages(n, connector, wait=0):
    with open(filename_for_logging_handles, 'a') as text_file:
print('Sending %i messages.' % n)
for i in range(n):
pid = send_one_test_message(i, connector)
text_file.write(';%s' % pid)
time.sleep(wait)
def gentle_close(connector):
print('Stopping thread, waiting for pending messages...')
connector.finish_messaging_thread()
def force_close(connector):
print('Stopping thread, NOT waiting for pending messages...')
    connector.force_finish_messaging_thread()
 | IS-ENES-Data/esgf-pid | tests/integration_tests/helpers_esgfpid.py | Python | apache-2.0 | 1,530 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
PORT = 'port'
ROUTER = 'router'
ROUTER_GATEWAY = 'router_gateway'
ROUTER_INTERFACE = 'router_interface'
VALID = (
PORT,
ROUTER,
ROUTER_GATEWAY,
ROUTER_INTERFACE,
)
| Stavitsky/neutron | neutron/callbacks/resources.py | Python | apache-2.0 | 754 |
from lampost.di.resource import Injected, module_inject
from lampost.event.zone import Attachable
from lampost.gameops.action import obj_action, ActionProvider
from lampost.gameops.target import TargetKeys
from lampost.server.channel import Channel
from lampmud.model.item import ItemAspect
from lampmud.mud.action import mud_action
ev = Injected('dispatcher')
message_service = Injected('message_service')
module_inject(__name__)
class Group(ActionProvider, Attachable):
target_keys = TargetKeys('group')
def __init__(self, leader):
leader.group = self
self.leader = leader
self.members = []
self.invites = set()
self.instance = None
self.channel = Channel('gchat', 'next', aliases=('g', 'gc', 'gt', 'gtell', 'gsay', 'gs'), tag='[g] ')
ev.register('player_connect', self._player_connect)
def join(self, member):
if not self.members:
self._add_member(self.leader)
self.msg("{} has joined the group".format(member.name))
self._add_member(member)
self.invites.remove(member)
def _add_member(self, member):
member.group = self
self.channel.add_sub(member)
self.members.append(member)
member.enhance_soul(self)
def decline(self, member):
self.leader.display_line("{} has declined your group invitation.".format(member.name))
self.invites.remove(member)
self._check_empty()
@obj_action()
def leave(self, source):
self._remove_member(source)
if len(self.members) > 1 and source == self.leader:
self.leader = self.members[0]
self.msg("{} is now the leader of the group.".format(self.leader.name))
else:
self._check_empty()
def _remove_member(self, member):
self.msg("{} has left the group.".format(member.name))
member.group = None
member.diminish_soul(self)
self.channel.remove_sub(member)
self.members.remove(member)
def msg(self, msg):
self.channel.send_msg(msg)
def _check_empty(self):
if self.invites:
return
if len(self.members) == 1:
self._remove_member(self.members[0])
self.channel.disband()
self.detach()
def _player_connect(self, player, *_):
if player in self.members:
self.msg("{} has reconnected.".format(player.name))
def detach_shared(self, member):
self.leave(member)
class Invitation(ItemAspect):
title = "A group invitation"
target_keys = TargetKeys(title)
resolved = False
def __init__(self, group, invitee):
self.attach()
self.group = group
self.invitee = invitee
ev.register_once(self.decline, seconds=60)
def short_desc(self, *_):
return self.title
def long_desc(self, *_):
return "An invitation to {}'s group.".format(self.group.leader.name)
@obj_action(target_class="action_owner action_default")
def accept(self):
self.invitee.display_line("You have joined {}'s group.".format(self.group.leader.name))
self.group.join(self.invitee)
self.resolved = True
self.detach()
@obj_action(target_class="action_owner action_default")
def decline(self):
self.detach()
def _on_detach(self):
if not self.resolved:
self.invitee.display_line("You decline {}'s invitation.".format(self.group.leader.name))
self.group.decline(self.invitee)
self.resolved = True
self.invitee.remove_inven(self)
@mud_action(('group', 'invite'), target_class='player_env player_online')
def invite(source, target):
if target == source:
return "Not really necessary. You're pretty much stuck with yourself anyway."
if message_service.is_blocked(target.dbo_id, source.dbo_id):
return "{} has blocked requests from you.".format(target.name)
if target.group:
if target.group == source.group:
return "{} is already in your group!".format(target.name)
target.display_line("{} attempted to invite you to a different group.".format(source.name))
return "{} is already in a group.".format(target.name)
if source.group:
if target in source.group.invites:
return "You have already invited {} to a group.".format(target.name)
else:
Group(source)
source.group.invites.add(target)
target.display_line(
"{} has invited you to join a group. Please 'accept' or 'decline' the invitation.".format(source.name))
source.display_line("You invite {} to join a group.".format(target.name))
target.add_inven(Invitation(source.group, target))
| genzgd/Lampost-Mud | lampmud/mud/group.py | Python | mit | 4,728 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import time
# ============= standard library imports ========================
from numpy import vstack, histogram, array
# import math
from numpy.core.fromnumeric import argmax
from numpy.random import normal
from pylab import show, plot, axvline
from six.moves import zip
from traits.api import HasTraits
# from scipy.stats.stats import mode
# from numpy.lib.function_base import median
from pychron.core.time_series.time_series import smooth
# ============= local library imports ==========================
'''
Bayesian stratigraphic modeler
input:
age, error[, strat_pos]
if strat_pos neglected, list assumed to be in stratigraphic order
where idx=0 == youngest idx=n == oldest
execute a Monte Carlo simulation reject any run that violates strat order
1.
ok run A<B<C
-----A------
\
\
-----B------
\
\
-----C------
2.
invalid run A<B>C
-----A------
\
\
-----B------
/
/
-----C------
'''
def _monte_carlo_step(in_ages):
ages = _generate_ages(in_ages)
if _is_valid(ages):
return ages
def _generate_ages(in_ages):
ages = array([normal(loc=ai, scale=ei)
for ai, ei in in_ages])
return ages
def _is_valid(ages):
a = ages[0]
for ai in ages[1:]:
if ai < a:
return False
a = ai
else:
return True
def age_generator(ages, n):
i = 0
while i < n:
yield ages
i += 1
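# Illustrative note (added, not in the original script): _monte_carlo_step()
# keeps a draw only when the sampled ages increase monotonically, i.e. they
# respect stratigraphic order.  For example:
#     _is_valid(array([1.0, 1.5, 2.0]))  ->  True   (draw is kept)
#     _is_valid(array([1.0, 2.0, 1.5]))  ->  False  (step returns None)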
class BayesianModeler2(HasTraits):
def run(self):
ages = [(1, 0.1), (1.5, 0.4), (1.7, 0.1), (1.8, 0.2), (2.1, 0.5)]
# pool = Pool(processes=10)
n = 1e5
# st = time.time()
# aa = ((ai, ages) for ai in arange(n))
# print 'a"', time.time() - st
age_gen = age_generator(ages, n)
st = time.time()
# results = pool.map(_monte_carlo_step, age_gen)
results = [_monte_carlo_step(a) for a in age_gen]
print('a', time.time() - st)
st = time.time()
results = vstack((ri for ri in results if ri is not None))
print('b', time.time() - st)
for xx, (ai, ei) in zip(results.T, ages):
# print 'dev ', abs(xx.mean() - ai) / ai * 100, abs(xx.std() - ei) / ei * 100
# print xx
f, v = histogram(xx, 40)
lp = plot(v[:-1], f)[0]
c = lp.get_color()
nf = smooth(f, window='flat')
plot(v[:-1], nf, c=c, ls='--', lw=2)
axvline(ai, c=c, lw=5) # nominal age
# print f, v
idx = argmax(nf)
# axvline(xx.mean(), c=c, ls='--')
axvline(v[idx], c=c, ls='--')
show()
if __name__ == '__main__':
bm = BayesianModeler2()
bm.run()
# from pylab import linspace, plot, show
# loc = 5
# x = linspace(0, 1)
# y = [normal(loc=loc) for i in x]
# plot(x, normal(size=x.shape[0]))
# show()
# ============= EOF =============================================
| UManPychron/pychron | pychron/processing/bayesian_modeler.py | Python | apache-2.0 | 4,221 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.config import Config
from pants.option.errors import ParseError
def _parse_error(s, msg):
"""Return a ParseError with a usefully formatted message, for the caller to throw.
:param s: The option value we're parsing.
:param msg: An extra message to add to the ParseError.
"""
return ParseError('Error while parsing option value {0}: {1}'.format(s, msg))
def dict_option(s):
"""An option of type 'dict'.
The value (on the command-line, in an env var or in the config file) must be eval'able to a dict.
"""
return _convert(s, (dict,))
def list_option(s):
"""An option of type 'list'.
The value (on the command-line, in an env var or in the config file) must be eval'able to a
list or tuple.
"""
return _convert(s, (list, tuple))
def target_list_option(s):
"""Same type as 'list_option', but indicates list contents are target specs."""
return _convert(s, (list, tuple))
def file_option(s):
"""Same type as 'str', but indicates string represents a filepath."""
if not os.path.isfile(s):
raise ParseError('Options file "{filepath}" does not exist.'.format(filepath=s))
return s
def _convert(val, acceptable_types):
"""Ensure that val is one of the acceptable types, converting it if needed.
:param val: The value we're parsing.
:param acceptable_types: A tuple of expected types for val.
:returns: The parsed value
"""
if isinstance(val, acceptable_types):
return val
try:
parsed_value = eval(val, {}, {})
except Exception as e:
raise _parse_error(val, 'Value cannot be evaluated as an expression: '
'{msg}\n{value}\nAcceptable types: '
'{expected}'.format(
msg=e, value=Config.format_raw_value(val),
expected=format_type_tuple(acceptable_types)))
if not isinstance(parsed_value, acceptable_types):
raise _parse_error(val, 'Value is not of the acceptable types: '
                            '{msg}\n{value}'.format(
msg=format_type_tuple(acceptable_types),
value=Config.format_raw_value(val)))
return parsed_value
def format_type_tuple(type_tuple):
"""Return a list of type names from tuple of types."""
return ", ".join([item.__name__ for item in type_tuple])
| sid-kap/pants | src/python/pants/option/custom_types.py | Python | apache-2.0 | 2,669 |
# -*- coding: utf-8 -*-
import os
# example from Pang, Ch. 4
#
# Solve a Poisson equation via shooting
#
# u'' = -0.25*pi**2 (u + 1)
#
# with u(0) = 0, u(1) = 1
#
# this has the analytic solution: u(x) = cos(pi x/2) + 2 sin(pi x/2) - 1
#
# M. Zingale (2013-02-18)
import numpy
import math
import pylab
# for plotting different colors
colors = ["k", "r", "g", "b", "c"]
def rk4(y1_0, y2_0, rhs, xl=0.0, xr=1.0, n=100):
""" R-K 4 integration:
y1_0 and y2_0 are y1(0) and y2(0)
rhs is the righthand side function
xl and xr are the domain limits
n is the number of integration points """
x = numpy.linspace(xl, xr, n)
h = x[1] - x[0] # stepsize
y1 = numpy.zeros(n)
y2 = numpy.zeros(n)
# left boundary initialization
y1[0] = y1_0
y2[0] = y2_0
m = 0
while (m < n - 1):
dy1dx_1, dy2dx_1 = rhs(y1[m], y2[m])
dy1dx_2, dy2dx_2 = rhs(
y1[m] + 0.5 * h * dy1dx_1, y2[m] + 0.5 * h * dy2dx_1)
dy1dx_3, dy2dx_3 = rhs(
y1[m] + 0.5 * h * dy1dx_2, y2[m] + 0.5 * h * dy2dx_2)
dy1dx_4, dy2dx_4 = rhs(y1[m] + h * dy1dx_3, y2[m] + h * dy2dx_3)
y1[m + 1] = y1[m] + (h / 6.0) * (dy1dx_1 + 2.0 *
dy1dx_2 + 2.0 * dy1dx_3 + dy1dx_4)
y2[m + 1] = y2[m] + (h / 6.0) * (dy2dx_1 + 2.0 *
dy2dx_2 + 2.0 * dy2dx_3 + dy2dx_4)
m += 1
return y1, y2
def rhs(y1, y2):
""" RHS function. Here y1 = u, y2 = u'
This means that our original system is:
y2' = u'' = -0.25*pi**2 (u+1) """
dy1dx = y2
dy2dx = -0.25 * math.pi**2 * (y1 + 1.0)
return dy1dx, dy2dx
def analytic(x):
""" analytic solution """
return numpy.cos(math.pi * x / 2) + 2.0 * numpy.sin(math.pi * x / 2) - 1.0
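# Quick check of the boundary values (added note):
#   analytic(0) = cos(0) + 2 sin(0) - 1 = 0        which matches u(0) = 0
#   analytic(1) = cos(pi/2) + 2 sin(pi/2) - 1 = 1  which matches u(1) = 1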
# shoot from x = 0 to x = 1. We will do this by selecting a boundary
# value for y2 and use a secant method to adjust it until we reach the
# desired boundary condition at y1(1)
# number of integration points
npts = 32
# desired tolerance
eps = 1.e-8
# initial guess
y1_0 = 0.0 # this is the correct boundary condition a x = 0
y2_0 = 0.0 # this is what we will adjust to get the desired y1(1)
# desired right BC, y1(1)
y1_1_true = 1.0
# integrate
y1_old, y2_old = rk4(y1_0, y2_0, rhs, xl=0.0, xr=1.0, n=npts)
x = numpy.linspace(0.0, 1.0, npts)
pylab.scatter(x, y1_old, label="initial guess", marker="x", c=colors[0])
# new guess -- we don't have any info on how to compute this yet, so
# just choose something
y2_m1 = y2_0 # store the old guess
y2_0 = -1.0
# Secant loop
dy = 1000.0 # fail first time through
# keep track of iteration for plotting
iter = 1
while (dy > eps):
# integrate
y1, y2 = rk4(y1_0, y2_0, rhs, xl=0.0, xr=1.0, n=npts)
pylab.scatter(x, y1, label="iteration %d" % (iter), marker="x",
c=colors[iter % len(colors)])
# do a Secant method to get
# let eta = our current y2(0) -- this is what we control
# we want to zero f(eta) = y1_1_true(1) - y1_1(eta)
# derivative (for Secant)
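    # (added note) dfdeta is a finite-difference estimate of d f / d eta,
    # where f(eta) = y1_1_true - y1(1; eta) and eta is the trial slope y2(0);
    # the two most recent trials (y2_m1 and y2_0) supply the difference quotient.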
dfdeta = ((1.0 - y1_old[npts - 1]) - (1.0 - y1[npts - 1])) / (y2_m1 - y2_0)
# correction by f(eta) = 0 = f(eta_0) + dfdeta deta
deta = -(1.0 - y1[npts - 1]) / dfdeta
y2_m1 = y2_0
y2_0 += deta
dy = abs(deta)
y1_old = y1
y2_old = y2
iter += 1
pylab.plot(x, analytic(x), color="0.5", label="analytic")
leg = pylab.legend(loc=2)
ltext = leg.get_texts()
pylab.setp(ltext, fontsize='small')
leg.draw_frame(0)
pylab.xlim(0.0, 1.0)
pylab.savefig("solve_poisson_(shooting).png")
os.system("pause")
| NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse à plusieurs variables/Équation aux dérivées partielles/Équation de Poisson/solve_poisson_(shooting).py | Python | gpl-3.0 | 3,652 |
#!/usr/bin/env python
import fdpexpect, pexpect
import unittest
import PexpectTestCase
import sys
import os
class ExpectTestCase(PexpectTestCase.PexpectTestCase):
def setUp(self):
print self.id()
PexpectTestCase.PexpectTestCase.setUp(self)
def test_fd (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = fdpexpect.fdspawn (fd)
s.expect ('This is the end of test data:')
s.expect (pexpect.EOF)
assert s.before == ' END\n'
def test_maxread (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = fdpexpect.fdspawn (fd)
s.maxread = 100
s.expect('2')
s.expect ('This is the end of test data:')
s.expect (pexpect.EOF)
assert s.before == ' END\n'
def test_fd_isalive (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = fdpexpect.fdspawn (fd)
assert s.isalive()
os.close (fd)
assert not s.isalive(), "Should not be alive after close()"
def test_fd_isatty (self):
fd = os.open ('TESTDATA.txt', os.O_RDONLY)
s = fdpexpect.fdspawn (fd)
assert not s.isatty()
#os.close(fd)
s.close()
### def test_close_does_not_close_fd (self):
### """Calling close() on a fdpexpect.fdspawn object should not
### close the underlying file descriptor.
### """
### fd = os.open ('TESTDATA.txt', os.O_RDONLY)
### s = fdpexpect.fdspawn (fd)
### try:
### s.close()
### self.fail('Expected an Exception.')
### except pexpect.ExceptionPexpect, e:
### pass
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(ExpectTestCase, 'test')
#fout = open('delete_me_1','wb')
#fout.write(the_old_way)
#fout.close
#fout = open('delete_me_2', 'wb')
#fout.write(the_new_way)
#fout.close
| elitak/pexpect | tests/test_filedescriptor.py | Python | mit | 1,881 |
from __future__ import unicode_literals
from setuptools import setup, find_packages
import os
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
required = ['Twisted', 'zope.interface']
setup(
name = 'twistedinput',
version = '0.0.1',
author = 'Ivo Slanina',
author_email = 'ivo.slanina@gmail.com',
description = 'Reading input devices with Twisted.',
license = 'Unlicence',
keywords = 'twisted gamepad input joystick mouse',
url = 'https://github.com/buben19/twistedinput',
long_description = read('README.md'),
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Framework :: Twisted',
'Intended Audience :: Developers',
'License :: Freely Distributable',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Hardware :: Hardware Drivers'],
install_requires = required,
packages = find_packages())
| buben19/twistedinput | setup.py | Python | unlicense | 1,197 |
#==============================================================================
# purpose: bivariate normal distribution simulation using PyMC
# author: tirthankar chakravarty
# created: 1/7/15
# revised:
# comments:
# 1. install PyMC
# 2. not clear on why we are helping the sampler along. We want to sample from the
# bivariate
#==============================================================================
import random
import numpy as np
import matplotlib.pyplot as mpl
sample_size = int(5e5)  # integer sample count (np.zeros expects an int shape)
rho = 0.9  # correlation coefficient
mean = [10, 20]
std_dev = [1, 1]
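# --- Illustrative sketch (added; the original file ends just below) ---
# Assuming rho above is the intended correlation coefficient, the correlated
# pairs could be drawn with plain numpy, for example:
#     cov = [[std_dev[0]**2, rho*std_dev[0]*std_dev[1]],
#            [rho*std_dev[0]*std_dev[1], std_dev[1]**2]]
#     biv_random = np.random.multivariate_normal(mean, cov, size=sample_size)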
biv_random = np.zeros([sample_size, 2]) | tchakravarty/PythonExamples | Code/kirk2015/chapter3/bivariate_normal.py | Python | apache-2.0 | 582 |
#!/usr/bin/python2
#
# Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
#
# This version of Yapps 2 can be distributed under the
# terms of the MIT open source license, either found in the LICENSE file
# included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#
import sys, os, re
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
from yapps import runtime, parsetree
def generate(inputfilename, outputfilename='', dump=0, **flags):
"""Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py)."""
if not outputfilename:
if inputfilename.endswith('.g'):
outputfilename = inputfilename[:-2] + '.py'
else:
raise Exception('Must specify output filename if input filename is not *.g')
DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
preparser, postparser = None, None # Code before and after the parser desc
# Read the entire file
s = open(inputfilename,'r').read()
# See if there's a separation between the pre-parser and parser
f = s.find(DIVIDER)
if f >= 0: preparser, s = s[:f]+'\n\n', s[f+len(DIVIDER):]
# See if there's a separation between the parser and post-parser
f = s.find(DIVIDER)
if f >= 0: s, postparser = s[:f], '\n\n'+s[f+len(DIVIDER):]
# Create the parser and scanner and parse the text
scanner = grammar.ParserDescriptionScanner(s, filename=inputfilename)
if preparser: scanner.del_line += preparser.count('\n')
parser = grammar.ParserDescription(scanner)
t = runtime.wrap_error_reporter(parser, 'Parser')
if t is None: return 1 # Failure
if preparser is not None: t.preparser = preparser
if postparser is not None: t.postparser = postparser
# Check the options
for f in t.options.keys():
for opt,_,_ in yapps_options:
if f == opt: break
else:
print >>sys.stderr, 'Warning: unrecognized option', f
# Add command line options to the set
for f in flags.keys(): t.options[f] = flags[f]
# Generate the output
if dump:
t.dump_information()
else:
t.output = open(outputfilename, 'w')
t.generate_output()
return 0
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules['__main__'])
doctest.testmod(parsetree)
# Someday I will use optparse, but Python 2.3 is too new at the moment.
yapps_options = [
('context-insensitive-scanner',
'context-insensitive-scanner',
'Scan all tokens (see docs)'),
]
import getopt
optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['help', 'dump', 'use-devel-grammar'])
if not args or len(args) > 2:
print >>sys.stderr, 'Usage:'
print >>sys.stderr, ' python2', sys.argv[0], '[flags] input.g [output.py]'
print >>sys.stderr, 'Flags:'
print >>sys.stderr, (' --dump' + ' '*40)[:35] + 'Dump out grammar information'
print >>sys.stderr, (' --use-devel-grammar' + ' '*40)[:35] + 'Use the devel grammar parser from yapps_grammar.py instead of the stable grammar from grammar.py'
for flag, _, doc in yapps_options:
print >>sys.stderr, (' -f' + flag + ' '*40)[:35] + doc
else:
# Read in the options and create a list of flags
flags = {}
use_devel_grammar = 0
for opt in optlist:
for flag, name, _ in yapps_options:
if opt == ('-f', flag):
flags[name] = 1
break
else:
if opt == ('--dump', ''):
flags['dump'] = 1
elif opt == ('--use-devel-grammar', ''):
use_devel_grammar = 1
else:
print >>sys.stderr, 'Warning: unrecognized option', opt[0], opt[1]
if use_devel_grammar:
import yapps_grammar as grammar
else:
from yapps import grammar
sys.exit(generate(*tuple(args), **flags))
| strahlex/machinekit | src/hal/utils/yapps.py | Python | lgpl-2.1 | 4,265 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Sebek: kernel module for data collection on honeypots.
"""
# scapy.contrib.description = Sebek
# scapy.contrib.status = loads
from scapy.fields import *
from scapy.packet import *
from scapy.layers.inet import UDP
### SEBEK
class SebekHead(Packet):
name = "Sebek header"
fields_desc = [ XIntField("magic", 0xd0d0d0),
ShortField("version", 1),
ShortEnumField("type", 0, {"read":0, "write":1,
"socket":2, "open":3}),
IntField("counter", 0),
IntField("time_sec", 0),
IntField("time_usec", 0) ]
def mysummary(self):
return self.sprintf("Sebek Header v%SebekHead.version% %SebekHead.type%")
# we need this because Sebek headers differ between v1 and v3, and
# between v3 type socket and v3 others
class SebekV1(Packet):
name = "Sebek v1"
fields_desc = [ IntField("pid", 0),
IntField("uid", 0),
IntField("fd", 0),
StrFixedLenField("cmd", "", 12),
FieldLenField("data_length", None, "data",fmt="I"),
StrLenField("data", "", length_from=lambda x:x.data_length) ]
def mysummary(self):
if isinstance(self.underlayer, SebekHead):
return self.underlayer.sprintf("Sebek v1 %SebekHead.type% (%SebekV1.cmd%)")
else:
return self.sprintf("Sebek v1 (%SebekV1.cmd%)")
class SebekV3(Packet):
name = "Sebek v3"
fields_desc = [ IntField("parent_pid", 0),
IntField("pid", 0),
IntField("uid", 0),
IntField("fd", 0),
IntField("inode", 0),
StrFixedLenField("cmd", "", 12),
FieldLenField("data_length", None, "data",fmt="I"),
StrLenField("data", "", length_from=lambda x:x.data_length) ]
def mysummary(self):
if isinstance(self.underlayer, SebekHead):
return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV3.cmd%)")
else:
return self.sprintf("Sebek v3 (%SebekV3.cmd%)")
class SebekV2(SebekV3):
def mysummary(self):
if isinstance(self.underlayer, SebekHead):
return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV2.cmd%)")
else:
return self.sprintf("Sebek v2 (%SebekV2.cmd%)")
class SebekV3Sock(Packet):
name = "Sebek v2 socket"
fields_desc = [ IntField("parent_pid", 0),
IntField("pid", 0),
IntField("uid", 0),
IntField("fd", 0),
IntField("inode", 0),
StrFixedLenField("cmd", "", 12),
IntField("data_length", 15),
IPField("dip", "127.0.0.1"),
ShortField("dport", 0),
IPField("sip", "127.0.0.1"),
ShortField("sport", 0),
ShortEnumField("call", 0, { "bind":2,
"connect":3, "listen":4,
"accept":5, "sendmsg":16,
"recvmsg":17, "sendto":11,
"recvfrom":12}),
ByteEnumField("proto", 0, IP_PROTOS) ]
def mysummary(self):
if isinstance(self.underlayer, SebekHead):
return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV3Sock.cmd%)")
else:
return self.sprintf("Sebek v3 socket (%SebekV3Sock.cmd%)")
class SebekV2Sock(SebekV3Sock):
def mysummary(self):
if isinstance(self.underlayer, SebekHead):
return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV2Sock.cmd%)")
else:
return self.sprintf("Sebek v2 socket (%SebekV2Sock.cmd%)")
bind_layers( UDP, SebekHead, sport=1101)
bind_layers( UDP, SebekHead, dport=1101)
bind_layers( UDP, SebekHead, dport=1101, sport=1101)
bind_layers( SebekHead, SebekV1, version=1)
bind_layers( SebekHead, SebekV2Sock, version=2, type=2)
bind_layers( SebekHead, SebekV2, version=2)
bind_layers( SebekHead, SebekV3Sock, version=3, type=2)
bind_layers( SebekHead, SebekV3, version=3)
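# Example (illustrative, not part of the original module): a Sebek v1 "read"
# record can be built by stacking the layers defined above, e.g.
#     UDP(sport=1101, dport=1101)/SebekHead(version=1, type="read")/SebekV1(cmd="bash", data="uname -a\n")
# The bind_layers() calls above make Scapy dissect UDP port 1101 traffic as Sebek.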
| CodeNameGhost/shiva | thirdparty/scapy/contrib/sebek.py | Python | mit | 4,668 |
""" Rtorrent Output Plugin.
Copyright (c) 2011 The PyroScope Project <pyroscope.project@gmail.com>
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from flexget import plugin, validator
#from flexget import feed as flexfeed
#from flexget.plugins import plugin_torrent
from pyrocore import error
from pyrocore import config as pyrocfg
from pyrocore.util import os, pymagic
#from pyrocore.torrent import engine
class Rtorrent(plugin.Plugin):
""" Adds entries to a rTorrent client.
"""
PRIO = 144
def __init__(self, *args, **kw): # bogus super error pylint: disable=E1002
""" Set plugin attribute defaults.
"""
super(Rtorrent, self).__init__(*args, **kw)
#self.LOG = pymagic.get_class_logger(self)
self.proxy = None
self.log = self.log # happy now, pylint?
def validator(self):
""" Our configuration model.
"""
root = validator.factory()
return root
def _sanitize_config(self, config):
""" Check config for correctness and make its content canonical.
"""
if config in (True, False):
# Enabled or disabled, with only defaults
config = {"enabled": config}
elif isinstance(config, basestring):
# Only path to rtorrent config given
config = {"rtorrent_rc": config}
else:
config = config.copy()
config["rtorrent_rc"] = os.path.expanduser(config["config_dir"])
return config
def _open_proxy(self, config):
""" Open proxy, if enabled and not yet open.
"""
cfg = self._sanitize_config(config)
if cfg and cfg["enabled"] and self.proxy is None:
try:
# Open the connection
self.proxy = pyrocfg.engine.open()
self.log.info(self.proxy) # where are we connected?
except error.LoggableError, exc:
raise plugin.PluginError(str(exc))
return self.proxy
def on_process_start(self, feed, config):
""" Open the connection, if necessary.
"""
##LOG.warn("PROCSTART %r with %r" % (feed, config))
self._open_proxy(config) # make things fail fast if they do
def on_process_end(self, feed, config):
""" Show final XMLRPC stats.
"""
if self.proxy:
self.log.info("XMLRPC stats: %s" % (self.proxy,))
self.proxy = None
def on_feed_start(self, feed, config):
""" Feed starting.
"""
self.config = self._sanitize_config(config)
def on_feed_exit(self, feed, config):
""" Feed exiting.
"""
self.config = None
# Feed aborted, clean up
on_feed_abort = on_feed_exit
@plugin.priority(PRIO)
def on_feed_output(self, feed, _):
""" Load entries into rTorrent.
"""
if not self.config["enabled"]:
self.log.debugall("plugin disabled")
return
if self.proxy:
try:
pass
except error.LoggableError, exc:
raise plugin.PluginError(exc)
# TODO: if rT is not up, save torrent in watch dir as a fallback;
# possibly add meta info to a special key then, so it can later be restored from session
| Rudde/pyroscope | pyrocore/src/pyrocore/flexget/output.py | Python | gpl-2.0 | 4,035 |
from django.apps import AppConfig
class RoomConfig(AppConfig):
name = 'room'
| godspeedcorporation/clinic | room/apps.py | Python | mit | 83 |
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db.models import Q
from django.forms import ModelForm
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import View, TemplateView
from judge.models import TestGroup, Problem, Test
class TestGroupForm(ModelForm):
class Meta:
model = TestGroup
exclude = ['problem']
class TestGroupEdit(PermissionRequiredMixin, View):
permission_required = 'judge.change_testgroup'
    template_name = 'judge/test_group_edit.html'
title = 'Edit Test group'
redir_pattern = 'judge:test_list'
def get_context(self, **kwargs):
context = {'title': self.title }
if 'pk' in kwargs:
context['test_group'] = get_object_or_404(TestGroup, pk = kwargs['pk'])
problem = context['test_group'].problem
else:
problem = get_object_or_404(Problem, pk = kwargs['problem_id'])
context['problem'] = problem
if 'form' in kwargs:
context['form'] = kwargs['form']
elif 'test_group' in context:
context['form'] = TestGroupForm(instance = context['test_group'])
else:
context['form'] = TestGroupForm()
tests_Q = Q(test_group__isnull = True)
if 'test_group' in context:
tests_Q |= Q(test_group = context['test_group'])
context['selected'] = context['test_group'].test_set.all()
context['tests'] = problem.test_set.filter(tests_Q)
return context
def get(self, request, **kwargs):
        return render(request, self.template_name, self.get_context(**kwargs))
def update_tests(self, testGroup, request):
for test in testGroup.test_set.all():
test.test_group = None
test.save()
testPk = request.POST.getlist('test-select')
for test in Test.objects.filter(pk__in = testPk):
test.test_group = testGroup
test.save()
def post(self, request, pk):
testGroup = get_object_or_404(TestGroup, pk = pk)
form = TestGroupForm(request.POST, instance = testGroup)
if form.is_valid():
testGroup = form.save()
self.update_tests(testGroup, request)
testGroup.problem.update_max_score()
return redirect(self.redir_pattern, problem_id = testGroup.problem.pk)
else:
context = self.get_context(form = form, pk = pk)
return render(request, self.template_name, context)
class TestGroupNew(TestGroupEdit):
permission_required = 'judge.add_testgroup'
title = 'New Test group'
def post(self, request, problem_id):
form = TestGroupForm(request.POST)
if form.is_valid():
testGroup = form.save(commit = False)
testGroup.problem = get_object_or_404(Problem, pk = problem_id)
testGroup.save()
self.update_tests(testGroup, request)
testGroup.problem.update_max_score()
return redirect(self.redir_pattern, problem_id = problem_id)
else:
context = self.get_context(form = form, problem_id = problem_id)
return render(request, self.template_name, context)
class TestGroupDelete(PermissionRequiredMixin, View):
permission_required = 'judge.delete_problem'
template_name = 'judge/test_group_delete.html'
def get(self, request, pk):
testGroup = get_object_or_404(TestGroup, pk = pk)
context = {
'test_group': testGroup,
'problem': testGroup.problem
}
return render(request, self.template_name, context)
def post(self, request, pk):
testGroup = get_object_or_404(TestGroup, pk = pk)
testGroup.delete()
testGroup.problem.update_max_score()
messages.success(request, 'Test group deleted successfully')
return redirect('judge:test_list', problem_id = testGroup.problem.pk)
| Alaxe/judgeSystem | judge/views/testgroup.py | Python | gpl-2.0 | 4,074 |
import sha, uuid, hmac, json
from datetime import datetime, timedelta
from base64 import b64encode
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import user_passes_test
from django.views.decorators.http import require_POST
S3DIRECT_DIR = getattr(settings, "S3DIRECT_DIR", 's3direct')
@csrf_exempt
@require_POST
@user_passes_test(lambda u: u.is_staff)
def get_upload_params(request, upload_to=''):
content_type = request.POST['type']
expires = (datetime.now() + timedelta(hours=24)).strftime('%Y-%m-%dT%H:%M:%S.000Z')
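    # The policy document below is what S3 enforces on the browser upload:
    # it is JSON, base64-encoded with newlines stripped, then signed with
    # HMAC-SHA1 using the AWS secret key (the classic pre-SigV4 POST flow).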
policy_object = json.dumps({
"expiration": expires,
"conditions": [
{"bucket": settings.AWS_STORAGE_BUCKET_NAME},
{"acl": "public-read"},
{"Content-Type": content_type},
["starts-with", "$key", ""],
{"success_action_status": "201"}
]
})
policy = b64encode(policy_object.replace('\n', '').replace('\r', ''))
signature = b64encode(hmac.new(settings.AWS_SECRET_ACCESS_KEY, policy, sha).digest())
key = "%s/%s/${filename}" % (upload_to or S3DIRECT_DIR, uuid.uuid4().hex)
data = {
"policy": policy,
"signature": signature,
"key": key,
"AWSAccessKeyId": settings.AWS_ACCESS_KEY_ID,
"form_action": "http://%s.s3.amazonaws.com" % settings.AWS_STORAGE_BUCKET_NAME,
"success_action_status": "201",
"acl": "public-read",
"Content-Type": content_type
}
return HttpResponse(json.dumps(data), content_type="application/json") | Vostopia/clickjogoshost | apps/s3direct/views.py | Python | mit | 1,654 |
"""
Module with views for slidelint site.
"""
from pyramid.view import view_config
from pyramid.renderers import render
from pyramid.response import Response
from .validators import validate_rule, validate_upload_file
from pyramid_mailer.message import Message
from pyramid_mailer import get_mailer
import transaction
import logging
LOGGER = logging.getLogger(__name__)
@view_config(route_name='feedback', request_method="POST", renderer='json')
def feedback(request):
"""
Feedback view - send to site administrator user feedback message
"""
mgs = request.json_body.get('message', None)
uid = request.json_body.get('uid', '')
if not mgs:
request.response.status_code = 400
return {'error': 'you should provide message and job uid'}
mailer = get_mailer(request)
settings = request.registry.settings
body = "Job id: %s\nFeedback text:\n%s" % (uid, mgs)
message = Message(
subject=settings['mail.subject'],
sender=settings['mail.sender'],
recipients=settings['mail.recipients'].split(','),
body=body)
mailer.send(message)
transaction.commit()
return {'status': 'ok'}
@view_config(context='.models.Counter')
def main_view(context, request):
"""
Main site page view. Renders main template with angularjs app.
    Only the number of checked presentations is returned to the renderer.
"""
if request.method == 'GET':
return Response(
render('templates/index.pt', {'count': context.count}, request))
settings = request.registry.settings
rule = request.POST.get('check_rule', None)
validation_error = validate_rule(rule)
if validation_error:
request.response.status_code = 400
return Response(render('json', validation_error, request))
max_allowed_size = int(settings.get('max_allowed_size', 15000000))
upload_file = request.POST.get('file', None)
validation_error = validate_upload_file(upload_file, max_allowed_size)
if validation_error:
request.response.status_code = 400
return Response(render('json', validation_error, request))
jobs_manager = settings['jobs_manager']
info = jobs_manager.add_new_job(upload_file.file, rule)
request.response.status_code = info.pop('status_code')
context.increment()
return Response(render('json', info, request))
@view_config(route_name='results', request_method='POST', renderer="json")
def results_view(request):
"""
checks if uid is in results
"""
uid = request.json_body.get('uid', None)
jobs_manager = request.registry.settings['jobs_manager']
LOGGER.debug(
'looking for "%s" in "%s"' % (uid, jobs_manager.results.keys()))
if uid in jobs_manager.results:
rez = jobs_manager.results.pop(uid)
LOGGER.debug("send results to client")
request.response.status_code = rez.get('status_code', 500)
result = rez.get('result', 'something goes really wild')
icons = rez.get('icons', [])
return {'result': result, 'icons': icons}
request.response.status_code = 404
return {'msg': 'job "%s" was not found in results' % uid}
@view_config(
name='app.js', context='.models.Counter', renderer='templates/app_js.pt')
def app_js(context, request):
"""
pass to app.js some arguments, like file size or number
of checked presentations
"""
request.response.content_type = 'text/javascript'
settings = request.registry.settings
max_allowed_size = int(settings.get('max_allowed_size', 15000000))
return {'max_allowed_size': max_allowed_size, 'count': context.count}
| enkidulan/slidelint_site | slidelint_site/views.py | Python | apache-2.0 | 3,629 |
__author__ = 'Tom Schaul, tom@idsia.ch'
from gomokutask import GomokuTask
from pybrain.rl.environments.twoplayergames.gomokuplayers import ModuleDecidingPlayer
from pybrain.rl.environments.twoplayergames import GomokuGame
from pybrain.rl.environments.twoplayergames.gomokuplayers.gomokuplayer import GomokuPlayer
from pybrain.structure.networks.custom.capturegame import CaptureGameNetwork
class RelativeGomokuTask(GomokuTask):
""" returns the (anti-symmetric) relative score of p1 with respect to p2.
(p1 and p2 are CaptureGameNetworks)
The score depends on:
- greedy play
- moves-until-win or moves-until-defeat (winning faster is better)
- play with noisy moves (e.g. adjusting softmax temperature)
"""
# are networks provided?
useNetworks = False
# maximal number of games per evaluation
maxGames = 3
minTemperature = 0
maxTemperature = 0.2
verbose = False
# coefficient determining the importance of long vs. short games w.r. to winning/losing
numMovesCoeff = 0.5
def __init__(self, size, **args):
self.setArgs(**args)
self.size = size
self.task = GomokuTask(self.size)
self.env = self.task.env
self.maxmoves = self.env.size[0] * self.env.size[1]
self.minmoves = 9
def __call__(self, p1, p2):
self.temp = self.minTemperature
if self.useNetworks:
p1 = ModuleDecidingPlayer(p1, self.task.env, temperature = self.temp)
p2 = ModuleDecidingPlayer(p2, self.task.env, temperature = self.temp)
else:
assert isinstance(p1, GomokuPlayer)
assert isinstance(p2, GomokuPlayer)
p1.game = self.task.env
p2.game = self.task.env
p1.color = GomokuGame.BLACK
p2.color = -p1.color
self.player = p1
self.opponent = p2
# the games with increasing temperatures and lower coefficients
coeffSum = 0.
res = 0.
for i in range(self.maxGames):
coeff = 1/(10*self.temp+1)
res += coeff * self._oneGame()
coeffSum += coeff
if i > 0:
self._globalWarming()
return res / coeffSum
def _globalWarming(self):
""" increase temperature """
if self.temp == 0:
self.temp = 0.02
else:
self.temp *= 1.2
if self.temp > self.maxTemperature:
return False
elif self._setTemperature() == False:
# not adjustable, keep it fixed then.
self.temp = self.minTemperature
return False
return True
def _setTemperature(self):
if self.useNetworks:
self.opponent.temperature = self.temp
self.player.temperature = self.temp
return True
elif hasattr(self.opponent, 'randomPartMoves'):
# an approximate conversion of temperature into random proportion:
randPart = self.temp/(self.temp+1)
self.opponent.randomPartMoves = randPart
self.player.randomPartMoves = randPart
return True
else:
return False
def _oneGame(self, preset = None):
""" a single black stone can be set as the first move. """
self.env.reset()
if preset != None:
self.env._setStone(GomokuGame.BLACK, preset)
self.env.movesDone += 1
self.env.playToTheEnd(self.opponent, self.player)
else:
self.env.playToTheEnd(self.player, self.opponent)
moves = self.env.movesDone
win = self.env.winner == self.player.color
if self.verbose:
print 'Preset:', preset, 'T:', self.temp, 'Win:', win, 'after', moves, 'moves.'
res = 1 - self.numMovesCoeff * (moves -self.minmoves)/(self.maxmoves-self.minmoves)
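        # (added note) res shrinks linearly from 1 (fastest possible game)
        # towards 1 - numMovesCoeff (board completely filled); the sign applied
        # below encodes whether the evaluated player won or lost.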
if win:
return res
else:
return -res
if __name__ == '__main__':
net1 = CaptureGameNetwork(hsize = 1)
net2 = CaptureGameNetwork(hsize = 1)
r = RelativeGomokuTask(7, maxGames = 10, useNetworks = True)
print r(net1, net2)
print r(net2, net1)
print r.env
r.maxGames = 50
print r(net1, net2)
print r(net2, net1)
print r.env
| iut-ibk/Calimero | site-packages/pybrain/rl/environments/twoplayergames/tasks/relativegomokutask.py | Python | gpl-2.0 | 4,395 |
size(600, 600)
# Use a grid to generate a bubble-like composition.
# This example shows that a grid doesn't have to be rigid at all.
# It's very easy to breake loose from the coordinates NodeBox
# passes you, as is shown here. The trick is to add or subtract
# something from the x and y values NodeBox passes on. Here,
# we also use random sizes.
# We use a little bit of math to define the fill colors.
# Sinus and cosinus are not standard functions of NodeBox.
# Instead, they are in Python's math library. The next
# line imports those functions.
from math import sin, cos
gridSize = 40
# Translate a bit to the right and a bit to the bottom to
# create a margin.
translate(100,100)
startval = random()
c = random()
for x, y in grid(10,10, gridSize, gridSize):
fill(sin(startval + y*x/100.0), cos(c), cos(c),random())
s = random()*gridSize
oval(x, y,s, s)
fill(cos(startval + y*x/100.0), cos(c), cos(c),random())
deltaX = (random()-0.5)*10
deltaY = (random()-0.5)*10
deltaS = (random()-0.5)*200
oval(x+deltaX, y+deltaY,deltaS, deltaS)
c += 0.01 | karstenw/nodebox-pyobjc | examples/Grid/Balls.py | Python | mit | 1,089 |
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check that the WiFi is working."""
import socket
import subprocess
import traceback
WPA_CONF_PATH = '/etc/wpa_supplicant/wpa_supplicant.conf'
GOOGLE_SERVER_ADDRESS = ('speech.googleapis.com', 443)
def check_wifi_is_configured():
"""Check wpa_supplicant.conf has at least one network configured."""
output = subprocess.check_output(['sudo', 'cat', WPA_CONF_PATH]).decode('utf-8')
return 'network=' in output
def check_wifi_is_connected():
"""Check wlan0 has an IP address."""
output = subprocess.check_output(['ifconfig', 'wlan0']).decode('utf-8')
return 'inet addr' in output
def check_can_reach_google_server():
"""Check the API server is reachable on port 443."""
print("Trying to contact Google's servers...")
try:
sock = socket.create_connection(GOOGLE_SERVER_ADDRESS, timeout=10)
sock.close()
return True
except Exception: # pylint: disable=W0703
return False
def main():
"""Run all checks and print status."""
print('Checking the WiFi connection...')
if not check_wifi_is_configured():
print('Please click the WiFi icon at the top right to set up a WiFi network.')
return
if not check_wifi_is_connected():
print(
"""You are not connected to WiFi. Please click the WiFi icon at the top right
to check your settings.""")
return
if not check_can_reach_google_server():
print(
"""Failed to reach Google servers. Please check that your WiFi network is
connected to the internet.""")
return
print('The WiFi connection seems to be working.')
if __name__ == '__main__':
try:
main()
input('Press Enter to close...')
except: # pylint: disable=bare-except
traceback.print_exc()
input('Press Enter to close...')
| t1m0thyj/aiyprojects-raspbian | checkpoints/check_wifi.py | Python | apache-2.0 | 2,436 |
#-*- coding: utf-8 -*-
import class_db
import time
import sys
import logger
import registroVenta
import threading
import select
import os
import InhibirMDB
from libErrores import registroError
import socket
import libVenta
IP_UDP = "127.0.0.1"
PUERTO_UDP = 8000
MESSAGE = "...."
POOL_TIME = 0.1
tarifaActual = 0 # current tariff (rate)
tiempoActual = 0 # current opening time
turnoActual = 0
maxIntentos = 10
DEBUG = 1
__ACTIVO__ = '1'
__INACTIVO__ = '0'
# ------ REQUESTS FROM THE C SOCKET ------
__POOL__ = "P|x"
__TURNO__ = "T|x"
__ERROR__ = "E|"
__MONTO__ = "M|"
__TARIFA__ = "R|x"
__TIEMPO__ = "O|x"
__TICKET__ = "K|x"
__CANALES__ = "C|x"
__VENTA__ = " \x02"
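# (added summary) Single-letter codes exchanged with the C socket:
#   P|x  poll / keep-alive          T|x  current shift (turn) number
#   R|x  tariff (price x 100)       O|x  opening time in seconds
#   K|x  next ticket number         C|x  coin-channel states
#   E|   error report from C        M|   cash amount for a shift cut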
#BasicValidator = "/home/linaro/projects/ITLSSPLinux_6mod/BasicValidator6/BasicValidator"
BasicValidator = "/home/odroid/projects/ITLSSPLinux_6mod/BasicValidator6/BasicValidator"
def socketC():
try:
os.system(BasicValidator)
except:
if DEBUG:
print "No se puede iniciar el socket de C, BasicValidator encontrado revise la ruta del archivo"
logger.error("No se puede iniciar el socket de C, BasicValidator encontrado revise la ruta del archivo")
def iniciarSocketC():
hilo_C = threading.Thread(target=socketC, name="Hilo del socket de C")
hilo_C.start()
# ------------------- FUNCTIONS ---------------------------
def venta(msg):
registroVenta.main(msg)
def turnoActivo():
turno = class_db.turnoActual()
global turnoActual
turnoActual = turno
turno = "T|" + str(turno)
if DEBUG:
print "TX: ", turno
return turno
def tarifa():
tarif = class_db.tarifa()
tarif = tarif * 100
tarif = int(tarif)
global tarifaActual
    tarifaActual = tarif # store the tariff in the global variable
tarif = "R|" + str(tarif)
if DEBUG:
print "TX: ", tarif
return tarif
def tiempoApertura():
tApertura = str(class_db.tiempo_apertura())
global tiempoActual
    tiempoActual = tApertura # store the opening time in the global variable
tApertura = "O|" + tApertura
if DEBUG:
print "TX: " + tApertura
return tApertura
def ticket():
ultimoTicket = int(class_db.ticket()) + 1
ultimoTicket = "K|" + str(ultimoTicket)
return ultimoTicket
def cambiarTarifa():
    nuevaTarifa = class_db.tarifa() # query the configured tariff
nuevaTarifa = int(nuevaTarifa * 100)
    if nuevaTarifa != tarifaActual: # if the tariff changed, return True so the new value is sent
return True
else:
return False
def cambiarTiempoA():
nuevoTiempo = class_db.tiempo_apertura()
if str(nuevoTiempo) != str(tiempoActual):
return True
else:
return False
# ------------------------------ MAIN PROGRAM BODY -------------------------------------
def Socket():
direccion = ""
cambioTarifa = False
cambioTiempo = False
peticionesFallidas = 0
corteTurno = False
cambioMonedero = False
corteAutomatico = False
if DEBUG:
print "Conectando a %s por el puerto %i" % (IP_UDP, PUERTO_UDP)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((IP_UDP, PUERTO_UDP))
sock.setblocking(0)
if DEBUG:
print "Iniciando Conexión"
sock.sendto("Iniciando socket", (IP_UDP, PUERTO_UDP))
if DEBUG:
print "Conexión establecida"
class_db.estadoSocketPython('1')
except:
if DEBUG:
print "No se puede iniciar el socket de python, la dirreción o puerto pude estar ocupada por otro proceso"
logger.error("No se puede iniciar el socket de python, la dirreción o puerto pude estar ocupada por otro proceso")
sys.exit(0)
while True:
time.sleep(POOL_TIME)
corteAutomatico = class_db.tipoCorte()
if corteAutomatico:
libVenta.hacerCorteAutomatico()
msgSocket = select.select([sock], [], [], 0.5)
        # ------ Flags that track changes for the coin hopper -----
cambioTarifa = cambiarTarifa()
cambioTiempo = cambiarTiempoA()
corteTurno = libVenta.hacerCorteTurno()
cambioMonedero = libVenta.cambiarCanalesHoppper()
if msgSocket[0]:
class_db.estadoSocketPython(__ACTIVO__)
peticionesFallidas = 0
intentosActivar = 0
RX = ""
RX, direccion = sock.recvfrom(1024)
if DEBUG:
if RX != 'P|x':
print "RX: %s" % (RX)
if cambioMonedero:
sock.sendto( libVenta.estadoCanales(), direccion )
class_db.desactivarCambioMonedero()
if DEBUG:
print "Cambiando los estados de los canales del monedero"
if cambioTarifa:
sock.sendto(tarifa(), direccion)
if DEBUG:
print "Cambiando la tarifa a $%f" % ( tarifaActual/100 )
logger.debug("Cambio de tarifa a $%i0" % ( tarifaActual/100 ) )
if cambioTiempo:
sock.sendto(tiempoApertura(), direccion)
if DEBUG:
print "Cambio de tiempo de apertura a %s seg." % ( tiempoActual )
logger.debug("Cambio de tiempo de apertura a %s seg." % ( tiempoActual ) )
if corteTurno:
sock.sendto("M|x", direccion)
class_db.desactivarCorteTurno()
            # ---- Select the operation to perform ----
if len(RX) > 2:
if RX == __TARIFA__:
sock.sendto(tarifa(), direccion)
if RX == __TIEMPO__:
sock.sendto(tiempoApertura(), direccion)
if RX == __TICKET__:
sock.sendto(ticket(), direccion)
if RX == __TURNO__:
sock.sendto(turnoActivo(), direccion)
if RX == __CANALES__:
                    sock.sendto(libVenta.estadoCanales(), direccion)
if RX == __POOL__:
sock.sendto(MESSAGE, direccion)
if RX.startswith(__ERROR__):
registroError(RX)
if RX.startswith(__MONTO__):
fondo = libVenta.totalMonto(RX)
class_db.corteTurno(fondo)
sock.sendto(turnoActivo(), direccion)
if DEBUG:
print "*" * 10, "Se realizo un corte de Turno" , "*" * 10
if len(RX) > 70:
if DEBUG:
print "Venta entrante : %s" % ( RX )
try:
hilo_venta = threading.Thread(target = venta, args = (RX, ), name = "Hilo para registro de la venta")
hilo_venta.start()
except:
logger.error("No se puede registrar la venta, revisar la base de datos y consultas posible error con los"\
" nombre de las columnas")
if DEBUG:
print "No se puede registrar la venta, revisar la base de datos y consultas posible error con los"\
" nombre de las columnas"
        # The C socket is not answering
else:
class_db.estadoSocketC(__INACTIVO__)
peticionesFallidas += 1
if DEBUG:
print "*" * 10, " Conexion perdida ", "*" * 10
if peticionesFallidas == maxIntentos:
while intentosActivar < maxIntentos:
time.sleep(0.5)
logger.warning("Reinicio automatico del Socket C")
if DEBUG:
print "*" * 10, " Reinicio automatico del Socket C ", "*" * 10
try:
InhibirMDB.main()
iniciarSocketC()
break
except:
intentosActivar += 1
| the-adrian/KernotekV2.0 | libSocket.py | Python | gpl-3.0 | 6,853 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
#
# Use period and Journal for selection or resources
#
class journal_print(report_sxw.rml_parse):
def lines(self, journal_id, *args):
self.cr.execute('select id from account_analytic_line where journal_id=%s order by date,id', (journal_id,))
ids = map(lambda x: x[0], self.cr.fetchall())
res = self.pool.get('account.analytic.line').browse(self.cr, self.uid, ids)
return res
def _sum_lines(self, journal_id):
self.cr.execute('select sum(amount) from account_analytic_line where journal_id=%s', (journal_id,))
return self.cr.fetchone()[0] or 0.0
def __init__(self, cr, uid, name, context):
super(journal_print, self).__init__(cr, uid, name, context=context)
self.localcontext = {
'time': time,
'lines': self.lines,
'sum_lines': self._sum_lines,
}
report_sxw.report_sxw('report.account.analytic.journal.print', 'account.analytic.journal', 'addons/account/project/report/analytic_journal.rml',parser=journal_print)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| aricchen/openHR | openerp/addons/account/project/report/account_journal.py | Python | agpl-3.0 | 2,142 |
# -*- coding: UTF-8 -*-
# Created by mcxiaoke on 2015/7/6 22:20.
__author__ = 'mcxiaoke'
import sys, os
from os import path
from datetime import datetime
print 'current dir is', os.getcwd()
print 'command line args is', sys.argv
if len(sys.argv) < 2:
sys.exit(1)
# Batch-rename photo files
# Rename each file after its modification date, then move it to the destination folder
FILE_NAME_FORMAT = "IMG_%Y%m%d_%H%M%S"
start_dir = path.abspath(sys.argv[1])
output_dir = path.join(path.dirname(start_dir), 'output')
if not path.exists(output_dir):
os.mkdir(output_dir)
print 'start dir is %s' % start_dir
print 'output dir is %s' % output_dir
bn = []
an = []
def handler(arg, dirname, names):
dir_path = path.join(dirname, dirname)
print ("current dir is %s" % dir_path)
for file in names:
file_path = path.abspath(path.join(dirname, file))
print "processing file: %s" % file
# print 'path is file: ', path.isfile(file_path)
if not path.isfile(file_path):
continue
_, ext = path.splitext(file)
file_st = os.stat(file_path)
fm = datetime.fromtimestamp(file_st.st_mtime)
print 'file modified time is', fm.strftime("%Y-%m-%d %H:%M:%S"), fm.microsecond
src_name = file
dest_name = fm.strftime(FILE_NAME_FORMAT) + ext
print 'src name is %s' % src_name
print 'dest name is %s' % dest_name
if src_name != dest_name:
bn.append(path.abspath(path.join(dirname, src_name)))
an.append(path.abspath(path.join(output_dir, dest_name)))
return 0
os.path.walk(start_dir, handler, ())
if bn and an:
for src, dest in zip(bn, an):
print src, dest
if path.exists(src) and path.isfile(src) and not path.exists(dest):
ret = os.rename(src, dest)
print 'rename result=', ret
print 'rename %s to %s' % (src, dest)
else:
print "%s not changed" % src
| mcxiaoke/python-labs | labs/photos_walker_00.py | Python | apache-2.0 | 1,959 |
# -*- coding: utf-8 -*-
from os import path
import io
import yaml
PROJ_PATH = path.sep.join(__file__.split(path.sep)[:-2])
DATA_PATH = path.join(
PROJ_PATH, 'hebrew-special-numbers-default.yml')
specialnumbers = yaml.safe_load(io.open(DATA_PATH, encoding='utf8'))
MAP = (
(1, u'א'),
(2, u'ב'),
(3, u'ג'),
(4, u'ד'),
(5, u'ה'),
(6, u'ו'),
(7, u'ז'),
(8, u'ח'),
(9, u'ט'),
(10, u'י'),
(20, u'כ'),
(30, u'ל'),
(40, u'מ'),
(50, u'נ'),
(60, u'ס'),
(70, u'ע'),
(80, u'פ'),
(90, u'צ'),
(100, u'ק'),
(200, u'ר'),
(300, u'ש'),
(400, u'ת'),
(500, u'ך'),
(600, u'ם'),
(700, u'ן'),
(800, u'ף'),
(900, u'ץ')
)
MAP_DICT = dict([(k, v) for v, k in MAP])
GERESH = set(("'", '׳'))
def gematria_to_int(string):
res = 0
for i, char in enumerate(string):
if char in GERESH and i < len(string)-1:
res *= 1000
if char in MAP_DICT:
res += MAP_DICT[char]
return res
# adapted from hebrew-special-numbers documentation
def int_to_gematria(num, gershayim=True):
"""convert integers between 1 an 999 to Hebrew numerals.
- set gershayim flag to False to ommit gershayim
"""
# 1. Lookup in specials
if num in specialnumbers['specials']:
retval = specialnumbers['specials'][num]
return _add_gershayim(retval) if gershayim else retval
# 2. Generate numeral normally
parts = []
rest = str(num)
while rest:
digit = int(rest[0])
rest = rest[1:]
if digit == 0:
continue
power = 10 ** len(rest)
parts.append(specialnumbers['numerals'][power * digit])
retval = ''.join(parts)
# 3. Add gershayim
return _add_gershayim(retval) if gershayim else retval
def _add_gershayim(s):
if len(s) == 1:
s += specialnumbers['separators']['geresh']
else:
s = ''.join([
s[:-1],
specialnumbers['separators']['gershayim'],
s[-1:]
])
return s
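# Usage sketch (illustrative, not part of the original module):
#   gematria_to_int(u'קכג')  ->  123   (100 + 20 + 3, straight from MAP)
#   int_to_gematria(123)     ->  numeral built from the bundled YAML data,
#                                typically קכ״ג once gershayim are inserted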
| OriHoch/python-hebrew-numbers | hebrew_numbers/__init__.py | Python | mit | 2,084 |
from sklearn_explain.tests.skl_datasets_reg import skl_datasets_test as skltest
skltest.test_reg_dataset_and_model("RandomReg_500" , "SVR_poly_8")
| antoinecarme/sklearn_explain | tests/skl_datasets_reg/RandomReg_500/skl_dataset_RandomReg_500_SVR_poly_8_code_gen.py | Python | bsd-3-clause | 149 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Generate various plots using IAPWS."""
from math import pi, atan, log
import matplotlib.pyplot as plt
import numpy as np
import iapws
from iapws._iapws import Pt, Pc, Tc
from iapws.iapws97 import _PSat_T, _P23_T
###############################################################################
# Configuration section
###############################################################################
# Define standard to use in plot, IAPWS95 very slow!
fluid = iapws.IAPWS97
# fluid = iapws.IAPWS95
# Define kind of plot
xAxis = "s"
yAxis = "P"
# Point count per isoline; higher values give more definition but slower calculation
points = 50
# Saturation line format
isosat_kw = {"ls": "-", "color": "black", "lw": 1}
# Isoquality lines to plot
isoq = np.arange(0.1, 1, 0.1)
isoq_kw = {"ls": "--", "color": "black", "lw": 0.5}
labelq_kw = {"size": "xx-small", "ha": "right", "va": "center"}
# Isotherm lines to plot, values in ºC
isoT = [0, 50, 100, 200, 300, 400, 500, 600, 700, 800, 1200, 1600, 2000]
isoT_kw = {"ls": "-", "color": "red", "lw": 0.5}
labelT_kw = {"size": "xx-small", "ha": "right", "va": "bottom"}
# Isobar lines to plot
isoP = [Pt, 0.001, 0.01, 0.1, 1, 10, 20, 50, 100]
isoP_kw = {"ls": "-", "color": "blue", "lw": 0.5}
labelP_kw = {"size": "xx-small", "ha": "center", "va": "center"}
# Isoenthalpic lines to plot
isoh = np.arange(200, 4400, 200)
isoh_kw = {"ls": "-", "color": "green", "lw": 0.5}
labelh_kw = {"size": "xx-small", "ha": "center", "va": "center"}
# Isoentropic lines to plot
isos = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
isos_kw = {"ls": "-", "color": "brown", "lw": 0.5}
labels_kw = {"size": "xx-small", "ha": "center", "va": "center"}
# # Isochor lines to plot
isov = [0.1, 1, 10, 100]
isov_kw = {"ls": "-", "color": "green", "lw": 0.5}
# Show region limits
regionBoundary = True
# Show region5
region5 = False
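# Note (added): with IAPWS97, points that fall in region 5 (high temperature,
# low pressure) are skipped while tracing the isolines below unless region5 is
# set to True.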
###############################################################################
# Calculate
###############################################################################
# Set plot label
title = {
"T": "T, K",
"P": "P, MPa",
"v": "v, m³/kg",
"h": "h, kJ/kg",
"s": "s, kJ/kgK"}
# Check axis correct definition
validAxis = ", ".join(title.keys())
if xAxis not in title:
raise ValueError("X axis variable don´t supported, valid only ", validAxis)
if yAxis not in title:
raise ValueError("Y axis variable don´t supported, valid only ", validAxis)
if xAxis == yAxis:
raise ValueError("X and Y axis can't show same variable")
# Set plot legend
plt.title("%s-%s Diagram" % (yAxis, xAxis))
xtitle = title[xAxis]
plt.xlabel(xtitle)
ytitle = title[yAxis]
plt.ylabel(ytitle)
# Set logaritmic scale if apropiate
if xAxis in ["P", "v"]:
plt.xscale("log")
if yAxis in ["P", "v"]:
plt.yscale("log")
plt.grid(True)
# Calculate point of isolines
Ps = list(np.concatenate([
np.logspace(np.log10(Pt), np.log10(0.1*Pc), points),
np.linspace(0.1*Pc, 0.9*Pc, points),
np.linspace(0.9*Pc, 0.99*Pc, points),
np.linspace(0.99*Pc, Pc, points)]))
Pl = list(np.concatenate([
np.logspace(np.log10(Pt), np.log10(0.1*Pc), points),
np.linspace(0.1*Pc, 0.5*Pc, points),
np.linspace(0.5*Pc, 0.9*Pc, points),
np.linspace(0.9*Pc, 0.99*Pc, points),
np.linspace(0.99*Pc, Pc, points),
np.linspace(Pc, 1.01*Pc, points),
np.linspace(1.01*Pc, 1.1*Pc, points),
np.linspace(1.1*Pc, 50, points),
np.linspace(50, 100, points)]))
Tl = list(np.concatenate([
np.linspace(0, 25, points),
np.linspace(25, 0.5*Tc, points),
np.linspace(0.5*Tc, 0.9*Tc, points),
np.linspace(0.9*Tc, Tc, points),
np.linspace(Tc, 1.1*Tc, points),
np.linspace(1.1*Tc, 1.1*Tc, points),
np.linspace(1.1*Tc, 800, points),
np.linspace(800, 2000, points)]))
# Calculate saturation line
print("Calculating saturation lines...")
liq = [fluid(P=p, x=0) for p in Ps]
xliq = [l.__getattribute__(xAxis) for l in liq]
yliq = [l.__getattribute__(yAxis) for l in liq]
plt.plot(xliq, yliq, **isosat_kw)
vap = [fluid(P=p, x=1) for p in Ps]
xvap = [v.__getattribute__(xAxis) for v in vap]
yvap = [v.__getattribute__(yAxis) for v in vap]
plt.plot(xvap, yvap, **isosat_kw)
# Calculate isoquality lines
print("Calculating isoquality lines...")
Q = {}
for q in isoq:
Q["%s" % q] = {}
txt = "x=%s" % q
print(" %s" % txt)
pts = [fluid(P=p, x=q) for p in Ps]
x = [p.__getattribute__(xAxis) for p in pts]
y = [p.__getattribute__(yAxis) for p in pts]
Q["%s" % q]["x"] = x
Q["%s" % q]["y"] = y
plt.plot(x, y, **isoq_kw)
# Calculate isotherm lines
if xAxis != "T" and yAxis != "T":
print("Calculating isotherm lines...")
T_ = {}
for T in isoT:
T_["%s" % T] = {}
print(" T=%sºC" % T)
# Calculate the saturation point if available
if T+273.15 < Tc:
liqsat = fluid(T=T+273.15, x=0)
vapsat = fluid(T=T+273.15, x=1)
sat = True
else:
sat = False
pts = []
for p in Pl:
try:
point = fluid(P=p, T=T+273.15)
if fluid == iapws.IAPWS97 and not region5 and \
point.region == 5:
continue
                # Add saturation point if necessary
if sat and T+273.15 < Tc and point.s < vapsat.s:
pts.append(vapsat)
pts.append(liqsat)
sat = False
pts.append(point)
except NotImplementedError:
pass
x = []
y = []
for p in pts:
if p.status:
x.append(p.__getattribute__(xAxis))
y.append(p.__getattribute__(yAxis))
plt.plot(x, y, **isoT_kw)
T_["%s" % T]["x"] = x
T_["%s" % T]["y"] = y
# Calculate isobar lines
if xAxis != "P" and yAxis != "P":
print("Calculating isobar lines...")
P_ = {}
for P in isoP:
print(" P=%sMPa" % P)
P_["%s" % P] = {}
# Calculate the saturation point if available
if P < Pc:
liqsat = fluid(P=P, x=0)
vapsat = fluid(P=P, x=1)
sat = True
else:
sat = False
pts = []
for t in Tl:
try:
point = fluid(P=P, T=t+273.15)
if fluid == iapws.IAPWS97 and not region5 and \
point.region == 5:
continue
                # Add saturation point if necessary
if sat and P < Pc and point.status and point.s > vapsat.s:
pts.append(liqsat)
pts.append(vapsat)
sat = False
pts.append(point)
except NotImplementedError:
pass
x = []
y = []
for p in pts:
if p.status:
x.append(p.__getattribute__(xAxis))
y.append(p.__getattribute__(yAxis))
plt.plot(x, y, **isoP_kw)
P_["%s" % P]["x"] = x
P_["%s" % P]["y"] = y
# Calculate isoenthalpic lines
if xAxis != "h" and yAxis != "h":
print("Calculating isoenthalpic lines...")
H_ = {}
for h in isoh:
print(" h=%skJ/kg" % h)
H_["%s" % h] = {}
pts = []
for p in Pl:
try:
point = fluid(P=p, h=h)
if fluid == iapws.IAPWS97 and not region5 and \
point.region == 5:
continue
pts.append(point)
except NotImplementedError:
pass
x = []
y = []
for p in pts:
if p.status:
x.append(p.__getattribute__(xAxis))
y.append(p.__getattribute__(yAxis))
plt.plot(x, y, **isoh_kw)
H_["%s" % h]["x"] = x
H_["%s" % h]["y"] = y
# Calculate isoentropic lines
if xAxis != "s" and yAxis != "s":
print("Calculating isoentropic lines...")
S_ = {}
for s in isos:
print(" s=%skJ/kgK" % s)
S_["%s" % s] = {}
pts = []
for p in Pl:
try:
point = fluid(P=p, s=s)
if fluid == iapws.IAPWS97 and not region5 and \
point.region == 5:
continue
pts.append(point)
except NotImplementedError:
pass
x = []
y = []
for p in pts:
if p.status:
x.append(p.__getattribute__(xAxis))
y.append(p.__getattribute__(yAxis))
plt.plot(x, y, **isos_kw)
S_["%s" % s]["x"] = x
S_["%s" % s]["y"] = y
# Calculate isochor lines
if xAxis != "v" and yAxis != "v":
print("Calculating isochor lines...")
for v in isov:
print(" v=%s" % v)
pts = [iapws.IAPWS95(T=t, v=v) for t in Tl]
x = []
y = []
for p in pts:
if p.status:
x.append(p.__getattribute__(xAxis))
y.append(p.__getattribute__(yAxis))
plt.plot(x, y, **isov_kw)
# Plot region limits
if regionBoundary:
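    # In IAPWS-IF97 the region 1/3 boundary is the T = 623.15 K isotherm between
    # the saturation pressure and 100 MPa, and the region 2/3 boundary is the B23
    # line spanning 623.15-863.15 K; the two curves below trace exactly that.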
# Boundary 1-3
Po = _PSat_T(623.15)
P = np.linspace(Po, 100, points)
pts = [fluid(P=p, T=623.15) for p in P]
x = [p.__getattribute__(xAxis) for p in pts]
y = [p.__getattribute__(yAxis) for p in pts]
plt.plot(x, y, **isosat_kw)
# Boundary 2-3
T = np.linspace(623.15, 863.15)
P = [_P23_T(t) for t in T]
    P[-1] = 100  # Avoid rounding problems with values out of range (> 100 MPa)
pts = [fluid(P=p, T=t) for p, t in zip(P, T)]
x = [p.__getattribute__(xAxis) for p in pts]
y = [p.__getattribute__(yAxis) for p in pts]
plt.plot(x, y, **isosat_kw)
# Show annotate in plot
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
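# Label rotation: the local slope of each isoline is estimated from two of its
# points, expressed as fractions of the visible axis span (log-spaced for
# log-scaled axes) and converted from radians to degrees via atan.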
for q in isoq:
x = Q["%s" % q]["x"]
y = Q["%s" % q]["y"]
txt = "x=%s" % q
i = 0
j = i+1
if xAxis in ["P", "v"]:
fx = (log(x[i])-log(x[j]))/(log(xmax)-log(xmin))
else:
fx = (x[i]-x[j])/(xmax-xmin)
if yAxis in ["P", "v"]:
fy = (log(y[i])-log(y[j]))/(log(ymax)-log(ymin))
else:
fy = (y[i]-y[j])/(ymax-ymin)
rot = atan(fy/fx)*360/2/pi
plt.annotate(txt, (x[i], y[i]), rotation=rot, **labelq_kw)
if xAxis != "T" and yAxis != "T":
for T in isoT:
x = T_["%s" % T]["x"]
y = T_["%s" % T]["y"]
if not x:
continue
txt = "%sºC" % T
i = 0
j = i+2
if xAxis in ["P", "v"]:
fx = (log(x[i])-log(x[j]))/(log(xmax)-log(xmin))
else:
fx = (x[i]-x[j])/(xmax-xmin)
if yAxis in ["P", "v"]:
fy = (log(y[i])-log(y[j]))/(log(ymax)-log(ymin))
else:
fy = (y[i]-y[j])/(ymax-ymin)
rot = atan(fy/fx)*360/2/pi
plt.annotate(txt, (x[i], y[i]), rotation=rot, **labelT_kw)
if xAxis != "P" and yAxis != "P":
for P in isoP:
x = P_["%s" % P]["x"]
y = P_["%s" % P]["y"]
if not x:
continue
txt = "%sMPa" % P
i = len(x)-15
j = i-2
if xAxis in ["P", "v"]:
fx = (log(x[i])-log(x[j]))/(log(xmax)-log(xmin))
else:
fx = (x[i]-x[j])/(xmax-xmin)
if yAxis in ["P", "v"]:
fy = (log(y[i])-log(y[j]))/(log(ymax)-log(ymin))
else:
fy = (y[i]-y[j])/(ymax-ymin)
rot = atan(fy/fx)*360/2/pi
plt.annotate(txt, (x[i], y[i]), rotation=rot, **labelP_kw)
if xAxis != "h" and yAxis != "h":
for h in isoh:
x = H_["%s" % h]["x"]
y = H_["%s" % h]["y"]
if not x:
continue
if h % 1000:
continue
txt = "%s J/g" % h
i = points
j = i+2
if xAxis in ["P", "v"]:
fx = (log(x[i])-log(x[j]))/(log(xmax)-log(xmin))
else:
fx = (x[i]-x[j])/(xmax-xmin)
if yAxis in ["P", "v"]:
fy = (log(y[i])-log(y[j]))/(log(ymax)-log(ymin))
else:
fy = (y[i]-y[j])/(ymax-ymin)
rot = atan(fy/fx)*360/2/pi
plt.annotate(txt, (x[i], y[i]), rotation=rot, **labelh_kw)
if xAxis != "s" and yAxis != "s":
for s in isos:
x = S_["%s" % s]["x"]
y = S_["%s" % s]["y"]
txt = "%s J/gK" % s
i = len(x)//2
if s > 10:
j = i+1
else:
j = i+5
if xAxis in ["P", "v"]:
fx = (log(x[i])-log(x[j]))/(log(xmax)-log(xmin))
else:
fx = (x[i]-x[j])/(xmax-xmin)
if yAxis in ["P", "v"]:
fy = (log(y[i])-log(y[j]))/(log(ymax)-log(ymin))
else:
fy = (y[i]-y[j])/(ymax-ymin)
rot = atan(fy/fx)*360/2/pi
plt.annotate(txt, (x[i], y[i]), rotation=rot, **labels_kw)
plt.show()
| jjgomera/iapws | plots.py | Python | gpl-3.0 | 12,849 |
# -*- coding: utf-8 -*-
# Copyright 2009-2016 Jason Stitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import unittest
from tidylib import Tidy, PersistentTidy, tidy_document
class TestDocs1(unittest.TestCase):
def test_not_find_lib(self):
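        # With an empty list of candidate library names, Tidy cannot locate any
        # tidy shared library, so the constructor is expected to raise OSError.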
with self.assertRaises(OSError):
tidy = Tidy(lib_names=[])
| cloudera/hue | desktop/core/ext-py/pytidylib-0.3.2/tests/test_init.py | Python | apache-2.0 | 1,380 |
'''
Created on Mar 25, 2013
@author: dmitchell
'''
import datetime
import subprocess
import unittest
import uuid
from importlib import import_module
from xblock.fields import Scope
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.exceptions import InsufficientSpecificationError, ItemNotFoundError, VersionConflictError, \
DuplicateItemError
from xmodule.modulestore.locator import CourseLocator, BlockUsageLocator, VersionTree, DefinitionLocator
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin
from pytz import UTC
from path import path
import re
import random
class SplitModuleTest(unittest.TestCase):
'''
The base set of tests manually populates a db w/ courses which have
versions. It creates unique collection names and removes them after all
tests finish.
'''
# Snippets of what would be in the django settings envs file
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'test_xmodule',
'collection': 'modulestore{0}'.format(uuid.uuid4().hex),
}
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': '',
'xblock_mixins': (InheritanceMixin, XModuleMixin)
}
MODULESTORE = {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
    # don't create a django dependency; so, this duplicates common.py in envs
match = re.search(r'(.*?/common)(?:$|/)', path(__file__))
COMMON_ROOT = match.group(1)
modulestore = None
# These version_guids correspond to values hard-coded in fixture files
# used for these tests. The files live in mitx/fixtures/splitmongo_json/*
GUID_D0 = "1d00000000000000dddd0000" # v12345d
GUID_D1 = "1d00000000000000dddd1111" # v12345d1
GUID_D2 = "1d00000000000000dddd2222" # v23456d
GUID_D3 = "1d00000000000000dddd3333" # v12345d0
GUID_D4 = "1d00000000000000dddd4444" # v23456d0
GUID_D5 = "1d00000000000000dddd5555" # v345679d
GUID_P = "1d00000000000000eeee0000" # v23456p
@staticmethod
def bootstrapDB():
'''
Loads the initial data into the db ensuring the collection name is
unique.
'''
collection_prefix = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['collection'] + '.'
dbname = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['db']
processes = [
subprocess.Popen([
'mongoimport', '-d', dbname, '-c',
collection_prefix + collection, '--jsonArray',
'--file',
SplitModuleTest.COMMON_ROOT + '/test/data/splitmongo_json/' + collection + '.json'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
for collection in ('active_versions', 'structures', 'definitions')]
for p in processes:
stdout, stderr = p.communicate()
if p.returncode != 0:
print "Couldn't run mongoimport:"
print stdout
print stderr
raise Exception("DB did not init correctly")
@classmethod
def tearDownClass(cls):
collection_prefix = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['collection'] + '.'
if SplitModuleTest.modulestore:
for collection in ('active_versions', 'structures', 'definitions'):
modulestore().db.drop_collection(collection_prefix + collection)
# drop the modulestore to force re init
SplitModuleTest.modulestore = None
def findByIdInResult(self, collection, _id):
"""
Result is a collection of descriptors. Find the one whose block id
matches the _id.
"""
for element in collection:
if element.location.usage_id == _id:
return element
class SplitModuleCourseTests(SplitModuleTest):
'''
Course CRUD operation tests
'''
def test_get_courses(self):
courses = modulestore().get_courses(branch='draft')
# should have gotten 3 draft courses
self.assertEqual(len(courses), 3, "Wrong number of courses")
# check metadata -- NOTE no promised order
course = self.findByIdInResult(courses, "head12345")
self.assertEqual(course.location.course_id, "GreekHero")
self.assertEqual(
str(course.location.version_guid), self.GUID_D0,
"course version mismatch"
)
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 6, "wrong number of tabs")
self.assertEqual(
course.display_name, "The Ancient Greek Hero",
"wrong display name"
)
self.assertEqual(
course.advertised_start, "Fall 2013",
"advertised_start"
)
self.assertEqual(
len(course.children), 3,
"children")
self.assertEqual(str(course.definition_locator.definition_id), "ad00000000000000dddd0000")
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertEqual(str(course.previous_version), self.GUID_D1)
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
def test_branch_requests(self):
# query w/ branch qualifier (both draft and published)
def _verify_published_course(courses_published):
""" Helper function for verifying published course. """
self.assertEqual(len(courses_published), 1, len(courses_published))
course = self.findByIdInResult(courses_published, "head23456")
self.assertIsNotNone(course, "published courses")
self.assertEqual(course.location.course_id, "wonderful")
self.assertEqual(str(course.location.version_guid), self.GUID_P,
course.location.version_guid)
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 4, "wrong number of tabs")
self.assertEqual(course.display_name, "The most wonderful course",
course.display_name)
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0,
"children")
_verify_published_course(modulestore().get_courses(branch='published'))
# default for branch is 'published'.
_verify_published_course(modulestore().get_courses())
def test_search_qualifiers(self):
# query w/ search criteria
courses = modulestore().get_courses(branch='draft', qualifiers={'org': 'testx'})
self.assertEqual(len(courses), 2)
self.assertIsNotNone(self.findByIdInResult(courses, "head12345"))
self.assertIsNotNone(self.findByIdInResult(courses, "head23456"))
courses = modulestore().get_courses(
branch='draft',
qualifiers={'edited_on': {"$lt": datetime.datetime(2013, 3, 28, 15)}})
self.assertEqual(len(courses), 2)
courses = modulestore().get_courses(
branch='draft',
qualifiers={'org': 'testx', "prettyid": "test_course"})
self.assertEqual(len(courses), 1)
self.assertIsNotNone(self.findByIdInResult(courses, "head12345"))
def test_get_course(self):
'''
Test the various calling forms for get_course
'''
locator = CourseLocator(version_guid=self.GUID_D1)
course = modulestore().get_course(locator)
self.assertIsNone(course.location.course_id)
self.assertEqual(str(course.location.version_guid), self.GUID_D1)
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.graceperiod, datetime.timedelta(hours=2))
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0)
self.assertEqual(str(course.definition_locator.definition_id), "ad00000000000000dddd0001")
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.55})
locator = CourseLocator(course_id='GreekHero', branch='draft')
course = modulestore().get_course(locator)
self.assertEqual(course.location.course_id, "GreekHero")
self.assertEqual(str(course.location.version_guid), self.GUID_D0)
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.advertised_start, "Fall 2013")
self.assertEqual(len(course.children), 3)
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
locator = CourseLocator(course_id='wonderful', branch='published')
course = modulestore().get_course(locator)
self.assertEqual(course.location.course_id, "wonderful")
self.assertEqual(str(course.location.version_guid), self.GUID_P)
locator = CourseLocator(course_id='wonderful', branch='draft')
course = modulestore().get_course(locator)
self.assertEqual(str(course.location.version_guid), self.GUID_D2)
def test_get_course_negative(self):
# Now negative testing
self.assertRaises(InsufficientSpecificationError,
modulestore().get_course, CourseLocator(course_id='edu.meh.blah'))
self.assertRaises(ItemNotFoundError,
modulestore().get_course, CourseLocator(course_id='nosuchthing', branch='draft'))
self.assertRaises(ItemNotFoundError,
modulestore().get_course,
CourseLocator(course_id='GreekHero', branch='published'))
def test_course_successors(self):
"""
get_course_successors(course_locator, version_history_depth=1)
"""
locator = CourseLocator(version_guid=self.GUID_D3)
result = modulestore().get_course_successors(locator)
self.assertIsInstance(result, VersionTree)
self.assertIsNone(result.locator.course_id)
self.assertEqual(str(result.locator.version_guid), self.GUID_D3)
self.assertEqual(len(result.children), 1)
self.assertEqual(str(result.children[0].locator.version_guid), self.GUID_D1)
self.assertEqual(len(result.children[0].children), 0, "descended more than one level")
result = modulestore().get_course_successors(locator, version_history_depth=2)
self.assertEqual(len(result.children), 1)
self.assertEqual(str(result.children[0].locator.version_guid), self.GUID_D1)
self.assertEqual(len(result.children[0].children), 1)
result = modulestore().get_course_successors(locator, version_history_depth=99)
self.assertEqual(len(result.children), 1)
self.assertEqual(str(result.children[0].locator.version_guid), self.GUID_D1)
self.assertEqual(len(result.children[0].children), 1)
class SplitModuleItemTests(SplitModuleTest):
'''
Item read tests including inheritance
'''
def test_has_item(self):
'''
has_item(BlockUsageLocator)
'''
course_id = 'GreekHero'
# positive tests of various forms
locator = BlockUsageLocator(version_guid=self.GUID_D1, usage_id='head12345')
self.assertTrue(modulestore().has_item(course_id, locator),
"couldn't find in %s" % self.GUID_D1)
locator = BlockUsageLocator(course_id='GreekHero', usage_id='head12345', branch='draft')
self.assertTrue(
modulestore().has_item(locator.course_id, locator),
"couldn't find in 12345"
)
self.assertTrue(
modulestore().has_item(locator.course_id, BlockUsageLocator(
course_id=locator.course_id,
branch='draft',
usage_id=locator.usage_id
)),
"couldn't find in draft 12345"
)
self.assertFalse(
modulestore().has_item(locator.course_id, BlockUsageLocator(
course_id=locator.course_id,
branch='published',
usage_id=locator.usage_id)),
"found in published 12345"
)
locator.branch = 'draft'
self.assertTrue(
modulestore().has_item(locator.course_id, locator),
"not found in draft 12345"
)
# not a course obj
locator = BlockUsageLocator(course_id='GreekHero', usage_id='chapter1', branch='draft')
self.assertTrue(
modulestore().has_item(locator.course_id, locator),
"couldn't find chapter1"
)
# in published course
locator = BlockUsageLocator(course_id="wonderful", usage_id="head23456", branch='draft')
self.assertTrue(
modulestore().has_item(
locator.course_id,
BlockUsageLocator(course_id=locator.course_id, usage_id=locator.usage_id, branch='published')
), "couldn't find in 23456"
)
locator.branch = 'published'
self.assertTrue(modulestore().has_item(course_id, locator), "couldn't find in 23456")
def test_negative_has_item(self):
# negative tests--not found
# no such course or block
course_id = 'GreekHero'
locator = BlockUsageLocator(course_id="doesnotexist", usage_id="head23456", branch='draft')
self.assertFalse(modulestore().has_item(course_id, locator))
locator = BlockUsageLocator(course_id="wonderful", usage_id="doesnotexist", branch='draft')
self.assertFalse(modulestore().has_item(course_id, locator))
# negative tests--insufficient specification
self.assertRaises(InsufficientSpecificationError, BlockUsageLocator)
self.assertRaises(InsufficientSpecificationError,
modulestore().has_item, None, BlockUsageLocator(version_guid=self.GUID_D1))
self.assertRaises(InsufficientSpecificationError,
modulestore().has_item, None, BlockUsageLocator(course_id='GreekHero'))
def test_get_item(self):
'''
get_item(blocklocator)
'''
# positive tests of various forms
locator = BlockUsageLocator(version_guid=self.GUID_D1, usage_id='head12345')
block = modulestore().get_item(locator)
self.assertIsInstance(block, CourseDescriptor)
# get_instance just redirects to get_item, ignores course_id
self.assertIsInstance(modulestore().get_instance("course_id", locator), CourseDescriptor)
def verify_greek_hero(block):
self.assertEqual(block.location.course_id, "GreekHero")
self.assertEqual(len(block.tabs), 6, "wrong number of tabs")
self.assertEqual(block.display_name, "The Ancient Greek Hero")
self.assertEqual(block.advertised_start, "Fall 2013")
self.assertEqual(len(block.children), 3)
self.assertEqual(str(block.definition_locator.definition_id), "ad00000000000000dddd0000")
# check dates and graders--forces loading of descriptor
self.assertEqual(block.edited_by, "testassist@edx.org")
self.assertDictEqual(
block.grade_cutoffs, {"Pass": 0.45},
)
locator = BlockUsageLocator(course_id='GreekHero', usage_id='head12345', branch='draft')
verify_greek_hero(modulestore().get_item(locator))
# get_instance just redirects to get_item, ignores course_id
verify_greek_hero(modulestore().get_instance("course_id", locator))
# try to look up other branches
self.assertRaises(ItemNotFoundError,
modulestore().get_item,
BlockUsageLocator(course_id=locator.as_course_locator(),
usage_id=locator.usage_id,
branch='published'))
locator.branch = 'draft'
self.assertIsInstance(
modulestore().get_item(locator),
CourseDescriptor
)
def test_get_non_root(self):
# not a course obj
locator = BlockUsageLocator(course_id='GreekHero', usage_id='chapter1', branch='draft')
block = modulestore().get_item(locator)
self.assertEqual(block.location.course_id, "GreekHero")
self.assertEqual(block.category, 'chapter')
self.assertEqual(str(block.definition_locator.definition_id), "cd00000000000000dddd0020")
self.assertEqual(block.display_name, "Hercules")
self.assertEqual(block.edited_by, "testassist@edx.org")
# in published course
locator = BlockUsageLocator(course_id="wonderful", usage_id="head23456", branch='published')
self.assertIsInstance(
modulestore().get_item(locator),
CourseDescriptor
)
# negative tests--not found
# no such course or block
locator = BlockUsageLocator(course_id="doesnotexist", usage_id="head23456", branch='draft')
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
locator = BlockUsageLocator(course_id="wonderful", usage_id="doesnotexist", branch='draft')
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
# negative tests--insufficient specification
with self.assertRaises(InsufficientSpecificationError):
modulestore().get_item(BlockUsageLocator(version_guid=self.GUID_D1))
with self.assertRaises(InsufficientSpecificationError):
modulestore().get_item(BlockUsageLocator(course_id='GreekHero', branch='draft'))
# pylint: disable=W0212
def test_matching(self):
'''
test the block and value matches help functions
'''
self.assertTrue(modulestore()._value_matches('help', 'help'))
self.assertFalse(modulestore()._value_matches('help', 'Help'))
self.assertTrue(modulestore()._value_matches(['distract', 'help', 'notme'], 'help'))
self.assertFalse(modulestore()._value_matches(['distract', 'Help', 'notme'], 'help'))
self.assertFalse(modulestore()._value_matches({'field': ['distract', 'Help', 'notme']}, {'field': 'help'}))
self.assertFalse(modulestore()._value_matches(['distract', 'Help', 'notme'], {'field': 'help'}))
self.assertTrue(modulestore()._value_matches(
{'field': ['distract', 'help', 'notme'],
'irrelevant': 2},
{'field': 'help'}))
self.assertTrue(modulestore()._value_matches('I need some help', {'$regex': 'help'}))
self.assertTrue(modulestore()._value_matches(['I need some help', 'today'], {'$regex': 'help'}))
self.assertFalse(modulestore()._value_matches('I need some help', {'$regex': 'Help'}))
self.assertFalse(modulestore()._value_matches(['I need some help', 'today'], {'$regex': 'Help'}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'c': None}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'c': None}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 2}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'c': 1}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'c': 1}))
def test_get_items(self):
'''
get_items(locator, qualifiers, [branch])
'''
locator = CourseLocator(version_guid=self.GUID_D0)
# get all modules
matches = modulestore().get_items(locator)
self.assertEqual(len(matches), 6)
matches = modulestore().get_items(locator, qualifiers={})
self.assertEqual(len(matches), 6)
matches = modulestore().get_items(locator, qualifiers={'category': 'chapter'})
self.assertEqual(len(matches), 3)
matches = modulestore().get_items(locator, qualifiers={'category': 'garbage'})
self.assertEqual(len(matches), 0)
matches = modulestore().get_items(
locator,
qualifiers=
{
'category': 'chapter',
'fields': {'display_name': {'$regex': 'Hera'}}
}
)
self.assertEqual(len(matches), 2)
matches = modulestore().get_items(locator, qualifiers={'fields': {'children': 'chapter2'}})
self.assertEqual(len(matches), 1)
self.assertEqual(matches[0].location.usage_id, 'head12345')
def test_get_parents(self):
'''
get_parent_locations(locator, [usage_id], [branch]): [BlockUsageLocator]
'''
locator = BlockUsageLocator(course_id="GreekHero", branch='draft', usage_id='chapter1')
parents = modulestore().get_parent_locations(locator)
self.assertEqual(len(parents), 1)
self.assertEqual(parents[0].usage_id, 'head12345')
self.assertEqual(parents[0].course_id, "GreekHero")
locator.usage_id = 'chapter2'
parents = modulestore().get_parent_locations(locator)
self.assertEqual(len(parents), 1)
self.assertEqual(parents[0].usage_id, 'head12345')
locator.usage_id = 'nosuchblock'
parents = modulestore().get_parent_locations(locator)
self.assertEqual(len(parents), 0)
def test_get_children(self):
"""
Test the existing get_children method on xdescriptors
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="head12345", branch='draft')
block = modulestore().get_item(locator)
children = block.get_children()
expected_ids = [
"chapter1", "chapter2", "chapter3"
]
for child in children:
self.assertEqual(child.category, "chapter")
self.assertIn(child.location.usage_id, expected_ids)
expected_ids.remove(child.location.usage_id)
self.assertEqual(len(expected_ids), 0)
class TestItemCrud(SplitModuleTest):
"""
Test create update and delete of items
"""
# DHM do I need to test this case which I believe won't work:
# 1) fetch a course and some of its blocks
# 2) do a series of CRUD operations on those previously fetched elements
# The problem here will be that the version_guid of the items will be the version at time of fetch.
# Each separate save will change the head version; so, the 2nd piecemeal change will flag the version
# conflict. That is, if versions are v0..vn and start as v0 in initial fetch, the first CRUD op will
# say it's changing an object from v0, splitMongo will process it and make the current head v1, the next
# crud op will pass in its v0 element and splitMongo will flag the version conflict.
# What I don't know is how realistic this test is and whether to wrap the modulestore with a higher level
# transactional operation which manages the version change or make the threading cache reason out whether or
# not the changes are independent and additive and thus non-conflicting.
# A use case I expect is
# (client) change this metadata
# (server) done, here's the new info which, btw, updates the course version to v1
# (client) add these children to this other node (which says it came from v0 or
# will the client have refreshed the version before doing the op?)
    # In this case, having a server-side transactional model won't help b/c the bug is a long transaction
    # on the client, where it would be a mistake for the server to assume anything about client consistency. The best
# the server could do would be to see if the parent's children changed at all since v0.
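    # A rough sketch of the conflict described above (illustrative only, not executed):
    #   block = modulestore().get_item(locator)          # fetched at head version v0
    #   modulestore().update_item(first_change, user)    # head becomes v1
    #   modulestore().update_item(block, user)           # still claims v0 as its base
    #   -> split-mongo sees the stale base version and raises VersionConflictError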
def test_create_minimal_item(self):
"""
        create_item(course_or_parent_locator, category, user, definition_locator=None, fields): new_descriptor
"""
# grab link to course to ensure new versioning works
locator = CourseLocator(course_id="GreekHero", branch='draft')
premod_course = modulestore().get_course(locator)
premod_time = datetime.datetime.now(UTC) - datetime.timedelta(seconds=1)
# add minimal one w/o a parent
category = 'sequential'
new_module = modulestore().create_item(
locator, category, 'user123',
fields={'display_name': 'new sequential'}
)
# check that course version changed and course's previous is the other one
self.assertEqual(new_module.location.course_id, "GreekHero")
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
self.assertIsNone(locator.version_guid, "Version inadvertently filled in")
current_course = modulestore().get_course(locator)
self.assertEqual(new_module.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location)
self.assertEqual(history_info['previous_version'], premod_course.location.version_guid)
self.assertEqual(str(history_info['original_version']), self.GUID_D3)
self.assertEqual(history_info['edited_by'], "user123")
self.assertGreaterEqual(history_info['edited_on'], premod_time)
self.assertLessEqual(history_info['edited_on'], datetime.datetime.now(UTC))
# check block's info: category, definition_locator, and display_name
self.assertEqual(new_module.category, 'sequential')
self.assertIsNotNone(new_module.definition_locator)
self.assertEqual(new_module.display_name, 'new sequential')
# check that block does not exist in previous version
locator = BlockUsageLocator(
version_guid=premod_course.location.version_guid,
usage_id=new_module.location.usage_id
)
self.assertRaises(ItemNotFoundError, modulestore().get_item, locator)
def test_create_parented_item(self):
"""
Test create_item w/ specifying the parent of the new item
"""
locator = BlockUsageLocator(course_id="wonderful", usage_id="head23456", branch='draft')
premod_course = modulestore().get_course(locator)
category = 'chapter'
new_module = modulestore().create_item(
locator, category, 'user123',
fields={'display_name': 'new chapter'},
definition_locator=DefinitionLocator("cd00000000000000dddd0022")
)
# check that course version changed and course's previous is the other one
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
parent = modulestore().get_item(locator)
self.assertIn(new_module.location.usage_id, parent.children)
self.assertEqual(str(new_module.definition_locator.definition_id), "cd00000000000000dddd0022")
def test_unique_naming(self):
"""
        Check that 2 modules of the same type get unique usage_ids. Also check that if creation provides
        a definition id and new definition data, it branches the definition in the db.
        Actually, this tries to test all create_item features not tested above.
"""
locator = BlockUsageLocator(course_id="contender", usage_id="head345679", branch='draft')
category = 'problem'
premod_time = datetime.datetime.now(UTC) - datetime.timedelta(seconds=1)
new_payload = "<problem>empty</problem>"
new_module = modulestore().create_item(
locator, category, 'anotheruser',
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
another_module = modulestore().create_item(
locator, category, 'anotheruser',
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=DefinitionLocator("0d00000040000000dddd0031"),
)
# check that course version changed and course's previous is the other one
parent = modulestore().get_item(locator)
self.assertNotEqual(new_module.location.usage_id, another_module.location.usage_id)
self.assertIn(new_module.location.usage_id, parent.children)
self.assertIn(another_module.location.usage_id, parent.children)
self.assertEqual(new_module.data, new_payload)
self.assertEqual(another_module.data, another_payload)
# check definition histories
new_history = modulestore().get_definition_history_info(new_module.definition_locator)
self.assertIsNone(new_history['previous_version'])
self.assertEqual(new_history['original_version'], new_module.definition_locator.definition_id)
self.assertEqual(new_history['edited_by'], "anotheruser")
self.assertLessEqual(new_history['edited_on'], datetime.datetime.now(UTC))
self.assertGreaterEqual(new_history['edited_on'], premod_time)
another_history = modulestore().get_definition_history_info(another_module.definition_locator)
self.assertEqual(str(another_history['previous_version']), '0d00000040000000dddd0031')
def test_create_continue_version(self):
"""
Test create_item using the continue_version flag
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
new_course = modulestore().create_course('test_org', 'test_transaction', user)
new_course_locator = new_course.location.as_course_locator()
index_history_info = modulestore().get_course_history_info(new_course.location)
course_block_prev_version = new_course.previous_version
course_block_update_version = new_course.update_version
self.assertIsNotNone(new_course_locator.version_guid, "Want to test a definite version")
versionless_course_locator = CourseLocator(
course_id=new_course_locator.course_id, branch=new_course_locator.branch
)
# positive simple case: no force, add chapter
new_ele = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 1'},
continue_version=True
)
# version info shouldn't change
self.assertEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.update_version, new_ele.location.version_guid)
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertEqual(refetch_course.location.version_guid, new_course.location.version_guid)
self.assertEqual(refetch_course.previous_version, course_block_prev_version)
self.assertEqual(refetch_course.update_version, course_block_update_version)
refetch_index_history_info = modulestore().get_course_history_info(refetch_course.location)
self.assertEqual(refetch_index_history_info, index_history_info)
self.assertIn(new_ele.location.usage_id, refetch_course.children)
# try to create existing item
with self.assertRaises(DuplicateItemError):
_fail = modulestore().create_item(
new_course.location, 'chapter', user,
usage_id=new_ele.location.usage_id,
fields={'display_name': 'chapter 2'},
continue_version=True
)
# start a new transaction
new_ele = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 2'},
continue_version=False
)
transaction_guid = new_ele.location.version_guid
# ensure force w/ continue gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 2'},
force=True, continue_version=True
)
# ensure trying to continue the old one gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 3'},
continue_version=True
)
# add new child to old parent in continued (leave off version_guid)
course_module_locator = BlockUsageLocator(
course_id=new_course.location.course_id,
usage_id=new_course.location.usage_id,
branch=new_course.location.branch
)
new_ele = modulestore().create_item(
course_module_locator, 'chapter', user,
fields={'display_name': 'chapter 4'},
continue_version=True
)
self.assertNotEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.location.version_guid, transaction_guid)
# check children, previous_version
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertIn(new_ele.location.usage_id, refetch_course.children)
self.assertEqual(refetch_course.previous_version, course_block_update_version)
self.assertEqual(refetch_course.update_version, transaction_guid)
def test_update_metadata(self):
"""
        test updating an item's metadata, ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="problem3_2", branch='draft')
problem = modulestore().get_item(locator)
pre_def_id = problem.definition_locator.definition_id
pre_version_guid = problem.location.version_guid
self.assertIsNotNone(pre_def_id)
self.assertIsNotNone(pre_version_guid)
premod_time = datetime.datetime.now(UTC) - datetime.timedelta(seconds=1)
self.assertNotEqual(problem.max_attempts, 4, "Invalidates rest of test")
problem.max_attempts = 4
problem.save() # decache above setting into the kvs
updated_problem = modulestore().update_item(problem, 'changeMaven')
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(updated_problem.max_attempts, 4)
# refetch to ensure original didn't change
original_location = BlockUsageLocator(
version_guid=pre_version_guid,
usage_id=problem.location.usage_id
)
problem = modulestore().get_item(original_location)
self.assertNotEqual(problem.max_attempts, 4, "original changed")
current_course = modulestore().get_course(locator)
self.assertEqual(updated_problem.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location)
self.assertEqual(history_info['previous_version'], pre_version_guid)
self.assertEqual(str(history_info['original_version']), self.GUID_D3)
self.assertEqual(history_info['edited_by'], "changeMaven")
self.assertGreaterEqual(history_info['edited_on'], premod_time)
self.assertLessEqual(history_info['edited_on'], datetime.datetime.now(UTC))
def test_update_children(self):
"""
test updating an item's children ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="chapter3", branch='draft')
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
# reorder children
self.assertGreater(len(block.children), 0, "meaningless test")
moved_child = block.children.pop()
block.save() # decache model changes
updated_problem = modulestore().update_item(block, 'childchanger')
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(updated_problem.children, block.children)
self.assertNotIn(moved_child, updated_problem.children)
locator.usage_id = "chapter1"
other_block = modulestore().get_item(locator)
other_block.children.append(moved_child)
other_block.save() # decache model changes
other_updated = modulestore().update_item(other_block, 'childchanger')
self.assertIn(moved_child, other_updated.children)
def test_update_definition(self):
"""
test updating an item's definition: ensure it gets versioned as well as the course getting versioned
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="head12345", branch='draft')
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
block.grading_policy['GRADER'][0]['min_count'] = 13
block.save() # decache model changes
updated_block = modulestore().update_item(block, 'definition_changer')
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
def test_update_manifold(self):
"""
Test updating metadata, children, and definition in a single call ensuring all the versioning occurs
"""
# first add 2 children to the course for the update to manipulate
locator = BlockUsageLocator(course_id="contender", usage_id="head345679", branch='draft')
category = 'problem'
new_payload = "<problem>empty</problem>"
modulestore().create_item(
locator, category, 'test_update_manifold',
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
modulestore().create_item(
locator, category, 'test_update_manifold',
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=DefinitionLocator("0d00000040000000dddd0031"),
)
# pylint: disable=W0212
modulestore()._clear_cache()
# now begin the test
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
self.assertNotEqual(block.grading_policy['GRADER'][0]['min_count'], 13)
block.grading_policy['GRADER'][0]['min_count'] = 13
block.children = block.children[1:] + [block.children[0]]
block.advertised_start = "Soon"
block.save() # decache model changes
updated_block = modulestore().update_item(block, "test_update_manifold")
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
self.assertEqual(updated_block.children[0], block.children[0])
self.assertEqual(updated_block.advertised_start, "Soon")
def test_delete_item(self):
course = self.create_course_for_deletion()
self.assertRaises(ValueError,
modulestore().delete_item,
course.location,
'deleting_user')
reusable_location = BlockUsageLocator(
course_id=course.location.course_id,
usage_id=course.location.usage_id,
branch='draft')
# delete a leaf
problems = modulestore().get_items(reusable_location, {'category': 'problem'})
locn_to_del = problems[0].location
new_course_loc = modulestore().delete_item(locn_to_del, 'deleting_user', delete_children=True)
deleted = BlockUsageLocator(course_id=reusable_location.course_id,
branch=reusable_location.branch,
usage_id=locn_to_del.usage_id)
self.assertFalse(modulestore().has_item(reusable_location.course_id, deleted))
self.assertRaises(VersionConflictError, modulestore().has_item, reusable_location.course_id, locn_to_del)
locator = BlockUsageLocator(
version_guid=locn_to_del.version_guid,
usage_id=locn_to_del.usage_id
)
self.assertTrue(modulestore().has_item(reusable_location.course_id, locator))
self.assertNotEqual(new_course_loc.version_guid, course.location.version_guid)
# delete a subtree
nodes = modulestore().get_items(reusable_location, {'category': 'chapter'})
new_course_loc = modulestore().delete_item(nodes[0].location, 'deleting_user', delete_children=True)
# check subtree
def check_subtree(node):
if node:
node_loc = node.location
self.assertFalse(modulestore().has_item(reusable_location.course_id,
BlockUsageLocator(
course_id=node_loc.course_id,
branch=node_loc.branch,
usage_id=node.location.usage_id)))
locator = BlockUsageLocator(
version_guid=node.location.version_guid,
usage_id=node.location.usage_id)
self.assertTrue(modulestore().has_item(reusable_location.course_id, locator))
if node.has_children:
for sub in node.get_children():
check_subtree(sub)
check_subtree(nodes[0])
def create_course_for_deletion(self):
course = modulestore().create_course('nihilx', 'deletion', 'deleting_user')
root = BlockUsageLocator(
course_id=course.location.course_id,
usage_id=course.location.usage_id,
branch='draft')
for _ in range(4):
self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])
return modulestore().get_item(root)
def create_subtree_for_deletion(self, parent, category_queue):
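        # Recursively builds a 4-ary tree, consuming one category per level
        # (chapter -> vertical -> problem), so each course built by
        # create_course_for_deletion gets 4 chapters, 16 verticals and 64 problems.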
if not category_queue:
return
node = modulestore().create_item(parent, category_queue[0], 'deleting_user')
node_loc = BlockUsageLocator(parent.as_course_locator(), usage_id=node.location.usage_id)
for _ in range(4):
self.create_subtree_for_deletion(node_loc, category_queue[1:])
class TestCourseCreation(SplitModuleTest):
"""
Test create_course, duh :-)
"""
def test_simple_creation(self):
"""
The simplest case but probing all expected results from it.
"""
# Oddly getting differences of 200nsec
pre_time = datetime.datetime.now(UTC) - datetime.timedelta(milliseconds=1)
new_course = modulestore().create_course('test_org', 'test_course', 'create_user')
new_locator = new_course.location
# check index entry
index_info = modulestore().get_course_index_info(new_locator)
self.assertEqual(index_info['org'], 'test_org')
self.assertEqual(index_info['prettyid'], 'test_course')
self.assertGreaterEqual(index_info["edited_on"], pre_time)
self.assertLessEqual(index_info["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(index_info['edited_by'], 'create_user')
# check structure info
structure_info = modulestore().get_course_history_info(new_locator)
self.assertEqual(structure_info['original_version'], index_info['versions']['draft'])
self.assertIsNone(structure_info['previous_version'])
self.assertGreaterEqual(structure_info["edited_on"], pre_time)
self.assertLessEqual(structure_info["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(structure_info['edited_by'], 'create_user')
# check the returned course object
self.assertIsInstance(new_course, CourseDescriptor)
self.assertEqual(new_course.category, 'course')
self.assertFalse(new_course.show_calculator)
self.assertTrue(new_course.allow_anonymous)
self.assertEqual(len(new_course.children), 0)
self.assertEqual(new_course.edited_by, "create_user")
self.assertEqual(len(new_course.grading_policy['GRADER']), 4)
self.assertDictEqual(new_course.grade_cutoffs, {"Pass": 0.5})
def test_cloned_course(self):
"""
Test making a course which points to an existing draft and published but not making any changes to either.
"""
pre_time = datetime.datetime.now(UTC)
original_locator = CourseLocator(course_id="wonderful", branch='draft')
original_index = modulestore().get_course_index_info(original_locator)
new_draft = modulestore().create_course(
'leech', 'best_course', 'leech_master', id_root='best',
versions_dict=original_index['versions'])
new_draft_locator = new_draft.location
self.assertRegexpMatches(new_draft_locator.course_id, r'best.*')
# the edited_by and other meta fields on the new course will be the original author not this one
self.assertEqual(new_draft.edited_by, 'test@edx.org')
self.assertLess(new_draft.edited_on, pre_time)
self.assertEqual(new_draft.location.version_guid, original_index['versions']['draft'])
# however the edited_by and other meta fields on course_index will be this one
new_index = modulestore().get_course_index_info(new_draft_locator)
self.assertGreaterEqual(new_index["edited_on"], pre_time)
self.assertLessEqual(new_index["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(new_index['edited_by'], 'leech_master')
new_published_locator = CourseLocator(course_id=new_draft_locator.course_id, branch='published')
new_published = modulestore().get_course(new_published_locator)
self.assertEqual(new_published.edited_by, 'test@edx.org')
self.assertLess(new_published.edited_on, pre_time)
self.assertEqual(new_published.location.version_guid, original_index['versions']['published'])
# changing this course will not change the original course
# using new_draft.location will insert the chapter under the course root
new_item = modulestore().create_item(
new_draft.location, 'chapter', 'leech_master',
fields={'display_name': 'new chapter'}
)
new_draft_locator.version_guid = None
new_index = modulestore().get_course_index_info(new_draft_locator)
self.assertNotEqual(new_index['versions']['draft'], original_index['versions']['draft'])
new_draft = modulestore().get_course(new_draft_locator)
self.assertEqual(new_item.edited_by, 'leech_master')
self.assertGreaterEqual(new_item.edited_on, pre_time)
self.assertNotEqual(new_item.location.version_guid, original_index['versions']['draft'])
self.assertNotEqual(new_draft.location.version_guid, original_index['versions']['draft'])
structure_info = modulestore().get_course_history_info(new_draft_locator)
self.assertGreaterEqual(structure_info["edited_on"], pre_time)
self.assertLessEqual(structure_info["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(structure_info['edited_by'], 'leech_master')
original_course = modulestore().get_course(original_locator)
self.assertEqual(original_course.location.version_guid, original_index['versions']['draft'])
self.assertFalse(
modulestore().has_item(new_draft_locator.course_id, BlockUsageLocator(
original_locator,
usage_id=new_item.location.usage_id
))
)
def test_derived_course(self):
"""
Create a new course which overrides metadata and course_data
"""
pre_time = datetime.datetime.now(UTC)
original_locator = CourseLocator(course_id="contender", branch='draft')
original = modulestore().get_course(original_locator)
original_index = modulestore().get_course_index_info(original_locator)
fields = {}
for field in original.fields.values():
if field.scope == Scope.content and field.name != 'location':
fields[field.name] = getattr(original, field.name)
elif field.scope == Scope.settings:
fields[field.name] = getattr(original, field.name)
fields['grading_policy']['GRADE_CUTOFFS'] = {'A': .9, 'B': .8, 'C': .65}
fields['display_name'] = 'Derivative'
new_draft = modulestore().create_course(
'leech', 'derivative', 'leech_master', id_root='counter',
versions_dict={'draft': original_index['versions']['draft']},
fields=fields
)
new_draft_locator = new_draft.location
self.assertRegexpMatches(new_draft_locator.course_id, r'counter.*')
# the edited_by and other meta fields on the new course will be the original author not this one
self.assertEqual(new_draft.edited_by, 'leech_master')
self.assertGreaterEqual(new_draft.edited_on, pre_time)
self.assertNotEqual(new_draft.location.version_guid, original_index['versions']['draft'])
# however the edited_by and other meta fields on course_index will be this one
new_index = modulestore().get_course_index_info(new_draft_locator)
self.assertGreaterEqual(new_index["edited_on"], pre_time)
self.assertLessEqual(new_index["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(new_index['edited_by'], 'leech_master')
self.assertEqual(new_draft.display_name, fields['display_name'])
self.assertDictEqual(
new_draft.grading_policy['GRADE_CUTOFFS'],
fields['grading_policy']['GRADE_CUTOFFS']
)
def test_update_course_index(self):
"""
Test changing the org, pretty id, etc of a course. Test that it doesn't allow changing the id, etc.
"""
locator = CourseLocator(course_id="GreekHero", branch='draft')
modulestore().update_course_index(locator, {'org': 'funkyU'})
course_info = modulestore().get_course_index_info(locator)
self.assertEqual(course_info['org'], 'funkyU')
modulestore().update_course_index(locator, {'org': 'moreFunky', 'prettyid': 'Ancient Greek Demagods'})
course_info = modulestore().get_course_index_info(locator)
self.assertEqual(course_info['org'], 'moreFunky')
self.assertEqual(course_info['prettyid'], 'Ancient Greek Demagods')
self.assertRaises(ValueError, modulestore().update_course_index, locator, {'_id': 'funkygreeks'})
with self.assertRaises(ValueError):
modulestore().update_course_index(
locator,
{'edited_on': datetime.datetime.now(UTC)}
)
with self.assertRaises(ValueError):
modulestore().update_course_index(
locator,
{'edited_by': 'sneak'}
)
self.assertRaises(ValueError, modulestore().update_course_index, locator,
{'versions': {'draft': self.GUID_D1}})
# an allowed but not necessarily recommended way to revert the draft version
versions = course_info['versions']
versions['draft'] = self.GUID_D1
modulestore().update_course_index(locator, {'versions': versions}, update_versions=True)
course = modulestore().get_course(locator)
self.assertEqual(str(course.location.version_guid), self.GUID_D1)
# an allowed but not recommended way to publish a course
versions['published'] = self.GUID_D1
modulestore().update_course_index(locator, {'versions': versions}, update_versions=True)
course = modulestore().get_course(CourseLocator(course_id=locator.course_id, branch="published"))
self.assertEqual(str(course.location.version_guid), self.GUID_D1)
def test_create_with_root(self):
"""
Test create_course with a specified root id and category
"""
user = random.getrandbits(32)
new_course = modulestore().create_course(
'test_org', 'test_transaction', user,
root_usage_id='top', root_category='chapter'
)
self.assertEqual(new_course.location.usage_id, 'top')
self.assertEqual(new_course.category, 'chapter')
# look at db to verify
db_structure = modulestore().structures.find_one({
'_id': new_course.location.as_object_id(new_course.location.version_guid)
})
self.assertIsNotNone(db_structure, "Didn't find course")
self.assertNotIn('course', db_structure['blocks'])
self.assertIn('top', db_structure['blocks'])
self.assertEqual(db_structure['blocks']['top']['category'], 'chapter')
class TestInheritance(SplitModuleTest):
"""
Test the metadata inheritance mechanism.
"""
def test_inheritance(self):
"""
The actual test
"""
# Note, not testing value where defined (course) b/c there's no
# defined accessor for it on CourseDescriptor.
locator = BlockUsageLocator(course_id="GreekHero", usage_id="problem3_2", branch='draft')
node = modulestore().get_item(locator)
# inherited
self.assertEqual(node.graceperiod, datetime.timedelta(hours=2))
locator = BlockUsageLocator(course_id="GreekHero", usage_id="problem1", branch='draft')
node = modulestore().get_item(locator)
# overridden
self.assertEqual(node.graceperiod, datetime.timedelta(hours=4))
# TODO test inheritance after set and delete of attrs
#===========================================
# This mocks the django.modulestore() function and is intended purely to disentangle
# the tests from django
def modulestore():
def load_function(engine_path):
module_path, _, name = engine_path.rpartition('.')
return getattr(import_module(module_path), name)
if SplitModuleTest.modulestore is None:
SplitModuleTest.bootstrapDB()
class_ = load_function(SplitModuleTest.MODULESTORE['ENGINE'])
options = {}
options.update(SplitModuleTest.MODULESTORE['OPTIONS'])
options['render_template'] = render_to_template_mock
# pylint: disable=W0142
SplitModuleTest.modulestore = class_(
SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG'],
**options
)
return SplitModuleTest.modulestore
# pylint: disable=W0613
def render_to_template_mock(*args):
pass
| abo-abo/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore.py | Python | agpl-3.0 | 55,767 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class BatchNormalizationTest(test.TestCase):
def _batch_norm(self, x, mean, var, offset, scale, epsilon):
# We compute the batch norm manually in this function because
# nn_impl.batch_normalization does not support float16 yet.
# TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
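    # The usual form scale * (x - mean) / sqrt(var + epsilon) + offset is
    # rearranged as x * inv + (offset - mean * inv), with
    # inv = scale / sqrt(var + epsilon), so x is multiplied only once.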
inv = math_ops.rsqrt(var + epsilon) * scale
y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
return math_ops.cast(y, x.dtype)
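  # _batch_norm above implements the textbook formula y = (x - mean) / sqrt(var + epsilon) * scale + offset,
  # rewritten as y = x * inv + (offset - mean * inv) with inv = rsqrt(var + epsilon) * scale.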
def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
y = self._batch_norm(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return self.evaluate(y)
def _test_inference(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
mean = constant_op.constant(mean_val, name='mean')
var = constant_op.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val = self.evaluate(y)
y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
data_format)
# An atol value of 1e-3 is too small for float16's, because some adjacent
# float16 values that y_val can take are greater than 1e-3 apart, e.g.
# 2.16602 and 2.16797.
atol = 2e-3 if x_dtype == np.float16 else 1e-3
self.assertAllClose(y_ref, y_val, atol=atol)
def _training_ref(self, x, scale, offset, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
mean, var = nn_impl.moments(
math_ops.cast(x, scale.dtype), [0, 1, 2], keep_dims=False)
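    # Reducing over axes [0, 1, 2] (batch, height, width) gives per-channel statistics,
    # matching what fused_batch_norm computes in training mode.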
y = self._batch_norm(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return self.evaluate(y), self.evaluate(mean), self.evaluate(var)
def _test_training(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
epsilon = 0.001
y, mean, var = nn_impl.fused_batch_norm(
x,
scale,
offset,
epsilon=epsilon,
data_format=data_format,
is_training=True)
y_val, mean_val, var_val = self.evaluate([y, mean, var])
y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
data_format)
y_atol = 2e-3 if x_dtype == np.float16 else 1e-3
self.assertAllClose(y_ref, y_val, atol=y_atol)
self.assertAllClose(mean_ref, mean_val, atol=1e-3)
# This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
# the denominator in the formula to calculate variance, while
# tf.compat.v1.nn.fused_batch_norm has Bessel's correction built in.
sample_size = x_val.size / scale_val.size
var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
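      # Worked example: in testTrainingShape2, x_shape [1, 1, 6, 2] and scale_shape [2] give
      # sample_size = 12 / 2 = 6, so the biased reference variance is rescaled by 6 / 5.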
self.assertAllClose(var_ref, var_val, atol=1e-3)
def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape):
"""Computes the gradient error for float16 inputs and/or outputs.
This returns the same value as gradient_checker.compute_gradient_error. The
difference is that gradient_checker.compute_gradient_error does not
numerically compute the gradients in a numerically stable way for float16
tensors. To fix this, this function requires float32 versions of x and y to
numerically compute the gradients, to compare with the float16 symbolically
computed gradients.
Args:
x: The input tensor.
x32: A float32 version of x.
x_shape: The shape of x.
y: The output tensor.
y32: A float32 version of y. Must be calculated based on x32, not x.
y_shape: The shape of y.
Returns:
The maximum error in between the two Jacobians, as in
gradient_checker.compute_gradient_error.
"""
x_init_val = np.random.random_sample(x_shape).astype(np.float16)
x32_init_val = x_init_val.astype(np.float32)
# TODO(reedwm): Do not perform the unnecessary computations in
# compute_gradient, since they double the computation time of this function.
theoretical_grad, _ = gradient_checker.compute_gradient(
x, x_shape, y, y_shape, delta=1e-3, x_init_value=x_init_val)
_, numerical_grad = gradient_checker.compute_gradient(
x32, x_shape, y32, y_shape, delta=1e-3, x_init_value=x32_init_val)
# If grad is empty, no error.
if theoretical_grad.size == 0 and numerical_grad.size == 0:
return 0
return np.fabs(theoretical_grad - numerical_grad).max()
def _test_gradient(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC',
is_training=True):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
if x_dtype != np.float16:
err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
err_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, y, x_shape)
err_offset = gradient_checker.compute_gradient_error(
offset, scale_shape, y, x_shape)
else:
x32 = constant_op.constant(x_val, name='x32', dtype=dtypes.float32)
y32, _, _ = nn_impl.fused_batch_norm(
x32,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
err_x = self._compute_gradient_error_float16(x, x32, x_shape, y, y32,
x_shape)
err_scale = self._compute_gradient_error_float16(
scale, scale, scale_shape, y, y32, x_shape)
err_offset = self._compute_gradient_error_float16(
offset, offset, scale_shape, y, y32, x_shape)
x_err_tolerance = 2e-3 if x_dtype == np.float16 else 1e-3
scale_err_tolerance = 1e-3
self.assertLess(err_x, x_err_tolerance)
self.assertLess(err_scale, scale_err_tolerance)
self.assertLess(err_offset, scale_err_tolerance)
def _test_grad_grad(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC',
is_training=True,
err_tolerance=1e-3):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
grad_y_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
grad_y = constant_op.constant(grad_y_val, name='grad_y')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
grad_x, grad_scale, grad_offset = gradients_impl.gradients(
y, [x, scale, offset], grad_y)
if is_training:
epsilon = y.op.get_attr('epsilon')
data_format = y.op.get_attr('data_format')
grad_vals = self.evaluate([grad_x, grad_scale, grad_offset])
grad_internal = nn_grad._BatchNormGrad(grad_y, x, scale, pop_mean,
pop_var, epsilon, data_format)
grad_internal_vals = self.evaluate(list(grad_internal))
for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):
self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)
if x_dtype != np.float16:
err_grad_grad_y_1 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_x, x_shape)
err_grad_grad_y_2 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_scale, scale_shape)
err_grad_grad_y_3 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_offset, scale_shape)
# In freeze mode, grad_x is not a function of x.
if is_training:
err_grad_x_1 = gradient_checker.compute_gradient_error(
x, x_shape, grad_x, x_shape)
err_grad_x_2 = gradient_checker.compute_gradient_error(
x, x_shape, grad_scale, scale_shape)
err_grad_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, grad_x, x_shape)
else:
x32 = constant_op.constant(x_val, dtype=dtypes.float32, name='x32')
grad_y32 = constant_op.constant(
grad_y_val, dtype=dtypes.float32, name='grad_y32')
y32, _, _ = nn_impl.fused_batch_norm(
x32,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
grad_x32, grad_scale32, grad_offset32 = gradients_impl.gradients(
y32, [x32, scale, offset], grad_y32)
err_grad_grad_y_1 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_x, grad_x32, x_shape)
err_grad_grad_y_2 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_scale, grad_scale32, scale_shape)
err_grad_grad_y_3 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_offset, grad_offset32, scale_shape)
# In freeze mode, grad_x is not a function of x.
if is_training:
err_grad_x_1 = self._compute_gradient_error_float16(
x, x32, x_shape, grad_x, grad_x32, x_shape)
err_grad_x_2 = self._compute_gradient_error_float16(
x, x32, x_shape, grad_scale, grad_scale32, scale_shape)
err_grad_scale = self._compute_gradient_error_float16(
scale, scale, scale_shape, grad_x, grad_x32, x_shape)
self.assertLess(err_grad_grad_y_1, err_tolerance)
self.assertLess(err_grad_grad_y_2, err_tolerance)
self.assertLess(err_grad_grad_y_3, err_tolerance)
if is_training:
self.assertLess(err_grad_x_1, err_tolerance)
self.assertLess(err_grad_x_2, err_tolerance)
self.assertLess(err_grad_scale, err_tolerance)
def testInferenceShape1(self):
x_shape = [1, 1, 6, 1]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_inference(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
self._test_inference(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
self._test_inference(
x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC')
def testInferenceShape2(self):
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_inference(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
self._test_inference(
x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC')
def testInferenceShape3(self):
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_inference(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')
def testInferenceShape4(self):
x_shape = [27, 131, 127, 6]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_inference(
x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
self._test_inference(
x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
self._test_inference(
x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
def testInferenceShape5(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
x_shape = [0, 131, 127, 6]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_inference(
x_shape,
dtype, [131],
np.float32,
use_gpu=True,
data_format='NCHW')
self._test_inference(
x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
self._test_inference(
x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
def testTrainingShape1(self):
x_shape = [1, 1, 6, 1]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_training(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
self._test_training(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
self._test_training(
x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC')
def testTrainingShape2(self):
x_shape = [1, 1, 6, 2]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_training(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
self._test_training(
x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC')
def testTrainingShape3(self):
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_training(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')
def testTrainingShape4(self):
x_shape = [27, 131, 127, 6]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_training(
x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
self._test_training(
x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
self._test_training(
x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
def testTrainingShape5(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
x_shape = [0, 131, 127, 6]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_training(
x_shape,
dtype, [131],
np.float32,
use_gpu=True,
data_format='NCHW')
self._test_training(
x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
self._test_training(
x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
@test_util.run_deprecated_v1
def testBatchNormGradShape1(self):
for is_training in [True, False]:
x_shape = [1, 1, 6, 1]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape,
dtype, [1],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [1],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [1],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training)
@test_util.run_deprecated_v1
def testBatchNormGradShape2(self):
for is_training in [True, False]:
x_shape = [1, 1, 6, 2]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape,
dtype, [2],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [2],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training)
@test_util.run_deprecated_v1
def testBatchNormGradShape3(self):
for is_training in [True, False]:
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_gradient(
x_shape,
dtype, [2],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training)
@test_util.run_deprecated_v1
def testBatchNormGradShape4(self):
for is_training in [True, False]:
x_shape = [5, 7, 11, 4]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape,
dtype, [7],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [4],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [4],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training)
@test_util.run_deprecated_v1
@test_util.disable_xla('This test never passed for XLA')
def testBatchNormGradShape5(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
for is_training in [True, False]:
x_shape = [0, 7, 11, 4]
for dtype in [np.float16, np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape,
dtype, [7],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [4],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [4],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training)
def _testBatchNormGradGrad(self, config):
shape = config['shape']
err_tolerance = config['err_tolerance']
dtype = config['dtype']
for is_training in [True, False]:
if test.is_gpu_available(cuda_only=True):
self._test_grad_grad(
shape,
dtype, [shape[3]],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape,
dtype, [shape[1]],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape,
dtype, [shape[3]],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training,
err_tolerance=err_tolerance)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig1(self):
config = {
'shape': [2, 3, 4, 5],
'err_tolerance': 1e-2,
'dtype': np.float32,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig2(self):
config = {
'shape': [2, 3, 2, 2],
'err_tolerance': 1e-3,
'dtype': np.float32,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig3(self):
config = {
'shape': [2, 3, 4, 5],
'err_tolerance': 1e-2,
'dtype': np.float16,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig4(self):
config = {
'shape': [2, 3, 2, 2],
'err_tolerance': 2e-3,
'dtype': np.float16,
}
self._testBatchNormGradGrad(config)
if __name__ == '__main__':
test.main()
| ghchinoy/tensorflow | tensorflow/python/ops/nn_fused_batchnorm_test.py | Python | apache-2.0 | 24,929 |
'''
Copyright (c) OS-Networks, http://os-networks.net
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the HWIOS Project nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
from django.utils.translation import ugettext_lazy as _
ws_table = {
'notify-error': 0,
'notify-warning': 1,
'notify-info': 2
}
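# The bare ugettext_lazy calls below are left unassigned on purpose: they register these labels
# with Django's string-extraction tooling (makemessages) so they appear in translation catalogs.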
_('Register')
_('Logout')
_('Maps')
_('Profiles')
_('Profile Management')
_('Region Management')
_('Avatar Management')
_('Settings') | Knygar/hwios | services/web_ui/models/statics.py | Python | bsd-3-clause | 1,735 |
#! /usr/bin/env python
import sys
import os
import time
import numpy
from scipy.io.wavfile import read,write
import MySQLdb
#fp = "/home/pi/dr.wav"
fp = '/home/pi/Recordings/log.wav'
duration = 10 # seconds
shell_cmd = 'arecord -D plughw:0 --duration=' + str(duration) + ' -f cd -vv ' + fp # plughw:0 set to whatever usb port the mic is plugged into
os.system(shell_cmd) #record for 10s
time.sleep(1) # even though os.system waits for its process to complete, wait 1s for good measure
rate,data = read(fp)
y = data[:,1]
data = data - numpy.average(data)
data = numpy.absolute(data)
avg = numpy.average(data) #avg, max samples in the recording
maxval = numpy.max(data)
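# avg is the mean absolute sample amplitude of the clip (after removing the DC offset);
# maxval is the loudest single sample.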
print avg
print maxval
db = MySQLdb.connect(host="localhost", user="root", passwd="toor", db="pythia")
cur = db.cursor()
sql = 'INSERT INTO `audio` (`avg`, `max`) VALUES (\'' + str(avg) + '\',\'' + str(maxval) + '\')' #store in db
cur.execute(sql)
db.commit()
db.close()
| austingayler/pythia | scripts/record.py | Python | apache-2.0 | 945 |
from collections import Counter
from typing import List, Iterable
import numpy as np
import tensorflow as tf
from docqa.configurable import Configurable
from docqa.nn.layers import Encoder
from docqa.utils import ResourceLoader
"""
Classes for embedding words/chars
"""
class WordEmbedder(Configurable):
"""
Responsible for mapping words -> ids, ids -> words, and ids -> embeddings
matrices. Needs to be initialized from a corpus by `set_vocab` after construction
"""
def set_vocab(self, corpus, word_vec_loader: ResourceLoader, special_tokens: List[str]):
raise NotImplementedError()
def is_vocab_set(self):
raise NotImplementedError()
def question_word_to_ix(self, word, is_train) -> int:
raise NotImplementedError()
def context_word_to_ix(self, word, is_train) -> int:
raise NotImplementedError()
def query_once(self) -> bool:
"""
Should the embedder be queried once for each unique word in the input, or once for each word.
Intended to support placeholders, although I ended up not experimenting much w/that route
"""
return False
def init(self, word_vec_loader, voc: Iterable[str]):
raise NotImplementedError()
def embed(self, is_train, *word_ix_and_mask):
""" [(word_ix, mask)...] -> [word_embed, ...]"""
        raise NotImplementedError()
class CharEmbedder(Configurable):
"""
Responsible for mapping char -> ids, ids -> char, and ids -> embeddings
Needs to be initialized from a corpus by `set_vocab` after construction
"""
def set_vocab(self, corpus):
raise NotImplementedError()
def get_word_size_th(self):
raise ValueError()
def char_to_ix(self, char):
raise NotImplementedError()
def init(self, word_vec_loader, voc: Iterable[str]):
raise NotImplementedError()
def embed(self, is_train, *char_ix_and_mask):
""" [(char_ix, mask)...] -> [word_embed, ...]"""
        raise NotImplementedError()
class LearnedCharEmbedder(CharEmbedder):
def __init__(self, word_size_th: int, char_th: int, char_dim: int,
init_scale: float=0.1, force_cpu: bool=False):
self.force_cpu = force_cpu
self.word_size_th = word_size_th
self.char_th = char_th
self.char_dim = char_dim
self.init_scale = init_scale
self._char_to_ix = None
def get_word_size_th(self):
return self.word_size_th
def set_vocab(self, corpus):
w_counts = corpus.get_word_counts()
counts = Counter()
for w,count in w_counts.items():
for c in w:
counts[c] += count
self._char_to_ix = {c:i+2 for i,c in enumerate(c for c,count in counts.items() if count >= self.char_th)}
print("Learning an embedding for %d characters" % (len(self._char_to_ix)-1))
def init(self, word_vec_loader, voc: Iterable[str]):
pass
def char_to_ix(self, char):
return self._char_to_ix.get(char, 1)
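    # Index 0 is reserved for padding (the zero row prepended in _embed) and index 1 is the
    # out-of-vocabulary bucket, so learned character ids start at 2.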
def embed(self, is_train, *char_ix):
if self.force_cpu:
with tf.device('/cpu:0'):
return self._embed(*char_ix)
else:
return self._embed(*char_ix)
def _embed(self, *char_ix):
zero = tf.zeros((1, self.char_dim), dtype=np.float32)
mat = tf.get_variable("char_emb_mat", (len(self._char_to_ix)+1, self.char_dim),
tf.float32, initializer=tf.random_uniform_initializer(-self.init_scale, self.init_scale))
emb_mat = tf.concat([zero, mat], axis=0)
        # One embedding lookup per (char_ix, mask) input; the masks are applied by callers.
        return [tf.nn.embedding_lookup(emb_mat, x[0]) for x in char_ix]
def __setstate__(self, state):
if "state" in state:
if "force_cpu" not in state["state"]:
state["state"]["force_cpu"] = False
super().__setstate__(state)
class CharWordEmbedder(Configurable):
"""
Derives word embeddings from character embeddings by combining a character embedder and a reduce layer
"""
def __init__(self, embedder: CharEmbedder, layer: Encoder,
shared_parameters: bool):
self.embeder = embedder
self.layer = layer
self.shared_parameters = shared_parameters
def embed(self, is_train, *char_ix):
embeds = self.embeder.embed(is_train, *char_ix)
if self.shared_parameters:
with tf.variable_scope("embedding"):
output = [self.layer.apply(is_train, embeds[0], char_ix[0][1])]
with tf.variable_scope("embedding", reuse=True):
for i in range(1, len(embeds)):
output.append(self.layer.apply(is_train, embeds[i], char_ix[i][1]))
else:
output = []
for i, emb in enumerate(embeds):
with tf.variable_scope("embedding%d_%s" % (i, emb.name)):
output.append(self.layer.apply(is_train, emb, char_ix[i][1]))
return output
def __setstate__(self, state):
if "state" in state:
state["state"]["version"] = state["version"]
state = state["state"]
if "share" in state:
state["shared_parameters"] = state["share"]
del state["share"]
super().__setstate__(state)
def shrink_embed(mat, word_ixs: List):
"""
Build an embedding matrix that contains only the elements in `word_ixs`,
and map `word_ixs` to tensors the index into they new embedding matrix.
Useful if you want to dropout the embeddings w/o dropping out the entire matrix
"""
all_words, out_id = tf.unique(tf.concat([tf.reshape(x, (-1,)) for x in word_ixs], axis=0))
mat = tf.gather(mat, all_words)
partitions = tf.split(out_id, [tf.reduce_prod(tf.shape(x)) for x in word_ixs])
partitions = [tf.reshape(x, tf.shape(o)) for x,o in zip(partitions, word_ixs)]
return mat, partitions
class FixedWordEmbedder(WordEmbedder):
def __init__(self,
vec_name: str,
word_vec_init_scale: float = 0.05,
learn_unk: bool = True,
keep_probs: float = 1,
keep_word: float= 1,
shrink_embed: bool=False,
cpu=False):
self.keep_word = keep_word
self.keep_probs = keep_probs
self.word_vec_init_scale = word_vec_init_scale
self.learn_unk = learn_unk
self.vec_name = vec_name
self.cpu = cpu
self.shrink_embed = shrink_embed
# Built in `init`
self._word_to_ix = None
self._word_emb_mat = None
self._special_tokens = None
def set_vocab(self, _, loader: ResourceLoader, special_tokens: List[str]):
if special_tokens is not None:
self._special_tokens = sorted(special_tokens)
def is_vocab_set(self):
return True
def question_word_to_ix(self, word, is_train):
ix = self._word_to_ix.get(word, 1)
if ix == 1:
return self._word_to_ix.get(word.lower(), 1)
else:
return ix
def context_word_to_ix(self, word, is_train):
# print(word)
ix = self._word_to_ix.get(word, 1)
if ix == 1:
return self._word_to_ix.get(word.lower(), 1)
else:
return ix
@property
def version(self):
# added `cpu`
return 1
def init(self, loader: ResourceLoader, voc: Iterable[str]):
if self.cpu:
with tf.device("/cpu:0"):
self._init(loader, voc)
else:
self._init(loader, voc)
def _init(self, loader: ResourceLoader, voc: Iterable[str]):
# TODO we should not be building variables here
if voc is not None:
word_to_vec = loader.load_word_vec(self.vec_name, voc)
else:
word_to_vec = loader.load_word_vec(self.vec_name)
voc = set(word_to_vec.keys())
self._word_to_ix = {}
dim = next(iter(word_to_vec.values())).shape[0]
null_embed = tf.zeros((1, dim), dtype=tf.float32)
unk_embed = tf.get_variable(shape=(1, dim), name="unk_embed",
dtype=np.float32, trainable=self.learn_unk,
initializer=tf.random_uniform_initializer(-self.word_vec_init_scale,
self.word_vec_init_scale))
ix = 2
matrix_list = [null_embed, unk_embed]
if self._special_tokens is not None and len(self._special_tokens) > 0:
print("Building embeddings for %d special_tokens" % (len(self._special_tokens)))
tok_embed = tf.get_variable(shape=(len(self._special_tokens), dim), name="token_embed",
dtype=np.float32, trainable=True,
initializer=tf.random_uniform_initializer(-self.word_vec_init_scale,
self.word_vec_init_scale))
matrix_list.append(tok_embed)
for token in self._special_tokens:
self._word_to_ix[token] = ix
ix += 1
mat = []
for word in voc:
if word in self._word_to_ix:
                continue # in case we already added it after seeing a capitalized version of `word`
if word in word_to_vec:
mat.append(word_to_vec[word])
self._word_to_ix[word] = ix
ix += 1
else:
                lower = word.lower() # Fall back to the lower-case version
if lower in word_to_vec and lower not in self._word_to_ix:
mat.append(word_to_vec[lower])
self._word_to_ix[lower] = ix
ix += 1
print("Had pre-trained word embeddings for %d of %d words" % (len(mat), len(voc)))
matrix_list.append(tf.constant(value=np.vstack(mat)))
self._word_emb_mat = tf.concat(matrix_list, axis=0)
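        # Resulting matrix layout: row 0 is the all-zero padding vector, row 1 the UNK vector
        # (trainable when learn_unk is set), then any special tokens, then the pre-trained
        # vectors; _word_to_ix maps words to rows of this matrix.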
def embed(self, is_train, *word_ix):
if any(len(x) != 2 for x in word_ix):
raise ValueError()
mat = self._word_emb_mat
if self.keep_probs < 1:
mat = tf.cond(is_train,
lambda: tf.nn.dropout(mat, self.keep_probs),
lambda: mat)
if self.keep_word < 1:
mat = tf.cond(is_train,
lambda: tf.nn.dropout(mat, self.keep_word, (mat.shape.as_list()[0], 1)),
lambda: mat)
if self.cpu:
with tf.device("/cpu:0"):
return [tf.nn.embedding_lookup(self._word_emb_mat, x[0]) for x in word_ix]
else:
return [tf.nn.embedding_lookup(self._word_emb_mat, x[0]) for x in word_ix]
def __getstate__(self):
state = dict(self.__dict__)
state["_word_emb_mat"] = None # we will rebuild these anyway
state["_word_to_ix"] = None
return dict(version=self.version, state=state)
def __setstate__(self, state):
if "state" in state:
if "cpu" not in state["state"]:
state["state"]["cpu"] = False
if "keep_probs" not in state["state"]:
state["state"]["keep_probs"] = 1.0
if "keep_word" not in state["state"]:
state["state"]["keep_word"] = 1.0
if "_special_tokens" not in state["state"]:
state["state"]["_special_tokens"] = []
super().__setstate__(state)
class FixedWordEmbedderPlaceholders(WordEmbedder):
def __init__(self,
vec_name: str,
word_vec_init_scale: float = 0.05,
keep_probs: float = 1,
keep_word: float= 1,
n_placeholders: int=1000,
placeholder_stddev: float=0.5,
placeholder_flag: bool=False,
cpu=False):
self.placeholder_stddev = placeholder_stddev
self.placeholder_flag = placeholder_flag
self.keep_word = keep_word
self.keep_probs = keep_probs
self.word_vec_init_scale = word_vec_init_scale
self.vec_name = vec_name
self.cpu = cpu
self.n_placeholders = n_placeholders
self._on_placeholder = 0
self._placeholder_start = None
# Built in `init`
self._word_to_ix = None
self._word_emb_mat = None
self._special_tokens = None
def set_vocab(self, _, loader: ResourceLoader, special_tokens: List[str]):
if special_tokens is not None:
self._special_tokens = sorted(special_tokens)
def is_vocab_set(self):
return True
def query_once(self) -> bool:
return True
def question_word_to_ix(self, word, is_train):
ix = self._word_to_ix.get(word)
if ix is None:
ix = self._word_to_ix.get(word.lower())
if ix is None:
self._on_placeholder = (self._on_placeholder + 1) % self.n_placeholders
ix = self._placeholder_start + self._on_placeholder
return ix
def context_word_to_ix(self, word, is_train):
ix = self._word_to_ix.get(word)
if ix is None:
ix = self._word_to_ix.get(word.lower())
if ix is None:
self._on_placeholder = (self._on_placeholder + 1) % self.n_placeholders
ix = self._placeholder_start + self._on_placeholder
return ix
def init(self, loader: ResourceLoader, voc: Iterable[str]):
if self.cpu:
with tf.device("/cpu:0"):
self._init(loader, voc)
else:
self._init(loader, voc)
def _init(self, loader: ResourceLoader, voc: Iterable[str]):
# TODO we should not be building variables here
if voc is not None:
word_to_vec = loader.load_word_vec(self.vec_name, voc)
else:
word_to_vec = loader.load_word_vec(self.vec_name)
voc = set(word_to_vec.keys())
self._word_to_ix = {}
dim = next(iter(word_to_vec.values())).shape[0]
if self.placeholder_flag:
dim += 1
null_embed = tf.zeros((1, dim), dtype=tf.float32)
ix = 1
matrix_list = [null_embed]
if self._special_tokens is not None and len(self._special_tokens) > 0:
print("Building embeddings for %d special_tokens" % (len(self._special_tokens)))
tok_embed = tf.get_variable(shape=(len(self._special_tokens), dim), name="token_embed",
dtype=np.float32, trainable=True,
initializer=tf.random_uniform_initializer(-self.word_vec_init_scale,
self.word_vec_init_scale))
matrix_list.append(tok_embed)
for token in self._special_tokens:
self._word_to_ix[token] = ix
ix += 1
mat = []
for word in voc:
if word in self._word_to_ix:
                continue # in case we already added it after seeing a capitalized version of `word`
if word in word_to_vec:
mat.append(word_to_vec[word])
self._word_to_ix[word] = ix
ix += 1
else:
                lower = word.lower() # Fall back to the lower-case version
if lower in word_to_vec and lower not in self._word_to_ix:
mat.append(word_to_vec[lower])
self._word_to_ix[lower] = ix
ix += 1
print("Had pre-trained word embeddings for %d of %d words" % (len(mat), len(voc)))
mat = np.vstack(mat)
if self.placeholder_flag:
mat = np.concatenate([mat, np.zeros((len(mat), 1), dtype=np.float32)], axis=1)
matrix_list.append(tf.constant(value=mat))
self._placeholder_start = ix
if self.placeholder_flag:
def init(shape, dtype=None, partition_info=None):
out = tf.random_normal((self.n_placeholders, dim - 1), stddev=self.placeholder_stddev)
return tf.concat([out, tf.ones((self.n_placeholders, 1))], axis=1)
init_fn = init
else:
init_fn = tf.random_normal_initializer(stddev=self.placeholder_stddev)
matrix_list.append(tf.get_variable("placeholders", (self.n_placeholders, mat.shape[1]),
tf.float32, trainable=False,
initializer=init_fn))
self._word_emb_mat = tf.concat(matrix_list, axis=0)
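        # Rows from _placeholder_start onwards are random placeholder vectors handed out
        # round-robin to words missing from the pre-trained set; with placeholder_flag the
        # extra last dimension is 0 for real embeddings and 1 for placeholders.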
def embed(self, is_train, *word_ix):
if any(len(x) != 2 for x in word_ix):
raise ValueError()
if self.cpu:
with tf.device("/cpu:0"):
return [tf.nn.embedding_lookup(self._word_emb_mat, x[0]) for x in word_ix]
else:
return [tf.nn.embedding_lookup(self._word_emb_mat, x[0]) for x in word_ix]
| allenai/document-qa | docqa/nn/embedder.py | Python | apache-2.0 | 17,154 |
from pyspark.sql import Row
#import boto_emr.parse_marc as parse_marc
from pyspark import SparkContext
import datetime
def process_by_fields(l):
host = None
date = None
text = None
ip_address = None
warc_type = None
for line in l[1]:
fields = line.split(':', 1)
if fields and len(fields) == 2:
if fields[0] == 'hostname':
host = fields[1].strip()
elif fields[0] == 'WARC-Date':
date = fields[1].strip()
elif fields[0] == 'WARC-IP-Address':
ip_address = fields[1].strip()
elif fields[0] == 'WARC-Type':
warc_type = fields[1].strip()
else:
text = line
    return Row(host = host, date = date, text = text,
        ip_address = ip_address, warc_type = warc_type)
def process_file(my_iter):
the_id = "init"
final = []
for chunk in my_iter:
lines = chunk[1].split("\n")
for line in lines:
if line[0:15] == 'WARC-Record-ID:':
the_id = line[15:]
final.append(Row(the_id = the_id, line = line))
return iter(final)
def get_hdfs_path():
return "/mnt/temp"
sc = SparkContext(appName = "test crawl {0}".format(datetime.datetime.now()))
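# Pipeline sketch: read whole WARC text files, tag every line with the most recent
# WARC-Record-ID seen, group the lines per record, then pull out the hostname, date,
# IP address and record type plus the remaining text into Row objects.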
rdd = sc.wholeTextFiles(get_hdfs_path())\
.mapPartitions(process_file)\
.map(lambda x: (x.the_id, x.line))\
.groupByKey()\
.map(process_by_fields)
print(rdd.take(10))
| paulhtremblay/boto_emr | examples/process_crawl_text1.py | Python | mit | 1,482 |
#!/usr/bin/env python3
"""
Convert result text files into a csv.
"""
from typing import List
from itertools import chain
import csv
import re
IN_FILE = 'results.txt'
OUT_FILE = 'results.csv'
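# Assumed layout of results.txt (inferred from the parsers below): a classifier header such
# as "SomeClassifier {param: value}", a line of True/False feature flags, then tab-indented
# result lines (val-accuracy, test-accuracy, a 2x2 confusion matrix and per-threshold rows).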
def parse_learner_run(class_type: str,
class_param: str,
params: List[bool],
lines: List[str]):
"""Parse results for a single run of a supervised learner."""
ones = []
zeroes = []
accs = []
for line in lines:
line = line.strip()
if line.startswith('val-accuracy'):
val = float(line.split()[1])
if line.startswith('test-accuracy'):
test = float(line.split()[1])
if line.startswith('confusion'):
line = line.replace('[', '').replace(']', '')
top_half = line.split()[2:]
TP = int(top_half[0])
FP = int(top_half[1])
if line.startswith('['):
line = line.replace('[', '').replace(']', '')
bottom_half = line.split()
FN = int(bottom_half[0])
TN = int(bottom_half[1])
if line.startswith('0'):
prob, one, zero, acc = line.split()
ones.append(int(one))
zeroes.append(int(zero))
accs.append(float(acc))
conf_mat = [TP, FP, FN, TN]
row = chain([class_type, class_param], params, [val, test], conf_mat, ones,
zeroes, accs)
writer.writerow(row)
def chunk_result_file(in_file: str):
"""Chunk result text to discrete rows for parse_learner_run."""
with open(in_file) as f:
lines = f.readlines()
learner_lines = []
for line in lines:
if line.startswith('False') or line.startswith('True'):
params = line.replace(',', '').split()
continue
if line.startswith('\t'):
learner_lines.append(line.strip())
else:
if learner_lines:
parse_learner_run(class_type, class_param, params,
learner_lines)
learner_lines = []
if line[0].isupper():
class_type = line.split()[0]
class_param = re.search(r'\{(.*)\}', line).group(1)
with open(OUT_FILE, 'w') as f:
writer = csv.writer(f)
chunk_result_file(IN_FILE)
| bmassman/fake_news | fake_news/results/text_to_csv.py | Python | mit | 2,317 |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# https://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from decimal import Decimal
import boto3.session
from boto3.compat import collections_abc
from boto3.dynamodb.conditions import Attr, Key
from boto3.dynamodb.types import Binary
from tests import unique_id, unittest
class BaseDynamoDBTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.session = boto3.session.Session(region_name='us-west-2')
cls.dynamodb = cls.session.resource('dynamodb')
cls.table_name = unique_id('boto3db')
cls.item_data = {
'MyHashKey': 'mykey',
'MyNull': None,
'MyBool': True,
'MyString': 'mystring',
'MyNumber': Decimal('1.25'),
'MyBinary': Binary(b'\x01'),
'MyStringSet': {'foo'},
'MyNumberSet': {Decimal('1.25')},
'MyBinarySet': {Binary(b'\x01')},
'MyList': ['foo'],
'MyMap': {'foo': 'bar'},
}
cls.table = cls.dynamodb.create_table(
TableName=cls.table_name,
ProvisionedThroughput={
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
},
KeySchema=[{"AttributeName": "MyHashKey", "KeyType": "HASH"}],
AttributeDefinitions=[
{"AttributeName": "MyHashKey", "AttributeType": "S"}
],
)
waiter = cls.dynamodb.meta.client.get_waiter('table_exists')
waiter.wait(TableName=cls.table_name)
@classmethod
def tearDownClass(cls):
cls.table.delete()
class TestDynamoDBTypes(BaseDynamoDBTest):
def test_put_get_item(self):
self.table.put_item(Item=self.item_data)
self.addCleanup(self.table.delete_item, Key={'MyHashKey': 'mykey'})
response = self.table.get_item(
Key={'MyHashKey': 'mykey'}, ConsistentRead=True
)
self.assertEqual(response['Item'], self.item_data)
class TestDynamoDBConditions(BaseDynamoDBTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.table.put_item(Item=cls.item_data)
@classmethod
def tearDownClass(cls):
cls.table.delete_item(Key={'MyHashKey': 'mykey'})
super().tearDownClass()
def scan(self, filter_expression):
return self.table.scan(
FilterExpression=filter_expression, ConsistentRead=True
)
def query(self, key_condition_expression, filter_expression=None):
kwargs = {
'KeyConditionExpression': key_condition_expression,
'ConsistentRead': True,
}
if filter_expression is not None:
kwargs['FilterExpression'] = filter_expression
return self.table.query(**kwargs)
def test_filter_expression(self):
r = self.scan(filter_expression=Attr('MyHashKey').eq('mykey'))
self.assertEqual(r['Items'][0]['MyHashKey'], 'mykey')
def test_key_condition_expression(self):
r = self.query(key_condition_expression=Key('MyHashKey').eq('mykey'))
self.assertEqual(r['Items'][0]['MyHashKey'], 'mykey')
def test_key_condition_with_filter_condition_expression(self):
r = self.query(
key_condition_expression=Key('MyHashKey').eq('mykey'),
filter_expression=Attr('MyString').eq('mystring'),
)
self.assertEqual(r['Items'][0]['MyString'], 'mystring')
def test_condition_less_than(self):
r = self.scan(filter_expression=Attr('MyNumber').lt(Decimal('1.26')))
self.assertTrue(r['Items'][0]['MyNumber'] < Decimal('1.26'))
def test_condition_less_than_equal(self):
r = self.scan(filter_expression=Attr('MyNumber').lte(Decimal('1.26')))
self.assertTrue(r['Items'][0]['MyNumber'] <= Decimal('1.26'))
def test_condition_greater_than(self):
r = self.scan(filter_expression=Attr('MyNumber').gt(Decimal('1.24')))
self.assertTrue(r['Items'][0]['MyNumber'] > Decimal('1.24'))
def test_condition_greater_than_equal(self):
r = self.scan(filter_expression=Attr('MyNumber').gte(Decimal('1.24')))
self.assertTrue(r['Items'][0]['MyNumber'] >= Decimal('1.24'))
def test_condition_begins_with(self):
r = self.scan(filter_expression=Attr('MyString').begins_with('my'))
self.assertTrue(r['Items'][0]['MyString'].startswith('my'))
def test_condition_between(self):
r = self.scan(
filter_expression=Attr('MyNumber').between(
Decimal('1.24'), Decimal('1.26')
)
)
self.assertTrue(r['Items'][0]['MyNumber'] > Decimal('1.24'))
self.assertTrue(r['Items'][0]['MyNumber'] < Decimal('1.26'))
def test_condition_not_equal(self):
r = self.scan(filter_expression=Attr('MyHashKey').ne('notmykey'))
self.assertNotEqual(r['Items'][0]['MyHashKey'], 'notmykey')
def test_condition_in(self):
r = self.scan(
filter_expression=Attr('MyHashKey').is_in(['notmykey', 'mykey'])
)
self.assertIn(r['Items'][0]['MyHashKey'], ['notmykey', 'mykey'])
def test_condition_exists(self):
r = self.scan(filter_expression=Attr('MyString').exists())
self.assertIn('MyString', r['Items'][0])
def test_condition_not_exists(self):
r = self.scan(filter_expression=Attr('MyFakeKey').not_exists())
self.assertNotIn('MyFakeKey', r['Items'][0])
def test_condition_contains(self):
r = self.scan(filter_expression=Attr('MyString').contains('my'))
self.assertIn('my', r['Items'][0]['MyString'])
def test_condition_size(self):
r = self.scan(
filter_expression=Attr('MyString').size().eq(len('mystring'))
)
self.assertEqual(len(r['Items'][0]['MyString']), len('mystring'))
def test_condition_attribute_type(self):
r = self.scan(filter_expression=Attr('MyMap').attribute_type('M'))
self.assertIsInstance(r['Items'][0]['MyMap'], collections_abc.Mapping)
def test_condition_and(self):
r = self.scan(
filter_expression=(
Attr('MyHashKey').eq('mykey') & Attr('MyString').eq('mystring')
)
)
item = r['Items'][0]
self.assertTrue(
item['MyHashKey'] == 'mykey' and item['MyString'] == 'mystring'
)
def test_condition_or(self):
r = self.scan(
filter_expression=(
Attr('MyHashKey').eq('mykey2')
| Attr('MyString').eq('mystring')
)
)
item = r['Items'][0]
self.assertTrue(
item['MyHashKey'] == 'mykey2' or item['MyString'] == 'mystring'
)
def test_condition_not(self):
r = self.scan(filter_expression=(~Attr('MyHashKey').eq('mykey2')))
item = r['Items'][0]
self.assertTrue(item['MyHashKey'] != 'mykey2')
def test_condition_in_map(self):
r = self.scan(filter_expression=Attr('MyMap.foo').eq('bar'))
self.assertEqual(r['Items'][0]['MyMap']['foo'], 'bar')
def test_condition_in_list(self):
r = self.scan(filter_expression=Attr('MyList[0]').eq('foo'))
self.assertEqual(r['Items'][0]['MyList'][0], 'foo')
class TestDynamodbBatchWrite(BaseDynamoDBTest):
def test_batch_write_items(self):
num_elements = 1000
items = []
for i in range(num_elements):
items.append({'MyHashKey': 'foo%s' % i, 'OtherKey': 'bar%s' % i})
with self.table.batch_writer() as batch:
for item in items:
batch.put_item(Item=item)
# Verify all the items were added to dynamodb.
for obj in self.table.scan(ConsistentRead=True)['Items']:
self.assertIn(obj, items)
| boto/boto3 | tests/integration/test_dynamodb.py | Python | apache-2.0 | 8,255 |
# coding: utf-8
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import generic
from djutils.views.generic import TitleMixin, SortMixin
from ..generic import FuncAccessMixin
from .. import models
from .. import consts
class List(SortMixin, TitleMixin, FuncAccessMixin, LoginRequiredMixin, generic.ListView):
func_code = consts.SYS_READ_FUNC
    title = 'Функции'  # "Functions"
model = models.Func
sort_params = ('code', 'name', 'level', 'is_modify')
class Detail(TitleMixin, FuncAccessMixin, LoginRequiredMixin, generic.DetailView):
func_code = consts.SYS_READ_FUNC
model = models.Func
def get_title(self):
        return 'Функция "%s"' % self.get_object().name  # 'Function "%s"'
| telminov/sw-django-division-perm | division_perm/views/func.py | Python | mit | 719 |
#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script deletes and recreates the prow configmaps
# USE AT YOUR OWN RISK! This is a break-glass tool.
# See September 25th, 2018 in docs/post-mortems.md
#
# USAGE: have KUBECONFIG pointed at your prow cluster then from test-infra root:
#
# hack/recreate_prow_configmaps.py [--wet]
#
from __future__ import print_function
from argparse import ArgumentParser
import os
import sys
import subprocess
def recreate_prow_config(wet, configmap_name, path):
print('recreating prow config:')
real_cmd = ['/bin/sh', '-c', 'gzip -k '+path]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
cmd = (
'kubectl create configmap %s'
' --from-file=config.yaml=%s'
' --dry-run -o yaml | kubectl replace configmap config -f -'
) % (configmap_name, path)
real_cmd = ['/bin/sh', '-c', cmd]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
real_cmd = ['/bin/sh', '-c', 'rm '+path+'.gz']
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
def recreate_plugins_config(wet, configmap_name, path):
print('recreating plugins config:')
cmd = (
'kubectl create configmap %s'
' --from-file=plugins.yaml=%s'
' --dry-run -o yaml | kubectl replace configmap config -f -'
) % (configmap_name, path)
real_cmd = ['/bin/sh', '-c', cmd]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
def recreate_job_config(wet, job_configmap, job_config_dir):
print('recreating jobs config:')
# delete configmap (apply has size limit)
cmd = ["kubectl", "delete", "configmap", job_configmap]
print(cmd)
if wet:
subprocess.check_call(cmd)
# regenerate
cmd = ["kubectl", "create", "configmap", job_configmap]
for root, _, files in os.walk(job_config_dir):
for name in files:
if name.endswith(".yaml"):
cmd.append("--from-file=%s=%s" % (name, os.path.join(root, name)))
print(cmd)
if wet:
subprocess.check_call(cmd)
def main():
parser = ArgumentParser()
# jobs config
parser.add_argument("--job-configmap", default="job-config", help="name of prow jobs configmap")
parser.add_argument(
"--job-config-dir", default="config/jobs",
help="root dir of prow jobs configmap")
# prow config
parser.add_argument("--prow-configmap", default="config",
help="name of prow primary configmap")
parser.add_argument(
"--prow-config-path", default="prow/config.yaml",
help="path to the primary prow config")
# plugins config
parser.add_argument("--plugins-configmap", default="plugins",
help="name of prow plugins configmap")
parser.add_argument(
"--plugins-config-path", default="prow/plugins.yaml",
help="path to the prow plugins config")
# wet or dry?
parser.add_argument("--wet", action="store_true")
args = parser.parse_args()
# debug the current context
out = subprocess.check_output(['kubectl', 'config', 'current-context'])
print('Current KUBECONFIG context: '+out)
# require additional confirmation in --wet mode
prompt = '!'*65 + (
"\n!! WARNING THIS WILL RECREATE **ALL** PROW CONFIGMAPS. !!"
"\n!! ARE YOU SURE YOU WANT TO DO THIS? IF SO, ENTER 'YES'. !! "
) + '\n' + '!'*65 + '\n\n: '
if args.wet:
if raw_input(prompt) != "YES":
print("you did not enter 'YES'")
sys.exit(-1)
# first prow config
recreate_prow_config(args.wet, args.prow_configmap, args.prow_config_path)
print('')
# then plugins config
recreate_plugins_config(args.wet, args.plugins_configmap, args.plugins_config_path)
print('')
# finally jobs config
recreate_job_config(args.wet, args.job_configmap, args.job_config_dir)
if __name__ == '__main__':
main()
| lavalamp/test-infra | experiment/maintenance/recreate_configmaps.py | Python | apache-2.0 | 4,541 |
from fnmatch import fnmatch
from time import sleep
import subprocess
import random as rand
from utils import *
import utils
name = "admin"
cmds = ["join", "part", "nick", "quit", "raw", ">>", ">", "op", "deop",
"voice", "devoice", "ban", "kban", "unban", "sop", "sdeop",
"svoice", "sdevoice", "squiet", "sunquiet", "kick", "quiet",
"unquiet", "mode"]
def main(irc):
if not name in irc.plugins:
irc.plugins[name] = {}
if not name in irc.state["plugins"]:
irc.state["plugins"][name] = {}
@add_cmd
def join(irc, event, args):
"""<channel> [<key>,<channel>...]
Makes the bot join <channel> using <key> if given.
If no key is given but the bot already has a record
of the channel's key, it will attempt to use that.
"""
args = " ".join(args)
for channel in args.split(","):
channel = channel.split()
if is_allowed(irc, event.source, channel[0]):
if irc.is_channel(channel[0]):
if len(channel) > 1:
irc.join(channel[0], channel[1])
else:
if channel[0] in irc.channels.keys() and "key" in irc.channels[channel[0]].keys() and irc.channels[channel[0]]["key"]:
key = irc.channels[channel[0]]["key"]
irc.join(channel[0], key)
else:
irc.join(channel[0])
else:
irc.reply(event, "ERROR: Invalid channel: {}".format(channel[0]))
@add_cmd
def part(irc, event, args):
"""[<channel>] [<message>]
Parts <channel> with <message> if given. <channel>
is only necessary if the command isn't given in the
channel itself.
"""
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
reason = " ".join(args[1:])
else:
reason = event.source.nick
elif not is_private(event):
channel = event.target
reason = " ".join(args)
else:
irc.reply(event, "ERROR: No channel specified.")
return
elif not is_private(event):
channel = event.target
reason = event.source.nick
else:
irc.reply(event, "ERROR: No channel specified.")
return
if is_owner(irc, event.source, channel):
irc.part(channel, reason)
@add_cmd
def nick(irc, event, args):
"""<nick>
Changes the bot's nick to <nick>.
"""
if is_allowed(irc, event.source): # Checks if the user is on the global allowed list
irc.chgnick(args[0]) # Calls the nickname change if the above function returns True
def botquit(irc, event, args):
"""[<message>]
Makes the bot quit with <message> if given.
"""
if is_owner(irc, event.source):
if len(args) > 0:
irc.quit(" ".join(args))
else:
irc.quit(event.source.nick)
add_cmd(botquit, "quit")
@add_cmd
def raw(irc, event, args):
"""<command>
Sends <command> to the IRC server.
"""
if is_owner(irc, event.source):
irc.send(" ".join(args))
def _exec(irc, event, args):
"""<code>
Executes <code> in a Python interpreter.
"""
if is_owner(irc, event.source):
output = utils.console({"irc": irc, "utils": utils, "event": event}).run(" ".join(args))
if output is not None:
irc.reply(event, output)
add_cmd(_exec, ">>")
def _shell(irc, event, args):
"""<command>
Executes <command> on the shell.
"""
if is_owner(irc, event.source):
args = " ".join(args)
try:
s = subprocess.check_output(args+" | ./ircize --remove", stderr=subprocess.STDOUT, shell=True)
if s:
s = s.decode()
for line in str(s).splitlines():
irc.reply(event, line)
except subprocess.CalledProcessError as e:
irc.reply(event, e)
add_cmd(_shell, ">")
@add_cmd
def sop(irc, event, args):
"""[<channel>] [<nick>...]
Ops <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("sop"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in nicks:
if irc.is_opped(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "OP {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def sdeop(irc, event, args):
"""[<channel>] [<nick>...]
Deops <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("sdeop"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in nicks:
if not irc.is_opped(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "DEOP {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def svoice(irc, event, args):
"""[<channel>] [<nick>...]
Voices <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("svoice"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in nicks:
if irc.is_voiced(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "VOICE {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def sdevoice(irc, event, args):
"""[<channel>] [<nick>...]
Devoices <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("sdevoice"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in nicks:
if not irc.is_voiced(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "DEVOICE {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def squiet(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...]
Quiets <nick> in <channel> using services. <channel> is only necessary
if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("squiet"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
irc.privmsg("ChanServ", "QUIET {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def sunquiet(irc, event, args):
"""[<channel>] [<nick|hostmask>...]
Unquiets <nick> (or yourself if no <nick> is given) in <channel>
using services. <channel> is only necessary if the command isn't
sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [event.source.nick]
except IndexError:
irc.reply(event, utils.gethelp("sunquiet"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
irc.privmsg("ChanServ", "UNQUIET {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def op(irc, event, args):
"""[<channel>] [<nick>...]
Ops <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the
channel itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
irc.reply(event, utils.gethelp("op"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
for nick in nicks:
if not irc.is_opped(nick, channel):
setmodes.append("+o {}".format(nick))
if len(setmodes) == 0:
return
if not already_op and irc.get_nick() not in nicks:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def deop(irc, event, args):
"""[<channel>] [<nick>...]
Deops <nick> (or yourself if no <nick> is specified) in <channel>.
    <channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
irc.reply(event, utils.gethelp("deop"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
if irc.get_nick() in nicks:
nicks.remove(irc.get_nick())
if irc.channels[channel].get("chanserv", irc.chanserv) and "ChanServ" in nicks:
nicks.remove("ChanServ")
for nick in nicks:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if len(setmodes) == 0:
return
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def voice(irc, event, args):
"""[<channel>] [<nick>...]
Voices <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
        elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
        irc.reply(event, utils.gethelp("voice"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
for nick in nicks:
if not irc.is_voiced(nick, channel):
setmodes.append("+v {}".format(nick))
if len(setmodes) == 0:
return
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def devoice(irc, event, args):
"""[<channel>] [<nick>...]
Devoices <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
irc.reply(event, utils.gethelp("devoice"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
for nick in nicks:
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
if len(setmodes) == 0:
return
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def ban(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...]
Bans <nick> in <channel>. <channel> is only necessary if the command
isn't sent in the channel itself.
"""
setmodes = []
affected = []
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("ban"))
else:
if utils.is_allowed(irc, event.source, channel):
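            # Ban each target (building a banmask for bare nicks), then strip
            # op/voice from anyone the new bans affect.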
for nick in nicks:
if utils.is_hostmask(nick):
bmask = nick
else:
bmask = utils.banmask(irc, nick)
setmodes.append("+b {}".format(bmask))
for affect in utils.ban_affects(irc, channel, bmask):
if affect not in affected and affect != irc.get_nick():
affected.append(affect)
for nick in affected:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick())) # remove op from self after ban
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def kban(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...] [:][<reason>]
Bans <nick> in <channel> and kicks anyone affected using <reason>
as the kick message if specified. <channel> is only necessary if
the command isn't sent in the channel itself. It is recommended to
    use ':' as a separator between <nick> and <reason>; otherwise, if the
    first word of the reason matches a nick in the channel, that nick will
    be kicked as well.
"""
prepare_nicks = []
setmodes = []
affected = []
reason = None
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("kban"))
else:
if utils.is_allowed(irc, event.source, channel):
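            # Split args into ban targets and an optional kick reason: leading
            # tokens that are nicks in the channel or hostmasks are targets;
            # the rest (optionally prefixed with ':') becomes the reason.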
for nick in nicks:
if nick in irc.state["channels"][channel]["names"] and nick not in prepare_nicks and not nick.startswith(":"):
prepare_nicks.append(nick)
elif utils.is_hostmask(nick):
prepare_nicks.append(nick)
else:
reason = " ".join(nicks[len(prepare_nicks):]).lstrip(": ")
break
nicks = prepare_nicks
for nick in nicks:
if utils.is_hostmask(nick):
bmask = nick
else:
bmask = utils.banmask(irc, nick)
setmodes.append("+b {}".format(bmask))
for affect in utils.ban_affects(irc, channel, bmask):
if affect not in affected and affect != irc.get_nick():
if irc.is_opped(affect, channel):
setmodes.append("-o {}".format(affect))
if irc.is_voiced(affect, channel):
setmodes.append("-v {}".format(affect))
affected.append(affect)
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
for nick in affected:
if reason:
irc.kick(channel, nick, reason)
else:
irc.kick(channel, nick)
if not already_op:
irc.mode(channel, "-o {}".format(irc.get_nick()))
@add_cmd
def kick(irc, event, args):
"""[<channel>] <nick> [<nick>...] [:][<reason>]
Kicks <nick> in <channel>. <channel> is only necessary if the
command isn't sent in the channel itself. It is recommended to
    use ':' as a separator between <nick> and <reason>; otherwise, if the
    first word of the reason matches a nick in the channel, that nick will
    be kicked as well.
"""
prepare_nicks = []
reason = None
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("kick"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if nick in irc.state["channels"][channel]["names"] and nick not in prepare_nicks and not nick.startswith(":"):
prepare_nicks.append(nick)
else:
reason = " ".join(nicks[len(prepare_nicks):]).lstrip(": ")
break
nicks = prepare_nicks
already_op = irc.is_opped(irc.get_nick(), channel)
gotop = utils.getop(irc, channel)
if gotop:
for nick in nicks:
if reason:
irc.kick(channel, nick, reason)
else:
irc.kick(channel, nick)
if not already_op:
irc.mode(channel, "-o {}".format(irc.get_nick()))
@add_cmd
def unban(irc, event, args):
"""[<channel>] [<nick|hostmask>...]
Unbans <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [event.source.nick]
except IndexError:
irc.reply(event, utils.gethelp("unban"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if utils.is_hostmask(nick):
hmask = nick
else:
hmask = utils.gethm(irc, nick)
if hmask and channel in irc.state["channels"]:
for bmask in irc.state["channels"][channel]["bans"]:
if fnmatch(utils.irclower(hmask), utils.irclower(bmask)):
setmodes.append("-b {}".format(bmask))
else:
return
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def quiet(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...]
Quiets <nick> in <channel>. <channel> is only necessary if the command
isn't sent in the channel itself.
"""
setmodes = []
affected = []
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("quiet"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if utils.is_hostmask(nick):
bmask = nick
else:
bmask = utils.banmask(irc, nick)
setmodes.append("+q {}".format(bmask))
for affect in utils.ban_affects(irc, channel, bmask):
if affect not in affected and affect != irc.get_nick():
affected.append(affect)
for nick in affected:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def unquiet(irc, event, args):
"""[<channel>] [<nick|hostmask>...]
Unquiets <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [event.source.nick]
except IndexError:
irc.reply(event, utils.gethelp("unquiet"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if utils.is_hostmask(nick):
hmask = nick
else:
hmask = utils.gethm(irc, nick)
if hmask and channel in irc.state["channels"]:
for bmask in irc.state["channels"][channel]["quiets"]:
if fnmatch(utils.irclower(hmask), utils.irclower(bmask)):
setmodes.append("-q {}".format(bmask))
else:
return
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def mode(irc, event, args):
"""[<channel>] <modes>
Sets <modes> in <channel>. <channel> is only necessary if the command
isn't sent in the channel itself.
"""
try:
if utils.is_private(event) or irc.is_channel(args[0]):
if args[0] in irc.state["channels"]:
channel = args[0]
setmodes = utils.split_modes(args[1:])
elif not utils.is_private(event):
channel = event.target
setmodes = utils.split_modes(args)
else:
irc.reply(event, utils.gethelp("mode"))
return
else:
channel = event.target
setmodes = utils.split_modes(args)
except IndexError:
irc.reply(event, utils.gethelp("mode"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for modes in utils.unsplit_modes(setmodes):
irc.mode(channel, modes)
@add_cmd
def random(irc, event, args): # I'll delete this after
"""takes no arguments
    Returns a random statement.
"""
random_events = ["moo{}".format("o"*rand.randint(0, 100)), "lol"]
irc.reply(event, rand.choice(random_events))
def on_mode(irc, conn, event):
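    # Mass-ban guard: if someone other than the bot sets a ban that matches
    # at least half of the channel, lift that ban, ban the setter instead,
    # and kick everyone the counter-ban affects.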
channel = event.target
modes = utils.split_modes(event.arguments)
for mode in modes:
if mode.startswith("+b"):
if event.source.nick == irc.get_nick():
continue
mask = mode.split()[1]
affects = utils.ban_affects(irc, channel, mask)
names = irc.state["channels"][channel]["names"]
if len(affects) >= len(names) / 2:
setmodes = []
bmask = utils.banmask(irc, event.source)
setmodes.append("-b {}".format(mask))
baffects = utils.ban_affects(irc, channel, bmask)
for nick in baffects:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
setmodes.append("+b {}".format(bmask))
already_op = irc.is_opped(irc.get_nick(), channel)
gotop = utils.getop(irc, channel)
if gotop:
for modes in utils.unsplit_modes(setmodes):
irc.mode(channel, modes)
for nick in baffects:
irc.kick(channel, nick)
if not already_op:
irc.mode(channel, "-o {}".format(irc.get_nick()))
add_handler(on_mode, name)
| devzero-xyz/Andromeda | plugins/admin.py | Python | mit | 32,853 |
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
verts = [
(0., 0.), # left, bottom
(0., 1.), # left, top
(1., 1.), # right, top
(1., 0.), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
fig = plt.figure()
ax = fig.add_subplot(111)
patch = patches.PathPatch(path, facecolor='orange', lw=2)
ax.add_patch(patch)
ax.set_xlim(-2,2)
ax.set_ylim(-2,2)
plt.show() | leesavide/pythonista-docs | Documentation/matplotlib/users/path_tutorial-1.py | Python | apache-2.0 | 577 |
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2010 Tom Kralidis
# Copyright (c) 2014 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from pycsw import gml, util
LOGGER = logging.getLogger(__name__)
MODEL = {
'GeometryOperands': {
'values': gml.TYPES
},
'SpatialOperators': {
'values': ['BBOX', 'Beyond', 'Contains', 'Crosses', 'Disjoint',
'DWithin', 'Equals', 'Intersects', 'Overlaps', 'Touches', 'Within']
},
'ComparisonOperators': {
'ogc:PropertyIsBetween': {'opname': 'Between', 'opvalue': 'and'},
'ogc:PropertyIsEqualTo': {'opname': 'EqualTo', 'opvalue': '='},
'ogc:PropertyIsGreaterThan': {'opname': 'GreaterThan', 'opvalue': '>'},
'ogc:PropertyIsGreaterThanOrEqualTo': {
'opname': 'GreaterThanEqualTo', 'opvalue': '>='},
'ogc:PropertyIsLessThan': {'opname': 'LessThan', 'opvalue': '<'},
'ogc:PropertyIsLessThanOrEqualTo': {
'opname': 'LessThanEqualTo', 'opvalue': '<='},
'ogc:PropertyIsLike': {'opname': 'Like', 'opvalue': 'like'},
'ogc:PropertyIsNotEqualTo': {'opname': 'NotEqualTo', 'opvalue': '!='},
'ogc:PropertyIsNull': {'opname': 'NullCheck', 'opvalue': 'is null'},
},
'Functions': {
'length': {'args': '1'},
'lower': {'args': '1'},
'ltrim': {'args': '1'},
'rtrim': {'args': '1'},
'trim': {'args': '1'},
'upper': {'args': '1'},
},
'Ids': {
'values': ['EID', 'FID']
}
}
def parse(element, queryables, dbtype, nsmap, orm='sqlalchemy', language='english', fts=False):
"""OGC Filter object support"""
boq = None
is_pg = dbtype.startswith('postgresql')
tmp = element.xpath('ogc:And|ogc:Or|ogc:Not', namespaces=nsmap)
if len(tmp) > 0: # this is binary logic query
boq = ' %s ' % util.xmltag_split(tmp[0].tag).lower()
LOGGER.debug('Binary logic detected; operator=%s' % boq)
tmp = tmp[0]
else:
tmp = element
pvalue_serial = [0] # in list as python 2 has no nonlocal variable
def assign_param():
if orm == 'django':
return '%s'
param = ':pvalue%d' % pvalue_serial[0]
pvalue_serial[0] += 1
return param
def _get_comparison_expression(elem):
"""return the SQL expression based on Filter query"""
fname = None
matchcase = elem.attrib.get('matchCase')
wildcard = elem.attrib.get('wildCard')
singlechar = elem.attrib.get('singleChar')
expression = None
if wildcard is None:
wildcard = '%'
if singlechar is None:
singlechar = '_'
if (elem.xpath('child::*')[0].tag ==
util.nspath_eval('ogc:Function', nsmap)):
LOGGER.debug('ogc:Function detected')
if (elem.xpath('child::*')[0].attrib['name'] not in
MODEL['Functions']):
raise RuntimeError('Invalid ogc:Function: %s' %
(elem.xpath('child::*')[0].attrib['name']))
fname = elem.xpath('child::*')[0].attrib['name']
try:
LOGGER.debug('Testing existence of ogc:PropertyName')
pname = queryables[elem.find(util.nspath_eval('ogc:Function/ogc:PropertyName', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' % (elem.find(util.nspath_eval('ogc:Function/ogc:PropertyName', nsmap)).text, str(err)))
else:
try:
LOGGER.debug('Testing existence of ogc:PropertyName')
pname = queryables[elem.find(
util.nspath_eval('ogc:PropertyName', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' %
(elem.find(util.nspath_eval('ogc:PropertyName',
nsmap)).text, str(err)))
if (elem.tag != util.nspath_eval('ogc:PropertyIsBetween', nsmap)):
if elem.tag in [util.nspath_eval('ogc:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
boolean_true = '\'true\''
boolean_false = '\'false\''
if dbtype == 'mysql':
boolean_true = 'true'
boolean_false = 'false'
return "%s = %s" % (_get_spatial_operator(queryables['pycsw:BoundingBox'], elem, dbtype, nsmap), boolean_true)
else:
pval = elem.find(util.nspath_eval('ogc:Literal', nsmap)).text
com_op = _get_comparison_operator(elem)
LOGGER.debug('Comparison operator: %s' % com_op)
# if this is a case insensitive search
# then set the DB-specific LIKE comparison operator
LOGGER.debug('Setting csw:AnyText property')
anytext = queryables['csw:AnyText']['dbcol']
if ((matchcase is not None and matchcase == 'false') or
pname == anytext):
com_op = 'ilike' if is_pg else 'like'
if (elem.tag == util.nspath_eval('ogc:PropertyIsBetween', nsmap)):
com_op = 'between'
lower_boundary = elem.find(
util.nspath_eval('ogc:LowerBoundary/ogc:Literal',
nsmap)).text
upper_boundary = elem.find(
util.nspath_eval('ogc:UpperBoundary/ogc:Literal',
nsmap)).text
expression = "%s %s %s and %s" % \
(pname, com_op, assign_param(), assign_param())
values.append(lower_boundary)
values.append(upper_boundary)
else:
if pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
# do nothing, let FTS do conversion (#212)
pvalue = pval
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
pvalue = pval.replace(wildcard, '%').replace(singlechar, '_')
if pname == anytext: # pad anytext with wildcards
LOGGER.debug('PostgreSQL non-FTS specific anytext search')
LOGGER.debug('old value: %s', pval)
pvalue = '%%%s%%' % pvalue.rstrip('%').lstrip('%')
LOGGER.debug('new value: %s', pvalue)
values.append(pvalue)
if boq == ' not ':
if fname is not None:
expression = "%s is null or not %s(%s) %s %s" % \
(pname, fname, pname, com_op, assign_param())
elif pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
expression = ("%s is null or not plainto_tsquery('%s', %s) @@ anytext_tsvector" %
(anytext, language, assign_param()))
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
expression = "%s is null or not %s %s %s" % \
(pname, pname, com_op, assign_param())
else:
if fname is not None:
expression = "%s(%s) %s %s" % \
(fname, pname, com_op, assign_param())
elif pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
expression = ("plainto_tsquery('%s', %s) @@ anytext_tsvector" %
(language, assign_param()))
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
expression = "%s %s %s" % (pname, com_op, assign_param())
return expression
queries = []
queries_nested = []
values = []
LOGGER.debug('Scanning children elements')
for child in tmp.xpath('child::*'):
com_op = ''
boolean_true = '\'true\''
boolean_false = '\'false\''
if dbtype == 'mysql':
boolean_true = 'true'
boolean_false = 'false'
if child.tag == util.nspath_eval('ogc:Not', nsmap):
LOGGER.debug('ogc:Not query detected')
child_not = child.xpath('child::*')[0]
if child_not.tag in \
[util.nspath_eval('ogc:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
LOGGER.debug('ogc:Not / spatial operator detected: %s' % child.tag)
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child.xpath('child::*')[0], dbtype, nsmap),
boolean_false))
else:
LOGGER.debug('ogc:Not / comparison operator detected: %s' % child.tag)
queries.append('not %s' % _get_comparison_expression(child_not))
elif child.tag in \
[util.nspath_eval('ogc:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
LOGGER.debug('spatial operator detected: %s' % child.tag)
if boq is not None and boq == ' not ':
                # for ogc:Not spatial queries in PostGIS we must explicitly
# test that pycsw:BoundingBox is null as well
# TODO: Do we need the same for 'postgresql+postgis+native'???
if dbtype == 'postgresql+postgis+wkt':
LOGGER.debug('Setting bbox is null test in PostgreSQL')
queries.append("%s = %s or %s is null" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_false,
queryables['pycsw:BoundingBox']))
else:
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_false))
else:
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_true))
elif child.tag == util.nspath_eval('ogc:FeatureId', nsmap):
LOGGER.debug('ogc:FeatureId filter detected')
queries.append("%s = %s" % (queryables['pycsw:Identifier'], assign_param()))
values.append(child.attrib.get('fid'))
else: # comparison operator
LOGGER.debug('Comparison operator processing')
tagname = ' %s ' % util.xmltag_split(child.tag).lower()
if tagname in [' or ', ' and ']: # this is a nested binary logic query
LOGGER.debug('Nested binary logic detected; operator=%s' % tagname)
for child2 in child.xpath('child::*'):
queries_nested.append(_get_comparison_expression(child2))
queries.append('(%s)' % tagname.join(queries_nested))
else:
queries.append(_get_comparison_expression(child))
where = boq.join(queries) if (boq is not None and boq != ' not ') \
else queries[0]
return where, values
def _get_spatial_operator(geomattr, element, dbtype, nsmap, postgis_geometry_column='wkb_geometry'):
"""return the spatial predicate function"""
property_name = element.find(util.nspath_eval('ogc:PropertyName', nsmap))
distance = element.find(util.nspath_eval('ogc:Distance', nsmap))
distance = 'false' if distance is None else distance.text
LOGGER.debug('Scanning for spatial property name')
if property_name is None:
raise RuntimeError('Missing ogc:PropertyName in spatial filter')
if (property_name.text.find('BoundingBox') == -1 and
property_name.text.find('Envelope') == -1):
raise RuntimeError('Invalid ogc:PropertyName in spatial filter: %s' %
property_name.text)
geometry = gml.Geometry(element, nsmap)
    # decide whether to apply spatial ranking to the results
set_spatial_ranking(geometry)
spatial_predicate = util.xmltag_split(element.tag).lower()
LOGGER.debug('Spatial predicate: %s' % spatial_predicate)
if dbtype == 'mysql': # adjust spatial query for MySQL
LOGGER.debug('Adjusting spatial query for MySQL')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "ifnull(distance(geomfromtext(%s), \
geomfromtext('%s')) > convert(%s, signed),false)" % \
(geomattr, geometry.wkt, distance)
elif spatial_predicate == 'dwithin':
spatial_query = "ifnull(distance(geomfromtext(%s), \
geomfromtext('%s')) <= convert(%s, signed),false)" % \
(geomattr, geometry.wkt, distance)
else:
spatial_query = "ifnull(%s(geomfromtext(%s), \
geomfromtext('%s')),false)" % \
(spatial_predicate, geomattr, geometry.wkt)
elif dbtype == 'postgresql+postgis+wkt': # adjust spatial query for PostGIS with WKT geometry column
LOGGER.debug('Adjusting spatial query for PostgreSQL+PostGIS+WKT')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "not st_dwithin(st_geomfromtext(%s), \
st_geomfromtext('%s'), %f)" % \
(geomattr, geometry.wkt, float(distance))
elif spatial_predicate == 'dwithin':
spatial_query = "st_dwithin(st_geomfromtext(%s), \
st_geomfromtext('%s'), %f)" % \
(geomattr, geometry.wkt, float(distance))
else:
spatial_query = "st_%s(st_geomfromtext(%s), \
st_geomfromtext('%s'))" % \
(spatial_predicate, geomattr, geometry.wkt)
elif dbtype == 'postgresql+postgis+native': # adjust spatial query for PostGIS with native geometry
LOGGER.debug('Adjusting spatial query for PostgreSQL+PostGIS+native')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "not st_dwithin(%s, \
st_geomfromtext('%s',4326), %f)" % \
(postgis_geometry_column, geometry.wkt, float(distance))
elif spatial_predicate == 'dwithin':
spatial_query = "st_dwithin(%s, \
st_geomfromtext('%s',4326), %f)" % \
(postgis_geometry_column, geometry.wkt, float(distance))
else:
spatial_query = "st_%s(%s, \
st_geomfromtext('%s',4326))" % \
(spatial_predicate, postgis_geometry_column, geometry.wkt)
else:
LOGGER.debug('Adjusting spatial query')
spatial_query = "query_spatial(%s,'%s','%s','%s')" % \
(geomattr, geometry.wkt, spatial_predicate, distance)
return spatial_query
def _get_comparison_operator(element):
"""return the SQL operator based on Filter query"""
return MODEL['ComparisonOperators']['ogc:%s' % util.xmltag_split(element.tag)]['opvalue']
def set_spatial_ranking(geometry):
"""Given that we have a spatial query in ogc:Filter we check the type of geometry
and set the ranking variables"""
if util.ranking_enabled:
if geometry.type in ['Polygon', 'Envelope']:
util.ranking_pass = True
util.ranking_query_geometry = geometry.wkt
elif geometry.type in ['LineString', 'Point']:
from shapely.geometry.base import BaseGeometry
from shapely.geometry import box
from shapely.wkt import loads,dumps
ls = loads(geometry.wkt)
b = ls.bounds
if geometry.type == 'LineString':
tmp_box = box(b[0],b[1],b[2],b[3])
tmp_wkt = dumps(tmp_box)
if tmp_box.area > 0:
util.ranking_pass = True
util.ranking_query_geometry = tmp_wkt
elif geometry.type == 'Point':
tmp_box = box((float(b[0])-1.0),(float(b[1])-1.0),(float(b[2])+1.0),(float(b[3])+1.0))
tmp_wkt = dumps(tmp_box)
util.ranking_pass = True
util.ranking_query_geometry = tmp_wkt
| PublicaMundi/pycsw | pycsw/fes.py | Python | mit | 18,085 |
from django.db import models
from types import Secret
class SecretiveModel(models.Model):
class Meta:
# Don't create a separate db table for this superclass model
abstract = True
def __setattr__(self, name, value):
"""Register self with all Secret attributes."""
result = super(SecretiveModel, self).__setattr__(name, value)
try:
current = getattr(self, name)
if type(current) is Secret:
# De-link the obsolete Secret from this model
current.destroy_model_reference((self, name))
except KeyError:
# Django's models actually return a KeyError for a missing attribute,
# not an AttributeError
pass
if type(value) is Secret:
# Allow the Secret to keep track of models it belongs to
value.create_model_reference((self, name))
return result
def upgrade_secret(self, field_name, plaintext, password):
"""Upgrade the PBKDF2 work factor for a given Secret attribute."""
setattr(self, field_name, Secret.from_plaintext(plaintext, password))
# Save only the affected field to database
self.save(update_fields=[field_name])
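# Editor's illustrative sketch (not part of the original module): given a
# SecretiveModel subclass instance whose attribute holds a Secret, the PBKDF2
# work factor can be raised once the plaintext is available again, e.g.:
#
#     instance.upgrade_secret('token', plaintext, password)
#
# 'instance' and 'token' are hypothetical names used only for illustration.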
| mypetyak/django-citadel | citadel/models.py | Python | mit | 1,242 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_jsonhumanize
----------------------------------
Tests for `jsonhumanize` module.
"""
import unittest
from jsonhumanize import JsonHuman
class TestJsonhumanize(unittest.TestCase):
"""
Test class for jsonhumanize module.
"""
def setUp(self):
self.json = '''{
"name": "jsonhumanize",
"description": "Convert JSON to human readable HTML",
"author": "Martin Garcia <newluxfero@gmail.com>",
"tags": ["DOM", "HTML", "JSON", "Pretty Print"],
"version": "0.1.0",
"main": "jsonhumanize.py",
"license" : "MIT",
"dependencies": {
"crel": "1.0.0"
},
"repository": {
"type": "git",
"url": "git://github.com/magarcia/jsonhumanize.git"
},
"bugs": {
"url": "http://github.com/magarcia/jsonhumanize/issues"
},
"contributors": [],
"config": {
"what?": "this object is just to show some extra stuff",
"how?": ["use jsonhumanize", "add jsonhumanize.css", "???", \
"profit!"],
"customization?": ["customize the css prefix", "change the css \
file"],
"integer": 42,
"float": 12.3,
"bool": true
}
}'''
self.html = '<table class="jh-type-object jh-root"><tr>\
<th class="jh-key jh-object-key">name</th><td class="jh-object-value">\
<span class="jh-type-string">"jsonhumanize"</span></td></tr><tr>\
<th class="jh-key jh-object-key">description</th>\
<td class="jh-object-value"><span class="jh-type-string">\
"Convert JSON to human readable HTML"</span></td></tr><tr>\
<th class="jh-key jh-object-key">author</th>\
<td class="jh-object-value"><span class="jh-type-string">\
"Martin Garcia <newluxfero@gmail.com>"</span></td></tr><tr>\
<th class="jh-key jh-object-key">tags</th><td class="jh-object-value">\
<table class="jh-type-array"><tr><th class="jh-key jh-array-key">0\
</th><td class="jh-array-value"><span class="jh-type-string">"DOM"\
</span></td></tr><tr><th class="jh-key jh-array-key">1</th><td \
class="jh-array-value"><span class="jh-type-string">"HTML"</span></td>\
</tr><tr><th class="jh-key jh-array-key">2</th>\
<td class="jh-array-value"><span class="jh-type-string">"JSON"</span>\
</td></tr><tr><th class="jh-key jh-array-key">3</th>\
<td class="jh-array-value"><span class="jh-type-string">"Pretty Print"\
</span></td></tr></table></td></tr><tr>\
<th class="jh-key jh-object-key">version</th>\
<td class="jh-object-value"><span class="jh-type-string">"0.1.0"\
</span></td></tr><tr><th class="jh-key jh-object-key">main</th>\
<td class="jh-object-value"><span class="jh-type-string">\
"jsonhumanize.py"</span></td></tr><tr><th class="jh-key \
jh-object-key">license</th><td class="jh-object-value">\
<span class="jh-type-string">"MIT"</span></td></tr><tr>\
<th class="jh-key jh-object-key">dependencies\
</th><td class="jh-object-value"><table class="jh-type-object"><tr>\
<th class="jh-key jh-object-key">crel</th><td class="jh-object-value">\
<span class="jh-type-string">"1.0.0"</span></td></tr></table></td>\
</tr><tr><th class="jh-key jh-object-key">repository</th>\
<td class="jh-object-value"><table class="jh-type-object"><tr>\
<th class="jh-key jh-object-key">type</th><td class="jh-object-value">\
<span class="jh-type-string">"git"</span></td></tr><tr>\
<th class="jh-key jh-object-key">url</th><td class="jh-object-value">\
<span class="jh-type-string">\
"git://github.com/magarcia/jsonhumanize.git"</span></td></tr></table>\
</td></tr><tr><th class="jh-key jh-object-key">bugs</th>\
<td class="jh-object-value"><table class="jh-type-object"><tr>\
<th class="jh-key jh-object-key">url</th><td class="jh-object-value">\
<span class="jh-type-string">\
"http://github.com/magarcia/jsonhumanize/issues"</span></td></tr>\
</table></td></tr><tr><th class="jh-key jh-object-key">contributors\
</th><td class="jh-object-value"><table class="jh-type-array"></table>\
</td></tr><tr><th class="jh-key jh-object-key">config</th>\
<td class="jh-object-value"><table class="jh-type-object"><tr>\
<th class="jh-key jh-object-key">what?</th>\
<td class="jh-object-value"><span class="jh-type-string">\
"this object is just to show some extra stuff"</span></td></tr><tr>\
<th class="jh-key jh-object-key">how?</th><td class="jh-object-value">\
<table class="jh-type-array"><tr><th class="jh-key jh-array-key">0\
</th><td class="jh-array-value"><span class="jh-type-string">\
"use jsonhumanize"</span></td></tr><tr>\
<th class="jh-key jh-array-key">1</th><td class="jh-array-value">\
<span class="jh-type-string">"add jsonhumanize.css"</span></td></tr>\
<tr><th class="jh-key jh-array-key">2</th><td class="jh-array-value">\
<span class="jh-type-string">"???"</span></td></tr><tr>\
<th class="jh-key jh-array-key">3</th><td class="jh-array-value">\
<span class="jh-type-string">"profit!"</span></td></tr></table></td>\
</tr><tr><th class="jh-key jh-object-key">customization?</th>\
<td class="jh-object-value"><table class="jh-type-array"><tr>\
<th class="jh-key jh-array-key">0</th><td class="jh-array-value">\
<span class="jh-type-string">"customize the css prefix"</span></td>\
</tr><tr><th class="jh-key jh-array-key">1</th>\
<td class="jh-array-value"><span class="jh-type-string">\
"change the css file"</span></td></tr></table></td></tr><tr>\
<th class="jh-key jh-object-key">integer</th>\
<td class="jh-object-value"><span class="jh-type-int jh-type-number">\
42</span></td></tr><tr><th class="jh-key jh-object-key">float</th>\
<td class="jh-object-value">\
<span class="jh-type-float jh-type-number">12.30</span></td></tr><tr>\
<th class="jh-key jh-object-key">bool</th><td class="jh-object-value">\
<span class="jh-type-bool">true</span></td></tr></table></td></tr>\
</table>'
def test_something(self):
parsed = JsonHuman(keep_order=True).format(self.json)
self.assertEqual(parsed, self.html)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| magarcia/jsonhumanize | tests/test_jsonhumanize.py | Python | mit | 6,088 |
# Generated by Django 2.2 on 2020-07-06 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("djangocms_page_meta", "0011_auto_20190218_1010"),
]
operations = [
migrations.RemoveField(
model_name="pagemeta",
name="gplus_author",
),
migrations.RemoveField(
model_name="pagemeta",
name="gplus_type",
),
migrations.RemoveField(
model_name="titlemeta",
name="gplus_description",
),
migrations.AddField(
model_name="pagemeta",
name="schemaorg_type",
field=models.CharField(
blank=True, help_text="Use Article for generic pages.", max_length=255, verbose_name="Resource type"
),
),
]
| nephila/djangocms-page-meta | djangocms_page_meta/migrations/0012_auto_20200706_1230.py | Python | bsd-3-clause | 858 |
from .circumcision_model_mixin import CircumcisionModelMixin
from .crf_model_mixin import CrfModelManager, CrfModelMixin
# CrfModelMixinNonUniqueVisit
from .detailed_sexual_history_mixin import DetailedSexualHistoryMixin
from .hiv_testing_supplemental_mixin import HivTestingSupplementalMixin
from .mobile_test_model_mixin import MobileTestModelMixin
from .pregnancy_model_mixin import PregnancyModelMixin
from .search_slug_model_mixin import SearchSlugModelMixin
from .sexual_partner_model_mixin import SexualPartnerMixin
| botswana-harvard/bcpp-subject | bcpp_subject/models/model_mixins/__init__.py | Python | gpl-3.0 | 523 |
from __future__ import absolute_import
from pex.http.tracer import *
| abel-von/commons | src/python/twitter/common/python/http/tracer.py | Python | apache-2.0 | 69 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_analyze_custom_documents_async.py
DESCRIPTION:
This sample demonstrates how to analyze a document with a custom
built model. The document must be of the same type as the documents the custom model
was built on. To learn how to build your own models, look at
sample_build_model_async.py.
The model can be built using the training files found here:
https://aka.ms/azsdk/formrecognizer/sampletrainingfiles
USAGE:
python sample_analyze_custom_documents_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CUSTOM_BUILT_MODEL_ID - the ID of your custom built model
-OR-
CONTAINER_SAS_URL - The shared access signature (SAS) Url of your Azure Blob Storage container with your training files.
A model will be built and used to run the sample.
"""
import os
import asyncio
async def analyze_custom_documents_async(custom_model_id):
path_to_sample_documents = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"..",
"..",
"./sample_forms/forms/Form_1.jpg",
)
)
# [START analyze_custom_documents_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_id = os.getenv("CUSTOM_BUILT_MODEL_ID", custom_model_id)
document_analysis_client = DocumentAnalysisClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with document_analysis_client:
# Make sure your document's type is included in the list of document types the custom model can analyze
with open(path_to_sample_documents, "rb") as f:
poller = await document_analysis_client.begin_analyze_document(
model=model_id, document=f
)
result = await poller.result()
for idx, document in enumerate(result.documents):
print("--------Analyzing document #{}--------".format(idx + 1))
print("Document has type {}".format(document.doc_type))
print("Document has document type confidence {}".format(document.confidence))
print("Document was analyzed with model with ID {}".format(result.model_id))
for name, field in document.fields.items():
field_value = field.value if field.value else field.content
print("......found field of type '{}' with value '{}' and with confidence {}".format(field.value_type, field_value, field.confidence))
# iterate over tables, lines, and selection marks on each page
for page in result.pages:
print("\nLines found on page {}".format(page.page_number))
for line in page.lines:
print("...Line '{}'".format(line.content))
for word in page.words:
print(
"...Word '{}' has a confidence of {}".format(
word.content, word.confidence
)
)
if page.selection_marks:
print("\nSelection marks found on page {}".format(page.page_number))
for selection_mark in page.selection_marks:
print(
"...Selection mark is '{}' and has a confidence of {}".format(
selection_mark.state, selection_mark.confidence
)
)
for i, table in enumerate(result.tables):
print("\nTable {} can be found on page:".format(i + 1))
for region in table.bounding_regions:
print("...{}".format(i + 1, region.page_number))
for cell in table.cells:
print(
"...Cell[{}][{}] has text '{}'".format(
cell.row_index, cell.column_index, cell.content
)
)
print("-----------------------------------")
# [END analyze_custom_documents_async]
async def main():
model_id = None
if os.getenv("CONTAINER_SAS_URL"):
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
from azure.ai.formrecognizer import DocumentBuildMode
endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT")
key = os.getenv("AZURE_FORM_RECOGNIZER_KEY")
if not endpoint or not key:
raise ValueError("Please provide endpoint and API key to run the samples.")
document_model_admin_client = DocumentModelAdministrationClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with document_model_admin_client:
poller = await document_model_admin_client.begin_build_model(
os.getenv("CONTAINER_SAS_URL"), DocumentBuildMode.TEMPLATE
)
model = await poller.result()
model_id = model.model_id
await analyze_custom_documents_async(model_id)
if __name__ == "__main__":
asyncio.run(main())
| Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_analyze_custom_documents_async.py | Python | mit | 5,565 |
from __future__ import absolute_import
from datetime import datetime, timedelta
from httplib import NOT_FOUND, OK, CREATED, INTERNAL_SERVER_ERROR
from flask import jsonify, Blueprint, current_app
from flask_restful import Api, Resource, reqparse, inputs
import pytz
from werkzeug.exceptions import NotFound
from . import database
from .general import json_abort
from .serialization import job2json, scanner_status2json, scanner2json
from scanomatic.scanning.update_scanner_status import (
update_scanner_status, UpdateScannerStatusError,
)
blueprint = Blueprint("scanners_api", __name__)
SCANNER_TIMEOUT = timedelta(minutes=5)
def _scanner_is_online(scanner):
return (
scanner.last_seen is not None
and datetime.now(pytz.utc) - scanner.last_seen < SCANNER_TIMEOUT
)
@blueprint.route("", methods=['GET'])
def scanners_get():
scannerstore = database.getscannerstore()
scanners = scannerstore.get_all()
return jsonify([
scanner2json(scanner, _scanner_is_online(scanner))
for scanner in scanners
])
@blueprint.route("/<scannerid>", methods=['GET'])
def scanner_get(scannerid):
scannerstore = database.getscannerstore()
try:
scanner = scannerstore.get_scanner_by_id(scannerid)
except KeyError:
return json_abort(
NOT_FOUND, reason="Scanner '{}' unknown".format(scannerid)
)
return jsonify(scanner2json(scanner, _scanner_is_online(scanner)))
@blueprint.route("/<scanner>/status", methods=['PUT'])
def scanner_status_update(scanner):
scannerstore = database.getscannerstore()
parser = reqparse.RequestParser()
parser.add_argument('job')
parser.add_argument(
'startTime',
dest='start_time',
type=inputs.datetime_from_iso8601,
required=True,
)
parser.add_argument(
'nextScheduledScan',
dest='next_scheduled_scan',
type=inputs.datetime_from_iso8601,
)
parser.add_argument(
'imagesToSend',
dest='images_to_send',
type=inputs.natural,
required=True,
)
parser.add_argument(
'devices',
dest='devices',
action='append',
)
args = parser.parse_args(strict=True)
try:
result = update_scanner_status(scannerstore, scanner, **args)
except UpdateScannerStatusError as error:
return json_abort(INTERNAL_SERVER_ERROR, reason=str(error))
status_code = CREATED if result.new_scanner else OK
return "", status_code
class ScannerJob(Resource):
def get(self, scannerid):
scanjobstore = database.getscanjobstore()
scannerstore = database.getscannerstore()
if not scannerstore.has_scanner_with_id(scannerid):
raise NotFound
job = scanjobstore.get_current_scanjob_for_scanner(
scannerid, datetime.now(pytz.utc))
if job:
return job2json(job)
api = Api(blueprint)
api.add_resource(ScannerJob, '/<scannerid>/job', endpoint='scanner-job')
| Scan-o-Matic/scanomatic | scanomatic/ui_server/scanners_api.py | Python | gpl-3.0 | 3,003 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# OpenModes - An eigenmode solver for open electromagnetic resonators
# Copyright (C) 2013 David Powell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
"""
Integration routines and quadrature rules over triangles
"""
import numpy as np
import numpy.linalg as la
import scipy.special
from openmodes.helpers import Identified
from openmodes.external.point_in_polygon import wn_PnPoly
class IntegrationRule(Identified):
def __len__(self):
return len(self.points)
def __repr__(self):
return "%s.%s(%d)" % (type(self).__module__, type(self).__name__,
self.order)
def __iter__(self):
"Iterate over all integration points and weights"
for point, w in zip(self.points, self.weights):
yield point, w
class DunavantRule(IntegrationRule):
"""The symmetric quadrature rule over a triangle as given in
D. A. Dunavant, Int. J. Numer. Methods Eng. 21, 1129 (1985).
xi_eta: array
The barycentric coordinates (xi, eta) of the quadrature points
weights: array
The weights, normalised to sum to 1/2
"""
def __init__(self, order):
"""Calculate the coefficients of the integration rule
Parameters
----------
order : integer
The order of the rule (maximum 20)
"""
super(DunavantRule, self).__init__()
from openmodes import dunavant
self.order = order
num_points = dunavant.dunavant_order_num(order)
xi_eta, weights = dunavant.dunavant_rule(order, num_points)
self.points = np.asfortranarray(xi_eta.T)
        # scale the weights so that they sum to 0.5
self.weights = np.asfortranarray((weights*0.5/sum(weights)).T)
# This makes a useful default e.g. for interpolation
triangle_centres = DunavantRule(1)
class GaussLegendreRule(IntegrationRule):
"""1D Gauss Legendre Quadrature Rule
Defined over the range (0, 1)
"""
def __init__(self, order):
"Weights and abscissae of Gauss-Legendre quadrature of order N"
super(GaussLegendreRule, self).__init__()
a = scipy.special.sh_legendre(order).weights
self.weights = a[:, 1].real
self.points = a[:, 0].real
class TrapezoidalRule(IntegrationRule):
"""1D Trapezoidal rule with evenly spaced points
Defined over the range (0, 1)
Includes the end-points
"""
def __init__(self, order):
super(TrapezoidalRule, self).__init__()
self.points = np.linspace(0.0, 1.0, order+1)
self.weights = np.ones(order+1)
self.weights[0] *= 0.5
self.weights[-1] *= 0.5
def cartesian_to_barycentric(r, nodes):
"""Convert cartesian coordinates to barycentric (area coordinates) in a
triangle
r - Nx2 array of cartesian coordinates
nodes - 3x2 array of nodes of the triangle
"""
T = np.array(((nodes[0, 0] - nodes[2, 0], nodes[1, 0] - nodes[2, 0]),
(nodes[0, 1] - nodes[2, 1], nodes[1, 1] - nodes[2, 1])))
bary_coords = np.empty((len(r), 3))
bary_coords[:, :2] = la.solve(T, (r[:, :2]-nodes[None, 2, :2]).T).T
bary_coords[:, 2] = 1.0 - bary_coords[:, 1] - bary_coords[:, 0]
return bary_coords
def triangle_electric_dipole(vertices, xi_eta, weights):
"""Calculate the dipole moment of a triangle with constant unit charge
Parameters
----------
vertices : ndarray
the vertices which define the triangle
xi_eta : ndarray
the points of the quadrature rule in barycentric form
weights : ndarray
the weights of the integration
Returns
-------
p : ndarray
the electric dipole moment of the triangle
"""
r = ((vertices[0]-vertices[2])*xi_eta[:, 0, None] +
(vertices[1]-vertices[2])*xi_eta[:, 1, None] +
vertices[2])
return np.sum(weights[0, :, None]*r, axis=0)
def sphere_fibonacci(num_points, cartesian=False):
"""Compute points on the surface of a sphere based on the Fibonacci spiral
Parameters
----------
num_points : integer
The number of points to place on the sphere
cartesian : boolean, optional
If True, cartesian coordinates will be returned instead of spherical
Returns
-------
phi, theta : array (if `cartesian` is False)
The polar and azimuthal angles of the points
x, y, z : array (if `cartesian` is True)
The cartesian coordinates of the points
Algorithm from:
R. Swinbank and R. James Purser, “Fibonacci grids: A novel approach to
global modelling,” Q. J. R. Meteorol. Soc., vol. 132, no. 619, pp.
1769–1793, Jul. 2006.
"""
n = num_points
phi = 0.5*(1 + np.sqrt(5))
i = -n+1 + 2*np.arange(num_points, dtype=np.float64)
theta = 2*np.pi*i / phi
sphi = i/n
cphi = np.sqrt((n + i) * (n - i)) / n
if cartesian:
x = cphi * np.sin(theta)
y = cphi * np.cos(theta)
z = sphi
return x, y, z
else:
phi = np.arctan2(sphi, cphi)
return theta, phi
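# Editor's sketch (not part of the original API): a minimal self-check that the
# Fibonacci points lie on the unit sphere, using only this module and numpy.
def _sphere_fibonacci_demo(num_points=100):
    """Generate cartesian Fibonacci points and verify they have unit radius."""
    x, y, z = sphere_fibonacci(num_points, cartesian=True)
    assert np.allclose(x**2 + y**2 + z**2, 1.0)
    return x, y, z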
class Contour(object):
"""A contour for line integration in the complex plane"""
def points_inside(self, points):
"Check each point to see whether it lies within the contour"
vertices = np.array([x for x, w in self])
vertices = np.hstack((vertices.real[:, None], vertices.imag[:, None]))
inside = np.empty(np.product(points.shape), dtype=np.bool)
for point_num, point in enumerate(points.flat):
inside[point_num] = wn_PnPoly((point.real, point.imag), vertices)
return inside
class CircularContour(Contour):
"""A circular contour in the complex frequency plane"""
def __init__(self, centre, radius, integration_rule=TrapezoidalRule(20)):
self.centre = centre
self.radius = radius
self.integration_rule = integration_rule
def __iter__(self):
d_theta = 2*np.pi
for x, w in self.integration_rule:
theta = 2*np.pi*x
s = np.exp(1j*theta)*self.radius+self.centre
ds_dtheta = 1j*np.exp(1j*theta)*self.radius
yield(s, w*ds_dtheta*d_theta)
def __len__(self):
return len(self.integration_rule)
class RectangularContour(Contour):
"""A rectangular contour in the complex frequency plane"""
def __init__(self, s_min, s_max, integration_rule=GaussLegendreRule(20)):
"""
Parameters
----------
s_min, s_max: complex
The corners of the rectangle
"""
min_real, max_real = sorted((s_min.real, s_max.real))
min_imag, max_imag = sorted((s_min.imag, s_max.imag))
self.integration_rule = integration_rule
self.coordinates = (min_real+1j*min_imag, max_real+1j*min_imag,
max_real+1j*max_imag, min_real+1j*max_imag)
def __iter__(self):
"""
Returns
-------
gen: generator
A generator which yields (s, w), where s is the complex frequency
and w is the integration weight
"""
# integrate over all 4 lines
for line_count in range(4):
s_start = self.coordinates[line_count]
s_end = self.coordinates[(line_count+1) % 4]
ds = s_end-s_start
for x, w in self.integration_rule:
s = s_start + ds*x
yield(s, w*ds)
def __len__(self):
return 4*len(self.integration_rule)
class ExternalModeContour(Contour):
"""A modified rectangular contour which finds external modes of objects,
including on the negative real axis, but avoids internal modes on the
imaginary axis, and takes a detour about the origin"""
def __init__(self, corner, integration_rule=GaussLegendreRule(20),
overlap_axes=None, avoid_origin=None):
"""
Parameters
----------
corner: complex
The furthest corner of the complex plane. Must have positive
imaginary and negative real parts
overlap_axes: real, optional
The amount by which to overlap the real axis, and to avoid the
imaginary axis
avoid_origin: real, optional
The radius by which to skirt around the origin
"""
if corner.real >= 0.0 or corner.imag <= 0.0:
raise ValueError("Corner frequency must have negative real "
"and positive imaginary parts")
corner_dimension = max(abs(corner.real), abs(corner.imag))
if overlap_axes is None:
overlap_axes = 1e-2*corner_dimension
if avoid_origin is None:
avoid_origin = 3e-2*corner_dimension
if avoid_origin < overlap_axes or overlap_axes < 0:
raise ValueError("Invalid contour shape")
cos_avoid = np.sqrt(avoid_origin**2-overlap_axes**2)
self.integration_rule = integration_rule
self.coordinates = (-overlap_axes+1j*cos_avoid,
-overlap_axes+1j*corner.imag,
corner,
corner.real-1j*overlap_axes,
-cos_avoid-1j*overlap_axes
)
self.avoid_angle = np.arcsin(overlap_axes/avoid_origin)
self.avoid_origin = avoid_origin
def __iter__(self):
"""
Returns
-------
gen: generator
A generator which yields (s, w), where s is the complex frequency
and w is the integration weight
"""
# integrate over all 4 straight lines
for line_count in range(4):
s_start = self.coordinates[line_count]
s_end = self.coordinates[line_count+1]
ds = s_end-s_start
for x, w in self.integration_rule:
s = s_start + ds*x
yield(s, w*ds)
# the circular arc avoiding the origin
t_start = np.pi*0.5+self.avoid_angle
t_end = self.avoid_angle
dt = t_end-t_start
for x, w in self.integration_rule:
t = t_start + dt*x
s = self.avoid_origin*(-np.sin(t) + 1j*np.cos(t))
ds_dt = self.avoid_origin*(-np.cos(t) - 1j*np.sin(t))
yield(s, w*ds_dt*dt)
def __len__(self):
return 5*len(self.integration_rule)
class EllipticalContour(Contour):
"""A quarter ellipse contour in the complex frequency plane"""
def __init__(self, radius_real, radius_imag, offset_real, offset_imag,
integration_rule=GaussLegendreRule(20)):
"""
Parameters
----------
radius_real, radius_imag: real
The radii of the real and imaginary parts. The signs of these
determine which quadrant of the complex plane the quarter ellipse
will be in
offset_real, offset_imag: real
The offsets of the straight line parts from the real and imaginary
axes. Must be smaller in magnitude than the corresponding radii.
"""
self.radius_real = radius_real
self.radius_imag = radius_imag
self.offset_imag = offset_imag
self.offset_real = offset_real
self.integration_rule = integration_rule
def __iter__(self):
"""
Returns
-------
gen: generator
A generator which yields (s, w), where s is the complex frequency
and w is the integration weight
"""
radius_real = self.radius_real
radius_imag = self.radius_imag
offset_imag = self.offset_imag
offset_real = self.offset_real
# correct for the direction of rotation changing
sign = -np.sign(radius_real/radius_imag)
# the points of maximum real and imag (different from radius due to offsets)
max_real = radius_real*np.sqrt(1.0 - (offset_imag/radius_imag)**2)
max_imag = radius_imag*np.sqrt(1.0 - (offset_real/radius_real)**2)
# the line parallel to the real axis
s_start = max_real+1j*offset_imag
s_end = offset_real+1j*offset_imag
ds = s_end-s_start
for x, w in self.integration_rule:
s = s_start + ds*x
yield(s, w*ds*sign)
# the line parallel to the imaginary axis
s_start = offset_real+1j*offset_imag
s_end = offset_real+1j*max_imag
ds = s_end-s_start
for x, w in self.integration_rule:
s = s_start + ds*x
yield(s, w*ds*sign)
# the elliptical segment
t_start = np.arcsin(offset_real/radius_real)
t_end = np.arccos(offset_imag/radius_imag)
dt = t_end-t_start
for x, w in self.integration_rule:
t = t_start + dt*x
s = radius_real*np.sin(t) + 1j*radius_imag*np.cos(t)
ds_dt = radius_real*np.cos(t) - 1j*radius_imag*np.sin(t)
yield(s, w*ds_dt*dt*sign)
def __len__(self):
return 3*len(self.integration_rule)
| DavidPowell/OpenModes | openmodes/integration.py | Python | gpl-3.0 | 13,770 |
#!/usr/bin/python
## ----------------------------------------------------------------------------
## GMM Training using Multiprocessing on Large Datasets
## Copyright (C) 2014, D S Pavan Kumar (Email: dspavankumar[at]gmail.com)
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
## ----------------------------------------------------------------------------
import numpy as np
from multiprocessing import Process, Queue, cpu_count
from iofile import *
import time
## Class definition for diagonal GMM
class GMM:
## Initialisation
def __init__ (self, dim, mix=1):
"""Initialises a GMM of required dimensionality and (optional) mixture count"""
self.weights = (1.0/mix) * np.ones(mix)
self.means = np.random.randn(mix, dim)
self.vars = np.ones((mix, dim))
self.mix = mix
self.dim = dim
self.__varfloor__ = 0.01
## Printing means, variances and weights
def __str__ (self):
"""Prints the GMM object"""
return "Means:\n%s\nVariances:\n%s\n Weights:\n%s\n" % (self.means, self.vars, self.weights)
## Likelihood of a vector
def likelihood(self, vec):
"""Calculates likelihood of a (numpy) vector"""
vecr = np.tile (vec, self.mix).reshape(self.mix, self.dim)
return (self.weights * ((2*np.pi)**(-0.5*self.dim)) * np.prod(self.vars,1)**(-0.5) ) * \
( np.exp(-0.5 * np.sum( (vecr-self.means)**2 /self.vars, 1) ) )
## Posterior of a vector (normalised likelihood vector, sums to unity)
def posterior(self, vec):
"""Calculates posterior (normalised likelihood) of a vector given the GMM"""
if self.mix == 1:
return np.array([1])
post = self.likelihood (vec)
postsum = np.sum (post)
return post/postsum #if (postsum > 1e-12) else np.zeros((self.mix))
## Double the number of mixtures
def double_mixtures (self):
"""Splits the number of mixtures of the GMM. Each mixture component is split"""
bias = np.zeros ((self.mix, self.dim))
for i in range(self.mix):
argmaxv = np.argmax (self.vars[i])
bias[i][argmaxv] = self.vars[i][argmaxv]
self.means = np.vstack ((self.means + 0.2*bias, self.means - 0.2*bias))
self.weights = np.tile (self.weights/2, 2)
self.vars = np.vstack ((self.vars, self.vars))
self.mix = 2*self.mix
## Training step 1 of 3: Initialise statistics accumulation
def __init_stats__ (self):
"""Initialises the accumulation of statistics for GMM re-estimation"""
self.__sgam__ = np.zeros(self.mix)
self.__sgamx__ = np.zeros((self.mix, self.dim))
self.__sgamxx__ = np.zeros((self.mix, self.dim))
## Training step 3 of 3: Recompute GMM parameters
def __finish_stats__ (self):
"""Performs M-step of the EM"""
self.weights = self.__sgam__ / np.sum(self.__sgam__)
denom = self.__sgam__.repeat(self.dim).reshape((self.mix,self.dim))
self.means = self.__sgamx__ / denom
self.vars = self.__sgamxx__ / denom - (self.means**2)
self.vars[self.vars < self.__varfloor__] = self.__varfloor__
## Training step 2 of 3: Update the statistics from a set of features
def __update_worker__ (self, mfclist, Q):
"""Accumulates statistics from a list of files - worker routine"""
sgam = np.zeros(self.mix)
sgamx = np.zeros((self.mix, self.dim))
sgamxx = np.zeros((self.mix, self.dim))
for mfcfile in mfclist:
feats = readfile (mfcfile)
for feat in feats:
gam = self.posterior(feat)
sgam += gam
sgamx += np.outer(gam, feat)
sgamxx += np.outer(gam, feat**2)
Q.put([sgam, sgamx, sgamxx])
## GMM update routine - master
def __update_stats__ (self, mfclist, threads=cpu_count()):
"""Accumulates statistics from a list of files"""
with open(mfclist, 'r') as f:
mfcfiles = f.read().splitlines()
Q = Queue()
processes = []
for thr in xrange(threads):
p = Process (target=self.__update_worker__, args=(mfcfiles[thr::threads], Q))
p.start()
processes.append(p)
        while Q.qsize() < threads:
time.sleep(0.01)
for thr in xrange(threads):
sgam, sgamx, sgamxx = Q.get()
self.__sgam__ += sgam
self.__sgamx__ += sgamx
self.__sgamxx__ += sgamxx
## Expectation-Maximisation (EM)
def em (self, mfclist, threads=cpu_count()):
"""A single expectation maximisation step"""
print "Running EM on", str(self.mix), "mixtures"
self.__init_stats__()
self.__update_stats__(mfclist, threads)
self.__finish_stats__()
## Train GMM (wrapper)
def train(self, mfclist, mix, threads=cpu_count()):
"""Wrapper of training a GMM"""
print "CPU threads being used:", str(threads)
if not (np.log(mix)/np.log(2)).is_integer():
print "Current version supports mixtures only in powers of 2. Training more mixtures."
m = self.mix
if m >= mix:
self.__init__(self.dim)
m = 1
self.em (mfclist)
if mix == 1:
return
while m < mix:
self.double_mixtures()
for i in range(3):
self.em (mfclist)
m *= 2
for i in range(3):
self.em (mfclist)
## Save the GMM
def saveas (self, filename):
"""Saves the GMM object"""
import pickle
with open (filename, 'w') as f:
pickle.dump (self, f)
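## Illustrative usage sketch (not part of the original file). The feature
## dimensionality, target mixture count and the list file 'train.lst' are
## assumptions; the list file is expected to contain one feature-file path per
## line, readable by readfile() from iofile.
if __name__ == '__main__':
    gmm = GMM(dim=39, mix=1)
    gmm.train('train.lst', mix=64)
    gmm.saveas('final_64.gmm')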
| dspavankumar/gmm | gmm.py | Python | gpl-2.0 | 6,632 |
# -*- coding: utf-8 -*-
"""
Membership Management
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# =============================================================================
def index():
""" Dashboard """
return s3db.cms_index(c, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Module homepage for non-Admin users when no CMS content found
"""
# Just redirect to the list of Members
s3_redirect_default(URL(f="membership", args=["summary"]))
# =============================================================================
def membership_type():
"""
REST Controller
"""
if not auth.s3_has_role("ADMIN"):
s3.filter = auth.filter_by_root_org(s3db.member_membership_type)
output = s3_rest_controller()
return output
# =============================================================================
def membership():
"""
REST Controller
"""
def prep(r):
if r.interactive:
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
from s3 import s3_phone_represent, S3PhoneWidget
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
if r.id and r.component is None and r.method != "delete":
# Redirect to person controller
vars = {"membership.id": r.id}
redirect(URL(f="person", vars=vars))
# Assume members under 120
s3db.pr_person.date_of_birth.widget = \
S3CalendarWidget(past_months=1440)
elif r.representation == "xls":
# Split person_id into first/middle/last to make it match Import sheets
list_fields = s3db.get_config("member_membership",
"list_fields")
list_fields.remove("person_id")
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
] + list_fields
s3db.configure("member_membership",
list_fields = list_fields)
return True
s3.prep = prep
return s3_rest_controller(rheader = s3db.member_rheader)
# =============================================================================
def person():
"""
Person Controller
- used for Personal Profile & Imports
- includes components relevant to Membership
"""
tablename = "pr_person"
table = s3db.pr_person
s3db.configure(tablename,
deletable = False,
)
s3.crud_strings[tablename].update(
title_upload = T("Import Members"))
s3db.configure("member_membership",
delete_next = URL("member", "membership"),
)
# Custom Method for Contacts
set_method = s3db.set_method
set_method("pr", "person",
method = "contacts",
action = s3db.pr_Contacts)
# Custom Method for CV
set_method("pr", "person",
method = "cv",
# @ToDo: Allow Members to have a CV without enabling HRM?
action = s3db.hrm_CV)
# Import pre-process
def import_prep(data):
"""
Deletes all Member records of the organisation/branch
before processing a new data import
"""
if s3.import_replace:
resource, tree = data
if tree is not None:
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except:
pass
if org_name:
mtable = s3db.member_membership
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(mtable.organisation_id == otable.id)
resource = s3db.resource("member_membership", filter=query)
# Use cascade=True so that the deletion gets
# rolled back if the import fails:
resource.delete(format="xml", cascade=True)
s3.import_prep = import_prep
# CRUD pre-process
def prep(r):
if r.interactive:
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
from s3 import s3_phone_represent, S3PhoneWidget
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
if r.component_name == "membership":
s3.crud_strings["member_membership"].update(
label_delete_button = T("Delete Membership"),
label_list_button = T("List Memberships")
)
if r.method not in ("import", "search_ac", "validate"):
if not r.component:
# Assume members under 120
s3db.pr_person.date_of_birth.widget = \
S3CalendarWidget(past_months=1440)
resource = r.resource
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="membership"))
member_id = get_vars.get("membership.id", None)
if member_id and r.component_name == "membership":
r.component_id = member_id
s3db.configure("member_membership",
insertable = False,
)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component and "buttons" in output:
# Provide correct list-button (non-native controller)
buttons = output["buttons"]
if "list_btn" in buttons:
crud_button = r.resource.crud.crud_button
buttons["list_btn"] = crud_button(None,
tablename="member_membership",
name = "label_list_button",
_href = URL(c="member", f="membership"),
_id = "list-btn",
)
return output
s3.postp = postp
output = s3_rest_controller("pr", "person",
replace_option = T("Remove existing data before import"),
rheader = s3db.member_rheader,
)
return output
# END =========================================================================
| flavour/eden | controllers/member.py | Python | mit | 7,976 |
from .contact import *
from .users import *
from .user_functions import *
| Sult/evetool | users/models/__init__.py | Python | mit | 74 |
# -*- coding: utf-8 -*-
import codecs
from module.plugins.Container import Container
from module.utils import fs_encode
class LinkList(Container):
__name__ = "LinkList"
__version__ = "0.12"
__pattern__ = r'.+\.txt'
__config__ = [("clear", "bool", "Clear Linklist after adding", False),
("encoding", "string", "File encoding (default utf-8)", "")]
__description__ = """Read link lists in txt format"""
__author_name__ = ("spoob", "jeix")
__author_mail__ = ("spoob@pyload.org", "jeix@hasnomail.com")
def decrypt(self, pyfile):
try:
file_enc = codecs.lookup(self.getConfig("encoding")).name
except:
file_enc = "utf-8"
print repr(pyfile.url)
print pyfile.url
file_name = fs_encode(pyfile.url)
txt = codecs.open(file_name, 'r', file_enc)
links = txt.readlines()
curPack = "Parsed links from %s" % pyfile.name
packages = {curPack:[],}
for link in links:
link = link.strip()
if not link:
continue
if link.startswith(";"):
continue
if link.startswith("[") and link.endswith("]"):
# new package
curPack = link[1:-1]
packages[curPack] = []
continue
packages[curPack].append(link)
txt.close()
# empty packages fix
delete = []
for key,value in packages.iteritems():
if not value:
delete.append(key)
for key in delete:
del packages[key]
if self.getConfig("clear"):
try:
txt = open(file_name, 'wb')
txt.close()
except:
self.logWarning(_("LinkList could not be cleared."))
for name, links in packages.iteritems():
self.packages.append((name, links, name))
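# Illustrative example (assumption, inferred from the parsing logic above) of
# a link-list file accepted by this container plugin:
#
#   ; lines starting with ';' are ignored
#   [First package]
#   http://example.com/file1.zip
#   http://example.com/file2.zip
#   [Second package]
#   http://example.com/file3.zip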
| estaban/pyload | module/plugins/container/LinkList.py | Python | gpl-3.0 | 1,951 |
from pytldr.summarize.lsa import LsaOzsoy, LsaSteinberger
from pytldr.summarize.relevance import RelevanceSummarizer
from pytldr.summarize.textrank import TextRankSummarizer
if __name__ == "__main__":
txt = """
(Reuters) - Talks between Greece and euro zone finance ministers over the country's debt crisis broke down on Monday when Athens rejected a proposal to request a six-month extension of its international bailout package as "unacceptable".
The unexpectedly rapid collapse raised doubts about Greece's future in the single currency area after a new leftist-led government vowed to scrap the 240 billion euro ($272.4 billion) bailout, reverse austerity policies and end cooperation with EU/IMF inspectors.
Dutch Finance Minister Jeroen Dijsselbloem, who chaired the meeting, said Athens had until Friday to request an extension, otherwise the bailout would expire at the end of the month. The Greek state and its banks would then face a looming cash crunch.
How long Greece can keep itself afloat without foreign support is uncertain. The euro fell against the dollar after the talks broke up but with Wall Street closed for a holiday, the full force of any market reaction may only be felt on Tuesday.
The European Central Bank will decide on Wednesday whether to maintain emergency lending to Greek banks that are bleeding deposits at an estimated rate of 2 billion euros ($2.27 billion) a week. The state faces some heavy loan repayments in March.
Seemingly determined not to be browbeaten by a chorus of EU ministers intoning that he needed to swallow Greek pride and come back to ask for the extension, Finance Minister Yanis Varoufakis, a left-wing academic economist, voiced confidence that a deal on different terms was within reach within days.
"I have no doubt that, within the next 48 hours Europe, is going to come together and we shall find the phrasing that is necessary so that we can submit it and move on to do the real work that is necessary," Varoufakis told a news conference, warning that the language of ultimatum never worked in Europe.
He cited what he called a "splendid" proposal from the European Commission by which Greece would get four to six months credit in return for a freeze on its anti-austerity policies. He said he had been ready to sign that - but that Dijsselbloem had then presented a different, and "highly problematic", deal.
A draft of what Dijsselbloem proposed, swiftly leaked by furious Greek officials, spoke of Athens extending and abiding by its "current programme" - anathema to a government which, as Varoufakis said, was elected last month to scrap the package.
"MORE LOGIC, LESS IDEOLOGY"
Commission officials denied offering a separate plan and the man Varoufakis said presented it, Economics Commissioner Pierre Moscovici, stuck to the same script as Dijsselbloem.
Greece must extend its bailout on the current conditions, he said, even if that could be couched in language that did not embarrass Prime Minister Alexis Tsipras before his supporters.
"We need more logic and less ideology," Moscovici said as EU officials fretted about how seriously the novice Greek leaders were taking their finances and how far concerns about semantics and saving political face might trump pressing economic needs.
Dijsselbloem, who insisted he was willing to be flexible on terminology that has become highly charged for Greek voters, said further talks would depend on Greece requesting a bailout. Varoufakis and the other ministers will remain in Brussels on Tuesday for a routine meeting on the EU economy.
"The general feeling in the Eurogroup is still that the best way forward would be for the Greek authorities to seek an extension of the programme," Dijsselbloem told a news briefing.
Echoing that, Moscovici insisted there was no "Plan B", a phrase bounced back in his turn by Varoufakis, who invoked the language of high stakes poker: "It's not a bluff," he said.
"It's Plan A. There is no Plan B."
The talks, which had been expected to last late into the night, broke up in less than four hours - less even than a previous meeting last Wednesday after which EU officials voiced concern and astonishment at the Greeks' lack of preparation.
The euro dropped nearly a U.S. cent on word of stalemate, though edge back to $1.1350, about 0.5 percent down on the day.
Both sides showed signs of fraying patience, with several ministers complaining of disappointment and fearing "disaster". Dijsselbloem and Varoufakis spoke of a need to rebuild trust.
Asked what would happen if Greece did not request a bailout extension, Edward Scicluna, the finance minister of the smallest EU state Malta said: "That would be it; it would be a disaster.
"Greece has to adjust, to realise the seriousness of the situation, because time is running out."
Germany, the euro zone's main paymaster and Greece's biggest creditor, stuck to its hard line.
German Finance Minister Wolfgang Schaeuble said before the talks that Greece had lived beyond its means for a long time and there was no appetite in Europe for giving it any more money without guarantees it was getting its finances in order.
MONEY FLEEING
As the meeting in Brussels broke up, a senior Greek banker said Greece's stance boded ill for the markets and the banks.
"It is a very negative development for the economy and the banks. The outflows will continue. We are losing 400-500 million (euros) every day and that means about 2 billion every week. We will have pressure on stocks and bond yields tomorrow," he said.
Varoufakis spelled out in a combative New York Times column Greece's refusal to be treated as a "debt colony" subjected to "the greatest austerity for the most depressed economy", adding: "The lines that we have presented as red will not be crossed."
An opinion poll showed 68 percent of Greeks want a "fair" compromise with euro zone partners while 30 percent said the government should stand tough even if it means reverting to the drachma. The poll found 81 percent want to stay in the euro.
Deposit outflows in Greece have picked up. JP Morgan bank said that at the current pace Greek banks had only 14 weeks before they run out of collateral to obtain funds from the central bank.
The ECB has allowed the Greek central bank to provide emergency lending to the banks, but a failure of the debt talks could mean the imposition of capital controls.
Euro zone member Cyprus was forced to close its banks for two weeks and introduce capital controls during a 2013 crisis. Such controls would need to be imposed when banks are closed. Greek banks are closed next Monday for a holiday.
(Additional reporting by Yann Le Guernigou, Michael Nienaber, Andrew Callus, Jan Strupczewski, Alastair Macdonald, Adrian Croft, Foo Yun Chee, Robin Emmott, Tom Koerkemeier, Julia Fioretti and Francesca Landini; Writing by Jeremy Gaunt, Paul Taylor and Alastair Macdonald; Editing by Paul Taylor, Giles Elgood and Eric Walsh)
"""
lsa_o = LsaOzsoy()
lsa_s = LsaSteinberger()
relevance = RelevanceSummarizer()
textrank = TextRankSummarizer()
print '\n\nLSA Ozsoy:\n'
summary = lsa_o.summarize(txt, length=5)
for sentence in summary:
print sentence
print '\n\nLSA Steinberger:\n'
summary = lsa_s.summarize(txt, length=5)
for sentence in summary:
print sentence
print '\n\nRelevance:\n'
summary = relevance.summarize(txt, length=5)
for sentence in summary:
print sentence
print '\n\nTextRank:\n'
summary = textrank.summarize(txt, length=5)
for sentence in summary:
print sentence | jaijuneja/PyTLDR | example.py | Python | gpl-3.0 | 7,755 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2020 OSMC (KodeKarnage)
This file is part of script.module.osmcsetting.updates
SPDX-License-Identifier: GPL-2.0-or-later
See LICENSES/GPL-2.0-or-later for more information.
"""
__all__ = ['osmcupdates', 'service']
| osmc/osmc | package/mediacenter-addon-osmc/src/script.module.osmcsetting.updates/resources/lib/__init__.py | Python | gpl-2.0 | 280 |
import os
# ***********************************
# Settings common to all environments
# ***********************************
# Application settings
APP_NAME = "INcDbUser"
APP_SYSTEM_ERROR_SUBJECT_LINE = APP_NAME + " system error"
# Flask settings
CSRF_ENABLED = True
# Flask-User settings
USER_APP_NAME = APP_NAME
USER_ENABLE_CHANGE_PASSWORD = True # Allow users to change their password
USER_ENABLE_CHANGE_USERNAME = False # Allow users to change their username
USER_ENABLE_CONFIRM_EMAIL = True # Force users to confirm their email
USER_ENABLE_FORGOT_PASSWORD = True # Allow users to reset their passwords
USER_ENABLE_EMAIL = True # Register with Email
USER_ENABLE_REGISTRATION = True # Allow new users to register
USER_ENABLE_RETYPE_PASSWORD = True # Prompt for `retype password` in registration and password forms
USER_ENABLE_USERNAME = False # Register and Login with username
USER_AFTER_LOGIN_ENDPOINT = 'core.user_page'
USER_AFTER_LOGOUT_ENDPOINT = 'core.home_page'
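# Illustrative sketch (assumption, not part of the original file): settings
# defined here are typically loaded into the application factory, e.g.
#
#   app = Flask(__name__)
#   app.config.from_object('app.startup.common_settings')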
| UCL-CS35/incdb-user | app/startup/common_settings.py | Python | bsd-2-clause | 953 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0011_auto_20150924_1340'),
]
operations = [
migrations.AlterModelOptions(
name='business',
options={'verbose_name': 'Merchant', 'ordering': ['category', 'name']},
),
migrations.AlterModelOptions(
name='categoryimage',
options={'verbose_name': 'Carousel Category Image', 'ordering': ['category']},
),
migrations.AlterModelOptions(
name='shopcategory',
options={'verbose_name': 'Merchant Category'},
),
]
| multivoxmuse/highlands-square.com | hisquare/home/migrations/0012_auto_20151021_2027.py | Python | gpl-2.0 | 723 |
import pathlib
from ...helpers import article
from .._helpers import _read, register
source = article(
authors=["D.P. Laurie"],
title="Algorithm 584: CUBTRI: Automatic Cubature over a Triangle",
journal="ACM Trans. Math. Softw.",
month="jun",
year="1982",
url="https://doi.org/10.1145/355993.356001",
)
this_dir = pathlib.Path(__file__).resolve().parent
def cubtri():
"""
    See also
https://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tri/quadrature_rules_tri.html
"""
return _read(this_dir / "cubtri.json", source)
register([cubtri])
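# Illustrative usage sketch (not part of the original file); the method name
# and call signature below follow the pattern of other quadpy triangle schemes
# and are assumptions here:
#
#   scheme = cubtri()
#   val = scheme.integrate(lambda x: 1.0, [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])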
| nschloe/quadpy | src/quadpy/t2/_cubtri/__init__.py | Python | mit | 594 |
#!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for cinder management.
"""
from __future__ import print_function
import logging as python_logging
import os
import prettytable
import sys
import time
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import migration
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
# Need to register global_opts
from cinder.common import config # noqa
from cinder.common import constants
from cinder import context
from cinder import db
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import version
from cinder.volume import utils as vutils
CONF = cfg.CONF
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
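# Illustrative example (not from the original file) of how the decorator is
# used by the command classes below: each @args call records an argparse
# argument spec which add_command_parsers() later hands to add_argument().
#
#     class ExampleCommands(object):
#         @args('--limit', type=int, help='Maximum number of rows to show')
#         def list(self, limit=None):
#             pass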
class ShellCommands(object):
def bpython(self):
"""Runs a bpython shell.
Falls back to Ipython/python shell if unavailable
"""
self.run('bpython')
def ipython(self):
"""Runs an Ipython shell.
Falls back to Python shell if unavailable
"""
self.run('ipython')
def python(self):
"""Runs a python shell.
Falls back to Python shell if unavailable
"""
self.run('python')
@args('--shell',
metavar='<bpython|ipython|python>',
help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
shell = 'bpython'
if shell == 'bpython':
try:
import bpython
bpython.embed()
except ImportError:
shell = 'ipython'
if shell == 'ipython':
try:
from IPython import embed
embed()
except ImportError:
try:
# Ipython < 0.11
# Explicitly pass an empty list as arguments, because
# otherwise IPython would use sys.argv from this script.
import IPython
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
# no IPython module
shell = 'python'
if shell == 'python':
import code
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try',
# because we already know 'readline' was imported successfully.
import rlcompleter # noqa
readline.parse_and_bind("tab:complete")
code.interact()
@args('--path', required=True, help='Script path')
def script(self, path):
"""Runs the script from the specified path with flags set properly."""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
print('%s' % caught_exception)
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'cinder-manage db sync' before running this command."))
sys.exit(1)
class HostCommands(object):
"""List hosts."""
@args('zone', nargs='?', default=None,
help='Availability Zone (default: %(default)s)')
def list(self, zone=None):
"""Show a list of all physical hosts.
Can be filtered by zone.
args: [zone]
"""
print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'})
ctxt = context.get_admin_context()
services = objects.ServiceList.get_all(ctxt)
if zone:
services = [s for s in services if s.availability_zone == zone]
hosts = []
for srv in services:
if not [h for h in hosts if h['host'] == srv['host']]:
hosts.append(srv)
for h in hosts:
print(_("%(host)-25s\t%(availability_zone)-15s")
% {'host': h['host'],
'availability_zone': h['availability_zone']})
class DbCommands(object):
"""Class for managing the database."""
online_migrations = ()
def __init__(self):
pass
@args('version', nargs='?', default=None, type=int,
help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
if version is not None and version > db.MAX_INT:
print(_('Version should be less than or equal to '
'%(max_version)d.') % {'max_version': db.MAX_INT})
sys.exit(1)
try:
return db_migration.db_sync(version)
except db_exc.DbMigrationError as ex:
print("Error during database migration: %s" % ex)
sys.exit(1)
def version(self):
"""Print the current database version."""
print(migration.db_version(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
db_migration.INIT_VERSION))
@args('age_in_days', type=int,
help='Purge deleted rows older than age in days')
def purge(self, age_in_days):
"""Purge deleted rows older than a given age from cinder tables."""
age_in_days = int(age_in_days)
if age_in_days <= 0:
print(_("Must supply a positive, non-zero value for age"))
sys.exit(1)
if age_in_days >= (int(time.time()) / 86400):
print(_("Maximum age is count of days since epoch."))
sys.exit(1)
ctxt = context.get_admin_context()
try:
db.purge_deleted_rows(ctxt, age_in_days)
except db_exc.DBReferenceError:
print(_("Purge command failed, check cinder-manage "
"logs for more details."))
sys.exit(1)
def _run_migration(self, ctxt, max_count, ignore_state):
ran = 0
migrations = {}
for migration_meth in self.online_migrations:
count = max_count - ran
try:
found, done = migration_meth(ctxt, count, ignore_state)
except Exception:
print(_("Error attempting to run %(method)s") %
{'method': migration_meth.__name__})
found = done = 0
name = migration_meth.__name__
remaining = found - done
if found:
print(_('%(found)i rows matched query %(meth)s, %(done)i '
'migrated, %(remaining)i remaining') % {'found': found,
'meth': name,
'done': done,
'remaining':
remaining})
migrations.setdefault(name, (0, 0, 0))
migrations[name] = (migrations[name][0] + found,
migrations[name][1] + done,
migrations[name][2] + remaining)
if max_count is not None:
ran += done
if ran >= max_count:
break
return migrations
@args('--max_count', metavar='<number>', dest='max_count', type=int,
help='Maximum number of objects to consider.')
@args('--ignore_state', action='store_true', dest='ignore_state',
help='Force records to migrate even if another operation is '
'performed on them. This may be dangerous, please refer to '
'release notes for more information.')
def online_data_migrations(self, max_count=None, ignore_state=False):
"""Perform online data migrations for the release in batches."""
ctxt = context.get_admin_context()
if max_count is not None:
unlimited = False
if max_count < 1:
print(_('Must supply a positive value for max_number.'))
sys.exit(127)
else:
unlimited = True
max_count = 50
print(_('Running batches of %i until complete.') % max_count)
ran = None
migration_info = {}
while ran is None or ran != 0:
migrations = self._run_migration(ctxt, max_count, ignore_state)
migration_info.update(migrations)
ran = sum([done for found, done, remaining in migrations.values()])
if not unlimited:
break
t = prettytable.PrettyTable([_('Migration'),
_('Found'),
_('Done'),
_('Remaining')])
for name in sorted(migration_info.keys()):
info = migration_info[name]
t.add_row([name, info[0], info[1], info[2]])
print(t)
sys.exit(1 if ran else 0)
class VersionCommands(object):
"""Class for exposing the codebase version."""
def __init__(self):
pass
def list(self):
print(version.version_string())
def __call__(self):
self.list()
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state."""
def __init__(self):
self._client = None
def _rpc_client(self):
if self._client is None:
if not rpc.initialized():
rpc.init(CONF)
target = messaging.Target(topic=constants.VOLUME_TOPIC)
serializer = objects.base.CinderObjectSerializer()
self._client = rpc.get_client(target, serializer=serializer)
return self._client
@args('volume_id',
help='Volume ID to be deleted')
def delete(self, volume_id):
"""Delete a volume, bypassing the check that it must be available."""
ctxt = context.get_admin_context()
volume = objects.Volume.get_by_id(ctxt, volume_id)
host = vutils.extract_host(volume.host) if volume.host else None
if not host:
print(_("Volume not yet assigned to host."))
print(_("Deleting volume from database and skipping rpc."))
volume.destroy()
return
if volume.status == 'in-use':
print(_("Volume is in-use."))
print(_("Detach volume from instance and then try again."))
return
cctxt = self._rpc_client().prepare(server=host)
cctxt.cast(ctxt, "delete_volume", volume_id=volume.id, volume=volume)
@args('--currenthost', required=True, help='Existing volume host name')
@args('--newhost', required=True, help='New volume host name')
def update_host(self, currenthost, newhost):
"""Modify the host name associated with a volume.
Particularly to recover from cases where one has moved
their Cinder Volume node, or modified their backend_name in a
multi-backend config.
"""
ctxt = context.get_admin_context()
volumes = db.volume_get_all_by_host(ctxt,
currenthost)
for v in volumes:
db.volume_update(ctxt, v['id'],
{'host': newhost})
class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s)."""
def __init__(self):
pass
@args('param', nargs='?', default=None,
help='Configuration parameter to display (default: %(default)s)')
def list(self, param=None):
"""List parameters configured for cinder.
Lists all parameters configured for cinder unless an optional argument
is specified. If the parameter is specified we only print the
requested parameter. If the parameter is not found an appropriate
error is produced by .get*().
"""
param = param and param.strip()
if param:
print('%s = %s' % (param, CONF.get(param)))
else:
for key, value in CONF.items():
print('%s = %s' % (key, value))
class GetLogCommands(object):
"""Get logging information."""
def errors(self):
"""Get all of the errors from the log files."""
error_found = 0
if CONF.log_dir:
logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
for file in logs:
log_file = os.path.join(CONF.log_dir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
for index, line in enumerate(lines):
if line.find(" ERROR ") > 0:
error_found += 1
if print_name == 0:
print(log_file + ":-")
print_name = 1
print(_("Line %(dis)d : %(line)s") %
{'dis': len(lines) - index, 'line': line})
if error_found == 0:
print(_("No errors in logfiles!"))
@args('num_entries', nargs='?', type=int, default=10,
help='Number of entries to list (default: %(default)d)')
def syslog(self, num_entries=10):
"""Get <num_entries> of the cinder syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
if os.path.exists('/var/log/syslog'):
log_file = '/var/log/syslog'
elif os.path.exists('/var/log/messages'):
log_file = '/var/log/messages'
else:
print(_("Unable to find system log file!"))
sys.exit(1)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print(_("Last %s cinder syslog entries:-") % (entries))
for line in lines:
if line.find("cinder") > 0:
count += 1
print(_("%s") % (line))
if count == entries:
break
if count == 0:
print(_("No cinder entries in syslog!"))
class BackupCommands(object):
"""Methods for managing backups."""
def list(self):
"""List all backups.
List all backups (including ones in progress) and the host
on which the backup operation is running.
"""
ctxt = context.get_admin_context()
backups = objects.BackupList.get_all(ctxt)
hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
print(hdr % (_('ID'),
_('User ID'),
_('Project ID'),
_('Host'),
_('Name'),
_('Container'),
_('Status'),
_('Size'),
_('Object Count')))
res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d"
for backup in backups:
object_count = 0
if backup['object_count'] is not None:
object_count = backup['object_count']
print(res % (backup['id'],
backup['user_id'],
backup['project_id'],
backup['host'],
backup['display_name'],
backup['container'],
backup['status'],
backup['size'],
object_count))
@args('--currenthost', required=True, help='Existing backup host name')
@args('--newhost', required=True, help='New backup host name')
def update_backup_host(self, currenthost, newhost):
"""Modify the host name associated with a backup.
Particularly to recover from cases where one has moved
their Cinder Backup node, and not set backup_use_same_backend.
"""
ctxt = context.get_admin_context()
backups = objects.BackupList.get_all_by_host(ctxt, currenthost)
for bk in backups:
bk.host = newhost
bk.save()
class BaseCommand(object):
@staticmethod
def _normalize_time(time_field):
return time_field and timeutils.normalize_time(time_field)
@staticmethod
def _state_repr(is_up):
return ':-)' if is_up else 'XXX'
class ServiceCommands(BaseCommand):
"""Methods for managing services."""
def list(self):
"""Show a list of all cinder services."""
ctxt = context.get_admin_context()
services = objects.ServiceList.get_all(ctxt)
print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s"
print(print_format % (_('Binary'),
_('Host'),
_('Zone'),
_('Status'),
_('State'),
_('Updated At'),
_('RPC Version'),
_('Object Version'),
_('Cluster')))
for svc in services:
art = self._state_repr(svc.is_up)
status = 'disabled' if svc.disabled else 'enabled'
updated_at = self._normalize_time(svc.updated_at)
rpc_version = svc.rpc_current_version
object_version = svc.object_current_version
cluster = svc.cluster_name or ''
print(print_format % (svc.binary, svc.host,
svc.availability_zone, status, art,
updated_at, rpc_version, object_version,
cluster))
@args('binary', type=str,
help='Service to delete from the host.')
@args('host_name', type=str,
help='Host from which to remove the service.')
def remove(self, binary, host_name):
"""Completely removes a service."""
ctxt = context.get_admin_context()
try:
svc = objects.Service.get_by_args(ctxt, host_name, binary)
svc.destroy()
except exception.ServiceNotFound as e:
print(_("Host not found. Failed to remove %(service)s"
" on %(host)s.") %
{'service': binary, 'host': host_name})
print(u"%s" % e.args)
return 2
print(_("Service %(service)s on host %(host)s removed.") %
{'service': binary, 'host': host_name})
class ClusterCommands(BaseCommand):
"""Methods for managing clusters."""
def list(self):
"""Show a list of all cinder services."""
ctxt = context.get_admin_context()
clusters = objects.ClusterList.get_all(ctxt, services_summary=True)
print_format = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s"
print(print_format % (_('Name'),
_('Binary'),
_('Status'),
_('State'),
_('Heartbeat'),
_('Hosts'),
_('Down Hosts'),
_('Updated At')))
for cluster in clusters:
art = self._state_repr(cluster.is_up)
status = 'disabled' if cluster.disabled else 'enabled'
heartbeat = self._normalize_time(cluster.last_heartbeat)
updated_at = self._normalize_time(cluster.updated_at)
print(print_format % (cluster.name, cluster.binary, status, art,
heartbeat, cluster.num_hosts,
cluster.num_down_hosts, updated_at))
@args('--recursive', action='store_true', default=False,
help='Delete associated hosts.')
@args('binary', type=str,
help='Service to delete from the cluster.')
@args('cluster-name', type=str, help='Cluster to delete.')
def remove(self, recursive, binary, cluster_name):
"""Completely removes a cluster."""
ctxt = context.get_admin_context()
try:
cluster = objects.Cluster.get_by_id(ctxt, None, name=cluster_name,
binary=binary,
get_services=recursive)
except exception.ClusterNotFound:
print(_("Couldn't remove cluster %s because it doesn't exist.") %
cluster_name)
return 2
if recursive:
for service in cluster.services:
service.destroy()
try:
cluster.destroy()
except exception.ClusterHasHosts:
print(_("Couldn't remove cluster %s because it still has hosts.") %
cluster_name)
return 2
msg = _('Cluster %s successfully removed.') % cluster_name
if recursive:
msg = (_('%(msg)s And %(num)s services from the cluster were also '
'removed.') % {'msg': msg, 'num': len(cluster.services)})
print(msg)
@args('--full-rename', dest='partial',
action='store_false', default=True,
help='Do full cluster rename instead of just replacing provided '
'current cluster name and preserving backend and/or pool info.')
@args('current', help='Current cluster name.')
@args('new', help='New cluster name.')
def rename(self, partial, current, new):
"""Rename cluster name for Volumes and Consistency Groups.
Useful when you want to rename a cluster, particularly when the
backend_name has been modified in a multi-backend config or we have
moved from a single backend to multi-backend.
"""
ctxt = context.get_admin_context()
# Convert empty strings to None
current = current or None
new = new or None
# Update Volumes
num_vols = objects.VolumeList.include_in_cluster(
ctxt, new, partial_rename=partial, cluster_name=current)
# Update Consistency Groups
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, new, partial_rename=partial, cluster_name=current)
if num_vols or num_cgs:
msg = _('Successfully renamed %(num_vols)s volumes and '
'%(num_cgs)s consistency groups from cluster %(current)s '
'to %(new)s')
print(msg % {'num_vols': num_vols, 'num_cgs': num_cgs, 'new': new,
'current': current})
else:
msg = _('No volumes or consistency groups exist in cluster '
'%(current)s.')
print(msg % {'current': current})
return 2
class ConsistencyGroupCommands(object):
"""Methods for managing consistency groups."""
@args('--currenthost', required=True, help='Existing CG host name')
@args('--newhost', required=True, help='New CG host name')
def update_cg_host(self, currenthost, newhost):
"""Modify the host name associated with a Consistency Group.
Particularly to recover from cases where one has moved
a host from single backend to multi-backend, or changed the host
configuration option, or modified the backend_name in a multi-backend
config.
"""
ctxt = context.get_admin_context()
groups = objects.ConsistencyGroupList.get_all(
ctxt, {'host': currenthost})
for gr in groups:
gr.host = newhost
gr.save()
CATEGORIES = {
'backup': BackupCommands,
'config': ConfigCommands,
'cluster': ClusterCommands,
'cg': ConsistencyGroupCommands,
'db': DbCommands,
'host': HostCommands,
'logs': GetLogCommands,
'service': ServiceCommands,
'shell': ShellCommands,
'version': VersionCommands,
'volume': VolumeCommands,
}
def methods_of(obj):
"""Return non-private methods from an object.
Get all callable methods of an object that don't start with underscore
:return: a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
def add_command_parsers(subparsers):
for category in sorted(CATEGORIES):
command_object = CATEGORIES[category]()
parser = subparsers.add_parser(category)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
handler=add_command_parsers)
def get_arg_string(args):
if args[0] == '-':
# (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars
# is optional args. Notice that cfg module takes care of
# actual ArgParser so prefix_chars is always '-'.
if args[1] == '-':
# This is long optional arg
args = args[2:]
else:
args = args[1:]
# We convert dashes to underscores so we can have cleaner optional arg
# names
if args:
args = args.replace('-', '_')
return args
def fetch_func_args(func):
fn_kwargs = {}
for args, kwargs in getattr(func, 'args', []):
# Argparser `dest` configuration option takes precedence for the name
arg = kwargs.get('dest') or get_arg_string(args[0])
fn_kwargs[arg] = getattr(CONF.category, arg)
return fn_kwargs
def main():
    """Parse options and call the appropriate class/method."""
    objects.register_all()
CONF.register_cli_opt(category_opt)
script_name = sys.argv[0]
if len(sys.argv) < 2:
print(_("\nOpenStack Cinder version: %(version)s\n") %
{'version': version.version_string()})
print(script_name + " category action [<args>]")
print(_("Available categories:"))
for category in CATEGORIES:
print(_("\t%s") % category)
sys.exit(2)
try:
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
python_logging.captureWarnings(True)
except cfg.ConfigDirNotFoundError as details:
print(_("Invalid directory: %s") % details)
sys.exit(2)
except cfg.ConfigFilesNotFoundError as e:
cfg_files = e.config_files
print(_("Failed to read configuration file(s): %s") % cfg_files)
sys.exit(2)
fn = CONF.category.action_fn
fn_kwargs = fetch_func_args(fn)
fn(**fn_kwargs)
| eharney/cinder | cinder/cmd/manage.py | Python | apache-2.0 | 29,824 |
from __future__ import unicode_literals
import os
import os.path
from cached_property import cached_property
import pre_commit.constants as C
from pre_commit import git
from pre_commit.clientlib.validate_config import load_config
from pre_commit.repository import Repository
from pre_commit.store import Store
class Runner(object):
"""A `Runner` represents the execution context of the hooks. Notably the
repository under test.
"""
def __init__(self, git_root):
self.git_root = git_root
@classmethod
def create(cls):
"""Creates a PreCommitRunner by doing the following:
- Finds the root of the current git repository
- chdirs to that directory
"""
root = git.get_root()
os.chdir(root)
return cls(root)
@cached_property
def config_file_path(self):
return os.path.join(self.git_root, C.CONFIG_FILE)
@cached_property
def repositories(self):
"""Returns a tuple of the configured repositories."""
config = load_config(self.config_file_path)
repositories = tuple(Repository.create(x, self.store) for x in config)
for repository in repositories:
repository.require_installed()
return repositories
def get_hook_path(self, hook_type):
return os.path.join(self.git_root, '.git', 'hooks', hook_type)
@cached_property
def pre_commit_path(self):
return self.get_hook_path('pre-commit')
@cached_property
def pre_push_path(self):
return self.get_hook_path('pre-push')
@cached_property
def cmd_runner(self):
# TODO: remove this and inline runner.store.cmd_runner
return self.store.cmd_runner
@cached_property
def store(self):
return Store()
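# Illustrative usage sketch (assumes execution from inside a git checkout that
# contains a .pre-commit-config.yaml):
#
#     runner = Runner.create()
#     for repository in runner.repositories:
#         print(repository)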
| Teino1978-Corp/pre-commit | pre_commit/runner.py | Python | mit | 1,796 |
from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
import catnap
import yaml
def parse_yaml(f):
"""Parses a YAML-based test file"""
return catnap.Test.parse(yaml.load(f))
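# Illustrative usage sketch ('tests.yaml' is an assumed file containing a
# catnap test definition):
#
#     with open('tests.yaml') as f:
#         test = parse_yaml(f)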
| dailymuse/catnap | catnap/yaml_parser.py | Python | bsd-3-clause | 229 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
def list_database_output(result):
table = []
for item in result:
table.append(database_output(item))
return table
def database_output(result):
result = OrderedDict([('Database Id', result['id']),
('_colls', result['_colls']),
('_etag', result['_etag']),
('_rid', result['_rid']),
('_self', result['_self']),
('_ts', result['_ts']),
('_users', result['_users'])])
return result
def list_collection_output(result):
table = []
for item in result:
table.append(collection_output_helper(item))
return table
def collection_output(result):
return collection_output_helper(result['collection'])
def collection_output_helper(result):
result = OrderedDict([('Collection Id', result['id']),
('_conflicts', result['_conflicts']),
('_docs', result['_docs']),
('_etag', result['_etag']),
('_rid', result['_rid']),
('_self', result['_self']),
('_sprocs', result['_sprocs']),
('_triggers', result['_triggers']),
('_ts', result['_ts'])])
return result
def list_connection_strings_output(result):
table = []
for item in result['connectionStrings']:
table.append(item)
return table
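# Illustrative sketch (assumption, not from the original file): these helpers
# are meant to be registered as table transformers when the cosmosdb commands
# are wired up, along the lines of
#
#   g.custom_command('database list', 'cli_cosmosdb_database_list',
#                    table_transformer=list_database_output)
#
# where the command and function names are hypothetical placeholders.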
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/cosmosdb/_format.py | Python | mit | 1,901 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
""" This module contains helper functions """
import re
try:
from html import escape as escape_html # noqa: F401
except ImportError:
from cgi import escape as escape_html # noqa: F401
def escape_markdown(text):
"""Helper function to escape telegram markup symbols"""
escape_chars = '\*_`\['
return re.sub(r'([%s])' % escape_chars, r'\\\1', text)
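# Illustrative examples (not part of the original module):
#
#   escape_markdown('foo_bar')      ->  foo\_bar
#   escape_markdown('do *not* [x]') ->  do \*not\* \[x]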
| thonkify/thonkify | src/lib/telegram/utils/helpers.py | Python | mit | 1,181 |
# -*- coding: utf-8 -*-
#
# cross_check_mip_corrdet.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Auto- and crosscorrelation functions for spike trains
-----------------------------------------------------------
A time bin of size tbin is centered around the time difference it
represents. If the correlation function is calculated for tau in
[-tau_max, tau_max], the pair events contributing to the left-most
bin are those for which tau in [-tau_max-tbin/2, tau_max+tbin/2) and
so on.
Correlate two spike trains with each other assumes spike times to be ordered in
time. tau > 0 means spike2 is later than spike1
tau_max: maximum time lag in ms correlation function
tbin: bin size
spike1: first spike train [tspike...]
spike2: second spike train [tspike...]
References
~~~~~~~~~~~~
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
from matplotlib.pylab import *
def corr_spikes_sorted(spike1, spike2, tbin, tau_max, h):
tau_max_i = int(tau_max / h)
tbin_i = int(tbin / h)
cross = zeros(int(2 * tau_max_i / tbin_i + 1), 'd')
j0 = 0
for spki in spike1:
j = j0
while j < len(spike2) and spike2[j] - spki < -tau_max_i - tbin_i / 2.0:
j += 1
j0 = j
while j < len(spike2) and spike2[j] - spki < tau_max_i + tbin_i / 2.0:
cross[int(
(spike2[j] - spki + tau_max_i + 0.5 * tbin_i) / tbin_i)] += 1.0
j += 1
return cross
nest.ResetKernel()
h = 0.1 # Computation step size in ms
T = 100000.0 # Total duration
delta_tau = 10.0
tau_max = 100.0
pc = 0.5
nu = 100.0
# grng_seed is 0 because test data was produced for seed = 0
nest.SetKernelStatus({'local_num_threads': 1, 'resolution': h,
'overwrite_files': True, 'grng_seed': 0})
# Set up network, connect and simulate
mg = nest.Create('mip_generator')
nest.SetStatus(mg, {'rate': nu, 'p_copy': pc})
cd = nest.Create('correlation_detector')
nest.SetStatus(cd, {'tau_max': tau_max, 'delta_tau': delta_tau})
sd = nest.Create('spike_detector')
nest.SetStatus(sd, {'withtime': True,
'withgid': True, 'time_in_steps': True})
pn1 = nest.Create('parrot_neuron')
pn2 = nest.Create('parrot_neuron')
nest.Connect(mg, pn1)
nest.Connect(mg, pn2)
nest.Connect(pn1, sd)
nest.Connect(pn2, sd)
nest.SetDefaults('static_synapse', {'weight': 1.0, 'receptor_type': 0})
nest.Connect(pn1, cd)
nest.SetDefaults('static_synapse', {'weight': 1.0, 'receptor_type': 1})
nest.Connect(pn2, cd)
nest.Simulate(T)
n_events = nest.GetStatus(cd)[0]['n_events']
n1 = n_events[0]
n2 = n_events[1]
lmbd1 = (n1 / (T - tau_max)) * 1000.0
lmbd2 = (n2 / (T - tau_max)) * 1000.0
h = 0.1
tau_max = 100.0 # ms correlation window
t_bin = 10.0 # ms bin size
spikes = nest.GetStatus(sd)[0]['events']['senders']
sp1 = find(spikes[:] == 4)
sp2 = find(spikes[:] == 5)
# Compute the crosscorrelation
cross = corr_spikes_sorted(sp1, sp2, t_bin, tau_max, h)
print("Crosscorrelation:")
print(cross)
print("Sum of crosscorrelation:")
print(sum(cross))
| terhorstd/nest-simulator | pynest/examples/cross_check_mip_corrdet.py | Python | gpl-2.0 | 3,691 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.actions import page_action
from telemetry.page import action_runner as action_runner_module
from telemetry.unittest_util import tab_test_case
class PinchActionTest(tab_test_case.TabTestCase):
def setUp(self):
super(PinchActionTest, self).setUp()
def testPinchByApiCalledWithCorrectArguments(self):
self.Navigate('blank.html')
if not page_action.IsGestureSourceTypeSupported(self._tab, 'touch'):
return
action_runner = action_runner_module.ActionRunner(self._tab)
action_runner.ExecuteJavaScript('''
chrome.gpuBenchmarking.pinchBy = function(
scaleFactor, anchorLeft, anchorTop, callback, speed) {
window.__test_scaleFactor = scaleFactor;
window.__test_anchorLeft = anchorLeft;
window.__test_anchorTop = anchorTop;
window.__test_callback = callback;
window.__test_speed = speed;
window.__pinchActionDone = true;
};''')
action_runner.PinchPage(scale_factor=2)
self.assertEqual(
2, action_runner.EvaluateJavaScript('window.__test_scaleFactor'))
self.assertTrue(
action_runner.EvaluateJavaScript('!isNaN(window.__test_anchorLeft)'))
self.assertTrue(
action_runner.EvaluateJavaScript('!isNaN(window.__test_anchorTop)'))
self.assertTrue(
action_runner.EvaluateJavaScript('!!window.__test_callback'))
self.assertEqual(
800, action_runner.EvaluateJavaScript('window.__test_speed'))
| SaschaMester/delicium | tools/telemetry/telemetry/internal/actions/pinch_unittest.py | Python | bsd-3-clause | 1,647 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy import signals
import json
import codecs
import MySQLdb
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class MessagePipeline(object):
def process_item(self, item, spider):
return item
class hit_spiderPipline(object):
def __init__(self):
#入库
#try:
# self.db = MySQLdb.connect(host="127.0.0.1", user="root", passwd="lin952787655", port=3306, db="message", charset="utf8")
#self.cursor = self.db.cursor()
#self.cursor.execute("DROP TABLE IF EXITS textdata")
#print "Connect to db successfully!"
#except:
#print "Fail to connect to db!"
#文件
self.file = codecs.open('new_message.json', 'w', encoding='utf-8')
def process_item(self, item, spider):
#入库
#文件
line = json.dumps(dict(item), ensure_ascii=False) + "\n"
self.file.write(line)
return item
def spider_closed(self, spider):
self.db.commit()
self.db.close
self.file.close()
from twisted.enterprise import adbapi
import MySQLdb
import MySQLdb.cursors
class MySQLStorePipeline(object):
def __init__(self):
dbargs = dict(
host = '127.0.0.1',
db = 'hitnew',
user = 'root',
passwd = 'lin952787655',
#cursorclass = MySQLdb.cursors.DictCursor,
charset = 'utf8',
use_unicode = True
)
self.dbpool = adbapi.ConnectionPool('MySQLdb',**dbargs)
def process_item(self, item,spider):
res = self.dbpool.runInteraction(self.insert_into_table,item)
return item
def insert_into_table(self,conn,item):
conn.execute('insert into news(title, url, new) values(%s,%s,%s)', (item['title'],item['url_next'],item['text']))
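# Illustrative settings sketch (assumption): the pipelines above would be
# enabled in the project's settings.py, e.g.
#
#     ITEM_PIPELINES = {
#         'message.pipelines.hit_spiderPipline': 300,
#         'message.pipelines.MySQLStorePipeline': 800,
#     }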
| hitlinxiang/work | message/message/pipelines.py | Python | mit | 2,048 |
from django.contrib import admin
from alimentos.models import *
admin.site.register(Food)
| mricharleon/HatosGanaderos | alimentos/admin.py | Python | gpl-2.0 | 91 |