code        stringlengths   3 to 1.05M
repo_name   stringlengths   5 to 104
path        stringlengths   4 to 251
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           3 to 1.05M
# -*- coding: utf-8 -*-
import sys

sys.path.append('../browser_interface/browser')


class BrowserFactory(object):
    def create(self, type, *args, **kwargs):
        return getattr(__import__(type), type)(*args, **kwargs)
xtuyaowu/jtyd_python_spider
browser_interface/browser/BrowserFactory.py
Python
mit
225
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Willow Garage, Inc. nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Jon Binney

'''
Functions for working with PointCloud2.
'''

__docformat__ = "restructuredtext en"

import numpy as np
from sensor_msgs.msg import PointCloud2, PointField

# prefix to the names of dummy fields we add to get byte alignment correct. this needs to not
# clash with any actual field names
DUMMY_FIELD_PREFIX = '__'

# mappings between PointField types and numpy types
type_mappings = [(PointField.INT8, np.dtype('int8')), (PointField.UINT8, np.dtype('uint8')),
                 (PointField.INT16, np.dtype('int16')), (PointField.UINT16, np.dtype('uint16')),
                 (PointField.INT32, np.dtype('int32')), (PointField.UINT32, np.dtype('uint32')),
                 (PointField.FLOAT32, np.dtype('float32')), (PointField.FLOAT64, np.dtype('float64'))]
pftype_to_nptype = dict(type_mappings)
nptype_to_pftype = dict((nptype, pftype) for pftype, nptype in type_mappings)

# sizes (in bytes) of PointField types
pftype_sizes = {PointField.INT8: 1, PointField.UINT8: 1, PointField.INT16: 2, PointField.UINT16: 2,
                PointField.INT32: 4, PointField.UINT32: 4, PointField.FLOAT32: 4, PointField.FLOAT64: 8}


def pointcloud2_to_dtype(cloud_msg):
    '''Convert a list of PointFields to a numpy record datatype.
    '''
    offset = 0
    np_dtype_list = []
    for f in cloud_msg.fields:
        while offset < f.offset:
            # might be extra padding between fields
            np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
            offset += 1
        np_dtype_list.append((f.name, pftype_to_nptype[f.datatype]))
        offset += pftype_sizes[f.datatype]

    # might be extra padding between points
    while offset < cloud_msg.point_step:
        np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
        offset += 1

    return np_dtype_list


def arr_to_fields(cloud_arr):
    '''Convert a numpy record datatype into a list of PointFields.
    '''
    fields = []
    for field_name in cloud_arr.dtype.names:
        np_field_type, field_offset = cloud_arr.dtype.fields[field_name]
        pf = PointField()
        pf.name = field_name
        pf.datatype = nptype_to_pftype[np_field_type]
        pf.offset = field_offset
        pf.count = 1  # is this ever more than one?
        fields.append(pf)
    return fields


def pointcloud2_to_array(cloud_msg, split_rgb=False):
    '''
    Converts a rospy PointCloud2 message to a numpy recordarray

    Reshapes the returned array to have shape (height, width), even if the height is 1.

    The reason for using np.fromstring rather than struct.unpack is speed... especially
    for large point clouds, this will be <much> faster.
    '''
    # construct a numpy record type equivalent to the point type of this cloud
    dtype_list = pointcloud2_to_dtype(cloud_msg)

    # parse the cloud into an array
    cloud_arr = np.frombuffer(cloud_msg.data, dtype_list)
    # cloud_arr = np.fromstring(cloud_msg.data, dtype_list)

    # remove the dummy fields that were added
    cloud_arr = cloud_arr[
        [fname for fname, _type in dtype_list if not (fname[:len(DUMMY_FIELD_PREFIX)] == DUMMY_FIELD_PREFIX)]]

    if split_rgb:
        cloud_arr = split_rgb_field(cloud_arr)

    return np.reshape(cloud_arr, (cloud_msg.height, cloud_msg.width))


def array_to_pointcloud2(cloud_arr, stamp=None, frame_id=None, seq=None, merge_rgb=False):
    '''Converts a numpy record array to a sensor_msgs.msg.PointCloud2.
    '''
    if merge_rgb:
        cloud_arr = merge_rgb_fields(cloud_arr)

    # make it 2d (even if height will be 1)
    cloud_arr = np.atleast_2d(cloud_arr)

    cloud_msg = PointCloud2()

    if stamp is not None:
        cloud_msg.header.stamp = stamp
    if frame_id is not None:
        cloud_msg.header.frame_id = frame_id
    if seq is not None:
        cloud_msg.header.seq = seq
    cloud_msg.height = cloud_arr.shape[0]
    cloud_msg.width = cloud_arr.shape[1]
    cloud_msg.fields = arr_to_fields(cloud_arr)
    cloud_msg.is_bigendian = False  # assumption
    cloud_msg.point_step = cloud_arr.dtype.itemsize
    cloud_msg.row_step = cloud_msg.point_step*cloud_arr.shape[1]
    cloud_msg.is_dense = all([np.isfinite(cloud_arr[fname]).all() for fname in cloud_arr.dtype.names])
    cloud_msg.data = cloud_arr.tostring()
    return cloud_msg


def merge_rgb_fields(cloud_arr):
    '''Takes an array with named np.uint8 fields 'r', 'g', and 'b', and returns an array
    in which they have been merged into a single np.float32 'rgb' field. The first byte
    of this field is the 'r' uint8, the second is the 'g', uint8, and the third is the
    'b' uint8.

    This is the way that pcl likes to handle RGB colors for some reason.
    '''
    r = np.asarray(cloud_arr['r'], dtype=np.uint32)
    g = np.asarray(cloud_arr['g'], dtype=np.uint32)
    b = np.asarray(cloud_arr['b'], dtype=np.uint32)
    rgb_arr = np.array((r << 16) | (g << 8) | (b << 0), dtype=np.uint32)

    # not sure if there is a better way to do this. i'm changing the type of the array
    # from uint32 to float32, but i don't want any conversion to take place -jdb
    rgb_arr.dtype = np.float32

    # create a new array, without r, g, and b, but with rgb float32 field
    new_dtype = []
    for field_name in cloud_arr.dtype.names:
        field_type, field_offset = cloud_arr.dtype.fields[field_name]
        if field_name not in ('r', 'g', 'b'):
            new_dtype.append((field_name, field_type))
    new_dtype.append(('rgb', np.float32))
    new_cloud_arr = np.zeros(cloud_arr.shape, new_dtype)

    # fill in the new array
    for field_name in new_cloud_arr.dtype.names:
        if field_name == 'rgb':
            new_cloud_arr[field_name] = rgb_arr
        else:
            new_cloud_arr[field_name] = cloud_arr[field_name]

    return new_cloud_arr


def split_rgb_field(cloud_arr):
    '''Takes an array with a named 'rgb' float32 field, and returns an array in which
    this has been split into 3 uint 8 fields: 'r', 'g', and 'b'.

    (pcl stores rgb in packed 32 bit floats)
    '''
    rgb_arr = cloud_arr['rgb'].copy()
    rgb_arr.dtype = np.uint32
    r = np.asarray((rgb_arr >> 16) & 255, dtype=np.uint8)
    g = np.asarray((rgb_arr >> 8) & 255, dtype=np.uint8)
    b = np.asarray(rgb_arr & 255, dtype=np.uint8)

    # create a new array, without rgb, but with r, g, and b fields
    new_dtype = []
    for field_name in cloud_arr.dtype.names:
        field_type, field_offset = cloud_arr.dtype.fields[field_name]
        if not field_name == 'rgb':
            new_dtype.append((field_name, field_type))
    new_dtype.append(('r', np.uint8))
    new_dtype.append(('g', np.uint8))
    new_dtype.append(('b', np.uint8))
    new_cloud_arr = np.zeros(cloud_arr.shape, new_dtype)

    # fill in the new array
    for field_name in new_cloud_arr.dtype.names:
        if field_name == 'r':
            new_cloud_arr[field_name] = r
        elif field_name == 'g':
            new_cloud_arr[field_name] = g
        elif field_name == 'b':
            new_cloud_arr[field_name] = b
        else:
            new_cloud_arr[field_name] = cloud_arr[field_name]

    return new_cloud_arr


def get_xyz_points(cloud_array, remove_nans=True, dtype=np.float):
    '''Pulls out x, y, and z columns from the cloud recordarray, and returns
    a 3xN matrix.
    '''
    # remove crap points
    if remove_nans:
        mask = np.isfinite(cloud_array['x']) & np.isfinite(cloud_array['y']) & np.isfinite(cloud_array['z'])
        cloud_array = cloud_array[mask]

    # pull out x, y, and z values
    points = np.zeros(list(cloud_array.shape) + [3], dtype=dtype)
    points[...,0] = cloud_array['x']
    points[...,1] = cloud_array['y']
    points[...,2] = cloud_array['z']

    return points


def pointcloud2_to_xyz_array(cloud_msg, remove_nans=True):
    return get_xyz_points(pointcloud2_to_array(cloud_msg), remove_nans=remove_nans)
colincsl/pyKinectTools
pyKinectTools/utils/pointcloud_conversions.py
Python
bsd-2-clause
9,466
#!/usr/bin/env python

import json, sys, time

def isint(x):
    try:
        int(x)
        return True
    except:
        return False

if len(sys.argv) > 2 and isint(sys.argv[1]) and isint(sys.argv[2]):
    sys.argv.pop(1)

count = int(sys.argv[1])

for n in sys.argv[2:]:
    print '%s:' % n
    start = time.time()
    fp = open(n)
    for i in xrange(count):
        fp.seek(0)
        val = json.load(fp)
    end = time.time()
    print ' %d good, %gs' % (count, end - start)
tolysz/prepare-ghcjs
spec-lts8/aeson/benchmarks/parse.py
Python
bsd-3-clause
485
import os
import platform

from twisted.internet import defer

from .. import data, helper
from p2pool.util import pack


P2P_PREFIX = '7c1f9184'.decode('hex')
P2P_PORT = 9335
ADDRESS_VERSION = 0
RPC_PORT = 9334
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
    'hawaiicoinaddress' in (yield bitcoind.rpc_help()) and
    not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: 500*100000000 >> (height + 1)//500000
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 50 # s
SYMBOL = 'HIC'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'Hawaiicoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Hawaiicoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.hawaiicoin'), 'hawaiicoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://pool.privanon.com:8080/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://pool.privanon.com:8080/address/'
TX_EXPLORER_URL_PREFIX = 'http://pool.privanon.com:8080/tx/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.03e8
CohibAA/p2pool-doge1-8
p2pool/bitcoin/networks/hawaiicoin.py
Python
gpl-3.0
1,221
from horizon import views
from horizon import tables

from openstack_dashboard.dashboards.tasks.completed.tables import CompletedTasksTable
from openstack_dashboard.api.salt_database_api import get_all_records,task_body_harp,get_all_records_mod
#
#
from django.utils.translation import ugettext_lazy as _
from horizon.utils import memoized
#
#
from django.core.urlresolvers import reverse
from horizon import exceptions


class IndexView(tables.DataTableView):
    # A very simple class-based view...
    template_name = 'tasks/completed/index.html'
    table_class = CompletedTasksTable

    def has_more_data(self, table):
        return self._more

    def get_data(self):
        # Add data to the context here...
        completed_jobs_list = []
        self._more = False
        marker = self.request.GET.get(CompletedTasksTable._meta.pagination_param, None)
        try:
            completed_jobs_list , self._more = get_all_records_mod(self.request,paginate=True,marker=marker)
        except exceptions.NotAvailable:
            exceptions.handle(self.request,_('Database connection is not available'))
        return completed_jobs_list


class DetailView(views.HorizonTemplateView):
    template_name = 'tasks/completed/detail.html'
    page_title = _("Task Details: {{ task.id }}")

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        task = self.get_data()
        table = CompletedTasksTable(self.request)
        context["task"] = task
        return context

    @memoized.memoized_method
    def get_data(self):
        try:
            task_id = self.kwargs['task_id']
            task = task_body_harp(job_id=task_id)
        except Exception:
            redirect = self.get_redirect_url()
            exceptions.handle(self.request,
                              _('Unable to retrieve task details.'),
                              redirect=redirect)
        return task

    def get_redirect_url(self):
        return reverse('horizon:tasks:completed:index')
icloudrnd/automation_tools
openstack_dashboard/dashboards/tasks/completed/views.py
Python
apache-2.0
2,082
from selectable.base import ModelLookup
from selectable.registry import registry

from timepiece.crm.models import Project
from timepiece.entries.models import Activity


class ActivityLookup(ModelLookup):
    model = Activity
    search_fields = ('name__icontains', )

    def get_query(self, request, term):
        results = super(ActivityLookup, self).get_query(request, term)
        project_pk = request.GET.get('project', None)
        if project_pk not in [None, '']:
            project = Project.objects.get(pk=project_pk)
            if project and project.activity_group:
                return project.activity_group.activities.all().filter(name__icontains=term)
        return results

    def get_item_label(self, item):
        return u"%s" % (item.name)


registry.register(ActivityLookup)
caktus/django-timepiece
timepiece/entries/lookups.py
Python
mit
807
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.six import iteritems, string_types from ansible.errors import AnsibleError, AnsibleParserError from ansible.parsing.mod_args import ModuleArgsParser from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping, AnsibleUnicode from ansible.plugins import lookup_loader from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base from ansible.playbook.become import Become from ansible.playbook.block import Block from ansible.playbook.conditional import Conditional from ansible.playbook.loop_control import LoopControl from ansible.playbook.role import Role from ansible.playbook.taggable import Taggable from ansible.utils.unicode import to_str try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['Task'] class Task(Base, Conditional, Taggable, Become): """ A task is a language feature that represents a call to a module, with given arguments and other parameters. A handler is a subclass of a task. Usage: Task.load(datastructure) -> Task Task.something(...) 
""" # ================================================================================= # ATTRIBUTES # load_<attribute_name> and # validate_<attribute_name> # will be used if defined # might be possible to define others _args = FieldAttribute(isa='dict', default=dict()) _action = FieldAttribute(isa='string') _any_errors_fatal = FieldAttribute(isa='bool') _async = FieldAttribute(isa='int', default=0) _changed_when = FieldAttribute(isa='list', default=[]) _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') _delegate_facts = FieldAttribute(isa='bool', default=False) _failed_when = FieldAttribute(isa='list', default=[]) _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) _loop_args = FieldAttribute(isa='list', private=True) _loop_control = FieldAttribute(isa='class', class_type=LoopControl) _name = FieldAttribute(isa='string', default='') _notify = FieldAttribute(isa='list') _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int') _until = FieldAttribute(isa='list', default=[]) def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' self._block = block self._role = role self._task_include = task_include super(Task, self).__init__() def get_path(self): ''' return the absolute path of the task with its line number ''' if hasattr(self, '_ds'): return "%s:%s" % (self._ds._data_source, self._ds._line_number) def get_name(self): ''' return the name of the task ''' if self._role and self.name: return "%s : %s" % (self._role.get_name(), self.name) elif self.name: return self.name else: if self._role: return "%s : %s" % (self._role.get_name(), self.action) else: return "%s" % (self.action,) def _merge_kv(self, ds): if ds is None: return "" elif isinstance(ds, string_types): return ds elif isinstance(ds, dict): buf = "" for (k,v) in iteritems(ds): if k.startswith('_'): continue buf = buf + "%s=%s " % (k,v) buf = buf.strip() return buf @staticmethod def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None): t = Task(block=block, role=role, task_include=task_include) return t.load_data(data, variable_manager=variable_manager, loader=loader) def __repr__(self): ''' returns a human readable representation of the task ''' if self.get_name() == 'meta': return "TASK: meta (%s)" % self.args['_raw_params'] else: return "TASK: %s" % self.get_name() def _preprocess_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' loop_name = k.replace("with_", "") if new_ds.get('loop') is not None: raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds) if v is None: raise AnsibleError("you must specify a value when using %s" % k, obj=ds) new_ds['loop'] = loop_name new_ds['loop_args'] = v def preprocess_data(self, ds): ''' tasks are especially complex arguments so need pre-processing. keep it short. 
''' assert isinstance(ds, dict) # the new, cleaned datastructure, which will have legacy # items reduced to a standard structure suitable for the # attributes of the task class new_ds = AnsibleMapping() if isinstance(ds, AnsibleBaseYAMLObject): new_ds.ansible_pos = ds.ansible_pos # use the args parsing class to determine the action, args, # and the delegate_to value from the various possible forms # supported as legacy args_parser = ModuleArgsParser(task_ds=ds) try: (action, args, delegate_to) = args_parser.parse() except AnsibleParserError as e: raise AnsibleParserError(to_str(e), obj=ds) # the command/shell/script modules used to support the `cmd` arg, # which corresponds to what we now call _raw_params, so move that # value over to _raw_params (assuming it is empty) if action in ('command', 'shell', 'script'): if 'cmd' in args: if args.get('_raw_params', '') != '': raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified." " Please put everything in one or the other place.", obj=ds) args['_raw_params'] = args.pop('cmd') new_ds['action'] = action new_ds['args'] = args new_ds['delegate_to'] = delegate_to # we handle any 'vars' specified in the ds here, as we may # be adding things to them below (special handling for includes). # When that deprecated feature is removed, this can be too. if 'vars' in ds: # _load_vars is defined in Base, and is used to load a dictionary # or list of dictionaries in a standard way new_ds['vars'] = self._load_vars(None, ds.pop('vars')) else: new_ds['vars'] = dict() for (k,v) in iteritems(ds): if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell': # we don't want to re-assign these values, which were # determined by the ModuleArgsParser() above continue elif k.replace("with_", "") in lookup_loader: self._preprocess_loop(ds, new_ds, k, v) else: # pre-2.0 syntax allowed variables for include statements at the # top level of the task, so we move those into the 'vars' dictionary # here, and show a deprecation message as we will remove this at # some point in the future. if action == 'include' and k not in self._get_base_attributes() and k not in self.DEPRECATED_ATTRIBUTES: display.deprecated("Specifying include variables at the top-level of the task is deprecated." " Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n" " for currently supported syntax regarding included files and variables") new_ds['vars'][k] = v else: new_ds[k] = v return super(Task, self).preprocess_data(new_ds) def _load_loop_control(self, attr, ds): if not isinstance(ds, dict): raise AnsibleParserError( "the `loop_control` value must be specified as a dictionary and cannot " \ "be a variable itself (though it can contain variables)", obj=ds, ) return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader) def post_validate(self, templar): ''' Override of base class post_validate, to also do final validation on the block and task include (if any) to which this task belongs. 
''' if self._block: self._block.post_validate(templar) if self._task_include: self._task_include.post_validate(templar) super(Task, self).post_validate(templar) def _post_validate_register(self, attr, value, templar): ''' Override post validation for the register args field, which is not supposed to be templated ''' return value def _post_validate_loop_args(self, attr, value, templar): ''' Override post validation for the loop args field, which is templated specially in the TaskExecutor class when evaluating loops. ''' return value def _post_validate_environment(self, attr, value, templar): ''' Override post validation of vars on the play, as we don't want to template these too early. ''' if value is None: return dict() elif isinstance(value, list): if len(value) == 1: return templar.template(value[0], convert_bare=True) else: env = [] for env_item in value: if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys(): env[env_item] = templar.template(env_item, convert_bare=True) elif isinstance(value, dict): env = dict() for env_item in value: if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys(): env[env_item] = templar.template(value[env_item], convert_bare=True) # at this point it should be a simple string return templar.template(value, convert_bare=True) def _post_validate_changed_when(self, attr, value, templar): ''' changed_when is evaluated after the execution of the task is complete, and should not be templated during the regular post_validate step. ''' return value def _post_validate_failed_when(self, attr, value, templar): ''' failed_when is evaluated after the execution of the task is complete, and should not be templated during the regular post_validate step. ''' return value def _post_validate_until(self, attr, value, templar): ''' until is evaluated after the execution of the task is complete, and should not be templated during the regular post_validate step. 
''' return value def get_vars(self): all_vars = dict() if self._block: all_vars.update(self._block.get_vars()) if self._task_include: all_vars.update(self._task_include.get_vars()) all_vars.update(self.vars) if 'tags' in all_vars: del all_vars['tags'] if 'when' in all_vars: del all_vars['when'] return all_vars def get_include_params(self): all_vars = dict() if self._task_include: all_vars.update(self._task_include.get_include_params()) if self.action == 'include': all_vars.update(self.vars) return all_vars def copy(self, exclude_block=False): new_me = super(Task, self).copy() new_me._block = None if self._block and not exclude_block: new_me._block = self._block.copy() new_me._role = None if self._role: new_me._role = self._role new_me._task_include = None if self._task_include: new_me._task_include = self._task_include.copy(exclude_block=exclude_block) return new_me def serialize(self): data = super(Task, self).serialize() if self._block: data['block'] = self._block.serialize() if self._role: data['role'] = self._role.serialize() if self._task_include: data['task_include'] = self._task_include.serialize() return data def deserialize(self, data): # import is here to avoid import loops #from ansible.playbook.task_include import TaskInclude block_data = data.get('block') if block_data: b = Block() b.deserialize(block_data) self._block = b del data['block'] role_data = data.get('role') if role_data: r = Role() r.deserialize(role_data) self._role = r del data['role'] ti_data = data.get('task_include') if ti_data: #ti = TaskInclude() ti = Task() ti.deserialize(ti_data) self._task_include = ti del data['task_include'] super(Task, self).deserialize(data) def evaluate_conditional(self, templar, all_vars): if self._block is not None: if not self._block.evaluate_conditional(templar, all_vars): return False if self._task_include is not None: if not self._task_include.evaluate_conditional(templar, all_vars): return False return super(Task, self).evaluate_conditional(templar, all_vars) def set_loader(self, loader): ''' Sets the loader on this object and recursively on parent, child objects. This is used primarily after the Task has been serialized/deserialized, which does not preserve the loader. ''' self._loader = loader if self._block: self._block.set_loader(loader) if self._task_include: self._task_include.set_loader(loader) def _get_parent_attribute(self, attr, extend=False): ''' Generic logic to get the attribute or parent attribute for a task value. ''' value = None try: value = self._attributes[attr] if self._block and (value is None or extend): parent_value = getattr(self._block, attr) if extend: value = self._extend_value(value, parent_value) else: value = parent_value if self._task_include and (value is None or extend): parent_value = getattr(self._task_include, attr) if extend: value = self._extend_value(value, parent_value) else: value = parent_value except KeyError: pass return value def _get_attr_environment(self): ''' Override for the 'tags' getattr fetcher, used from Base. ''' environment = self._attributes['environment'] parent_environment = self._get_parent_attribute('environment', extend=True) if parent_environment is not None: environment = self._extend_value(environment, parent_environment) return environment def _get_attr_any_errors_fatal(self): ''' Override for the 'tags' getattr fetcher, used from Base. ''' return self._get_parent_attribute('any_errors_fatal')
cmvelo/ansible
lib/ansible/playbook/task.py
Python
gpl-3.0
17,005
import random
import re

from util import hook, http


@hook.command
def suggest(inp, inp_unstripped=None):
    ".suggest [#n] <phrase> -- gets a random/the nth suggested google search"
    if inp_unstripped is not None:
        inp = inp_unstripped
    m = re.match("^#(\d+) (.+)$", inp)
    num = 0
    if m:
        num, inp = m.groups()
        num = int(num)

    json = http.get_json(
        "http://suggestqueries.google.com/complete/search",
        client="firefox", q=inp
    )

    suggestions = json[1]
    if not suggestions:
        return "no suggestions found"
    if not num:
        num = random.randint(1, len(suggestions))
    if len(suggestions) + 1 <= num:
        return "only got %d suggestions" % len(suggestions)
    out = suggestions[num - 1]
    return "#%d: %s" % (num, out)
jmgao/skybot
plugins/suggest.py
Python
unlicense
798
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2018 David Arroyo Menéndez

# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>

# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.

# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,

#!/usr/bin/python

import profile


def factorial(n):
    if ((n == 0) | (n == 1)):
        return 1
    elif (n > 1):
        return n * factorial(n - 1)


print("RAW")
profile.run('factorial(4)')
davidam/python-examples
basics/profile_factorial_raw.py
Python
gpl-3.0
1,099
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)

# 3p
import mock

# project
from checks import AGENT_METRICS_CHECK_NAME
from tests.checks.common import AgentCheckTest, load_check

MOCK_CONFIG = {
    'instances': [
        {'process_metrics': [
            {
                'name': 'memory_info',
                'type': 'gauge',
                'active': 'yes'
            },
            {
                'name': 'cpu_times',
                'type': 'rate',
                'active': 'yes'
            },
        ]}],
    'init_config': {}
}

MOCK_CONFIG_2 = {
    'instances': [
        {'process_metrics': [
            {
                'name': 'memory_info',
                'type': 'gauge',
                'active': 'yes'
            },
            {
                'name': 'get_non_existent_stat',
                'type': 'gauge',
                'active': 'yes'
            },
        ]}],
    'init_config': {}
}

AGENT_CONFIG_DEV_MODE = {
    'developer_mode': True
}

AGENT_CONFIG_DEFAULT_MODE = {}

MOCK_STATS = {
    'memory_info': dict([('rss', 16814080), ('vms', 74522624)]),
    'cpu_times': dict([('user', 0.041733968), ('system', 0.022306718)])
}

MOCK_NAMES_TO_METRIC_TYPES = {
    'memory_info': 'gauge',
    'cpu_times': 'gauge'
}


class AgentMetricsTestCase(AgentCheckTest):
    CHECK_NAME = AGENT_METRICS_CHECK_NAME

    def mock_psutil_config_to_stats(self):
        return MOCK_STATS, MOCK_NAMES_TO_METRIC_TYPES

    ### Tests for Agent Developer Mode
    def test_psutil_config_to_stats(self):
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        instance = MOCK_CONFIG.get('instances')[0]
        stats, names_to_metric_types = check._psutil_config_to_stats(instance)

        self.assertIn('memory_info', names_to_metric_types)
        self.assertEqual(names_to_metric_types['memory_info'], 'gauge')

        self.assertIn('cpu_times', names_to_metric_types)
        self.assertEqual(names_to_metric_types['cpu_times'], 'rate')

        self.assertIn('memory_info', stats)
        self.assertIn('cpu_times', stats)

    def test_send_single_metric(self):
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        check.gauge = mock.MagicMock()
        check.rate = mock.MagicMock()

        check._send_single_metric('serverdensity.agent.collector.memory_info.vms', 16814081, 'gauge')
        check.gauge.assert_called_with('serverdensity.agent.collector.memory_info.vms', 16814081)

        check._send_single_metric('serverdensity.agent.collector.memory_info.vms', 16814081, 'rate')
        check.rate.assert_called_with('serverdensity.agent.collector.memory_info.vms', 16814081)

        self.assertRaises(Exception, check._send_single_metric,
                          *('serverdensity.agent.collector.memory_info.vms', 16814081, 'bogus'))

    def test_register_psutil_metrics(self):
        check = load_check(self.CHECK_NAME, MOCK_CONFIG, AGENT_CONFIG_DEV_MODE)
        check._register_psutil_metrics(MOCK_STATS, MOCK_NAMES_TO_METRIC_TYPES)
        self.metrics = check.get_metrics()

        self.assertMetric('serverdensity.agent.collector.memory_info.rss', value=16814080)
        self.assertMetric('serverdensity.agent.collector.memory_info.vms', value=74522624)

    def test_bad_process_metric_check(self):
        ''' Tests that a bad configuration option for `process_metrics` gets ignored '''
        check = load_check(self.CHECK_NAME, MOCK_CONFIG_2, AGENT_CONFIG_DEV_MODE)
        instance = MOCK_CONFIG.get('instances')[0]
        stats, names_to_metric_types = check._psutil_config_to_stats(instance)

        self.assertIn('memory_info', names_to_metric_types)
        self.assertEqual(names_to_metric_types['memory_info'], 'gauge')
        self.assertNotIn('non_existent_stat', names_to_metric_types)

        self.assertIn('memory_info', stats)
        self.assertNotIn('non_existent_stat', stats)

    ### Tests for Agent Default Mode
    def test_no_process_metrics_collected(self):
        ''' Test that additional process metrics are not collected when in default mode '''
        mocks = {
            '_register_psutil_metrics': mock.MagicMock(side_effect=AssertionError),
            '_psutil_config_to_stats': mock.MagicMock(side_effect=AssertionError),
        }

        self.run_check(MOCK_CONFIG, mocks=mocks)
serverdensity/sd-agent-core-plugins
agent_metrics/test_agent_metrics.py
Python
bsd-3-clause
4,404
# -*- test-case-name: nevow.test.test_livepage -*- # Copyright (c) 2004 Divmod. # See LICENSE for details. """ Previous generation Nevow Comet support. Do not use this module. @see: L{nevow.athena} """ import itertools, types import warnings from zope.interface import implements, Interface from twisted.internet import defer, error from twisted.internet.task import LoopingCall from twisted.python import log from nevow import tags, inevow, context, static, flat, rend, url, util, stan # If you need to debug livepage itself or your livepage app, set this to true DEBUG = False _jslog = None def _openjslog(): global _jslog if _jslog is None: _jslog = file("js.log", "w") _jslog.write("**********\n") _jslog.flush() return _jslog def jslog(*x): if DEBUG: mylog = _openjslog() for y in x: mylog.write(str(y)) mylog.flush() class JavascriptContext(context.WovenContext): def __init__(self, parent=None, tag=None, isAttrib=None, inJSSingleQuoteString=None, remembrances=None): super(JavascriptContext, self).__init__( parent, tag, inJS=True, isAttrib=isAttrib, inJSSingleQuoteString=inJSSingleQuoteString, remembrances=None) class TimeoutException(Exception): pass class ClientSideException(Exception): pass class SingleQuote(object): def __init__(self, children): self.children = children def __repr__(self): return "%s(%s)" % (type(self).__name__, self.children) def flattenSingleQuote(singleQuote, ctx): new = JavascriptContext(ctx, tags.invisible[singleQuote], inJSSingleQuoteString=True) return flat.serialize(singleQuote.children, new) flat.registerFlattener(flattenSingleQuote, SingleQuote) class _js(object): """ Stan for Javascript. There is a convenience instance of this class named "js" in the livepage module which you should use instead of the _js class directly. Marker indicating literal Javascript should be rendered. No escaping will be performed. When inside a JavascriptContext, Nevow will automatically put apostrophe quote marks around any Python strings it renders. This makes turning a Python string into a JavaScript string very easy. However, there are often situations where you wish to generate some literal Javascript code and do not wish quote marks to be placed around it. In this situation, the js object should be used. The simplest usage is to simply pass a python string to js. When the js object is rendered, the python string will be rendered as if it were literal javascript. For example:: client.send(js(\"alert('hello')\")) However, to make the generation of Javascript more convenient, the js object also provides safe implementations of __getattr__, __call__, and __getitem__. See the following examples to get an idea of how to use it. The Python code is to the left of the -> and the Javascript which results is to the right:: js(\"alert('any javascript you like')\") -> alert('any javascript you like') js.window.title -> window.title js.document.getElementById('foo') -> document.getElementById('foo') js.myFunction('my argument') -> myFunction('my argument') js.myFunction(True, 5, \"it's a beautiful day\") -> myFunction(true, 5, 'it\\'s a beautiful day') js.document.all[\"something\"] -> document.all['something'] js[1, 2] -> [1, 2] XXX TODO support javascript object literals somehow? (They look like dicts) perhaps like this:: js[\"one\": 1, \"two\": 2] -> {\"one\": 1, \"two\": 2} The livepage module includes many convenient instances of the js object. 
It includes the literals:: document window this self It includes shorthand for commonly called javascript functions:: alert -> alert get -> document.getElementById set -> nevow_setNode append -> nevow_appendNode prepend -> nevow.prependNode insert -> nevow.insertNode It includes convenience calls against the client-side server object:: server.handle('callMe') -> server.handle('callMe') It includes commonly-used fragments of javascript:: stop -> ; return false; eol -> \\n Stop is used to prevent the browser from executing it's default event handler. For example:: button(onclick=[server.handle('click'), stop]) -> <button onclick=\"server.handle('click'); return false;\" /> EOL is currently required to separate statements (this requirement may go away in the future). For example:: client.send([ alert('hello'), eol, alert('goodbye')]) XXX TODO: investigate whether rendering a \\n between list elements in a JavascriptContext has any ill effects. """ def __init__(self, name=None): if name is None: name = [] if isinstance(name, str): name = [stan.raw(name)] self._children = name def __getattr__(self, name): if name == 'clone': raise RuntimeError("Can't clone") if self._children: newchildren = self._children[:] newchildren.append(stan.raw('.'+name)) return self.__class__(newchildren) return self.__class__(name) def __call__(self, *args): if not self._children: return self.__class__(args[0]) newchildren = self._children[:] stuff = [] for x in args: if isinstance(x, ( basestring, stan.Tag, types.FunctionType, types.MethodType, types.UnboundMethodType)): x = stan.raw("'"), SingleQuote(x), stan.raw("'") stuff.append((x, stan.raw(','))) if stuff: stuff[-1] = stuff[-1][0] newchildren.extend([stan.raw('('), stuff, stan.raw(');')]) return self.__class__(newchildren) def __getitem__(self, args): if not isinstance(args, (tuple, list)): args = (args,) newchildren = self._children[:] stuff = [(x, stan.raw(',')) for x in args] if stuff: stuff[-1] = stuff[-1][0] newchildren.extend([stan.raw("["), stuff, stan.raw("]")]) return self.__class__(newchildren) def __iter__(self): """Prevent an infinite loop if someone tries to do for x in jsinstance: """ raise NotImplementedError, "js instances are not iterable. (%r)" % (self, ) def __repr__(self): return "%s(%r)" % (type(self).__name__, self._children) def flattenJS(theJS, ctx): new = JavascriptContext(ctx, tags.invisible[theJS]) return flat.serialize(theJS._children, new) flat.registerFlattener(flattenJS, _js) js = _js() document = _js('document') get = document.getElementById window = _js('window') this = _js('this') self = _js('self') server = _js('server') alert = _js('alert') stop = _js('; return false;') eol = tags.raw('\n') set = js.nevow_setNode append = js.nevow_appendNode prepend = js.nevow_prependNode insert = js.nevow_insertNode def assign(where, what): """Assign what to where. Equivalent to where = what; """ return _js([where, stan.raw(" = "), what]) setq = assign # hee def var(where, what): """Define local variable 'where' and assign 'what' to it. Equivalent to var where = what; """ return _js([stan.raw("var "), where, stan.raw(" = "), what, stan.raw(";")]) def anonymous(block): """ Turn block (any stan) into an anonymous JavaScript function which takes no arguments. Equivalent to:: function () { block } """ return _js([stan.raw("function() {\n"), block, stan.raw("\n}")]) class IClientHandle(Interface): def hookupOutput(output, finisher=None): """hook up an output conduit to this live evil instance. 
""" def send(script): """send a script through the output conduit to the browser. If no output conduit is yet hooked up, buffer the script until one is. """ def handleInput(identifier, *args): """route some input from the browser to the appropriate destination. """ class IHandlerFactory(Interface): def locateHandler(ctx, name): """Locate a handler callable with the given name. """ class _transient(object): def __init__(self, transientId, arguments=None): self.transientId = transientId if arguments is None: arguments = [] elif isinstance(arguments, tuple): arguments = list(arguments) else: raise TypeError, "Arguments must be None or tuple" self.arguments = arguments def __call__(self, *arguments): return type(self)(self.transientId, arguments) def flattenTransient(transient, ctx): thing = js.server.handle("--transient.%s" % (transient.transientId, ), *transient.arguments) return flat.serialize(thing, ctx) flat.registerFlattener(flattenTransient, _transient) class ClientHandle(object): """An object which represents the client-side webbrowser. """ implements(IClientHandle) outputConduit = None def __init__(self, livePage, handleId, refreshInterval, targetTimeoutCount): self.refreshInterval = refreshInterval self.targetTimeoutCount = targetTimeoutCount self.timeoutCount = 0 self.livePage = livePage self.handleId = handleId self.outputBuffer = [] self.bufferDeferreds = [] self.closed = False self.closeNotifications = [] self.firstTime = True self.timeoutLoop = LoopingCall(self.checkTimeout) if refreshInterval: self.timeoutLoop.start(self.refreshInterval) self._transients = {} self.transientCounter = itertools.count().next self.nextId = itertools.count().next ## For backwards compatibility with handler def transient(self, what, *args): """Register a transient event handler, 'what'. The callable 'what' can only be invoked by the client once before being garbage collected. Additional attempts to invoke the handler will fail. """ transientId = str(self.transientCounter()) self._transients[transientId] = what return _transient(transientId, args) def popTransient(self, transientId): """Remove a transient previously registered by a call to transient. Normally, this will be done automatically when the transient is invoked. However, you can invoke it yourself if you wish to revoke the client's capability to call the transient handler. """ if DEBUG: print "TRANSIENTS", self._transients return self._transients.pop(transientId) def _actuallySend(self, scripts): output = self.outputConduit written = [] def writer(write): #print "WRITER", write written.append(write) def finisher(finish): towrite = '\n'.join(written) jslog("<<<<<<\n%s\n" % towrite) output.callback(towrite) flat.flattenFactory(scripts, self.outputContext, writer, finisher) self.outputConduit = None self.outputContext = None def send(self, *script): """Send the stan "script", which can be flattened to javascript, to the browser which is connected to this handle, and evaluate it in the context of the browser window. 
""" if self.outputConduit: self._actuallySend(script) else: self.outputBuffer.append(script) self.outputBuffer.append(eol) def setOutput(self, ctx, output): self.timeoutCount = 0 self.outputContext = ctx self.outputConduit = output if self.outputBuffer: if DEBUG: print "SENDING BUFFERED", self.outputBuffer self._actuallySend(self.outputBuffer) self.outputBuffer = [] def _actuallyPassed(self, result, deferreds): for d in deferreds: d.callback(result) def _actuallyFailed(self, failure, deferreds): for d in deferreds: d.errback(failure) def checkTimeout(self): if self.outputConduit is not None: ## The browser is waiting for us, send a noop. self.send(_js('null;')) return self.timeoutCount += 1 if self.timeoutCount >= self.targetTimeoutCount: ## This connection timed out. self._closeComplete( TimeoutException( "This connection did not ACK in at least %s seconds." % ( self.targetTimeoutCount * self.refreshInterval, ))) def outputGone(self, failure, output): # assert output == self.outputConduit # Twisted errbacks with a ConnectionDone when the client closes the # connection cleanly. Pretend it didn't happen and carry on. self.outputConduit = None if failure.check(error.ConnectionDone): self._closeComplete() else: self._closeComplete(failure) return None def _closeComplete(self, failure=None): if self.closed: return self.closed = True self.timeoutLoop.stop() self.timeoutLoop = None for notify in self.closeNotifications[:]: if failure is not None: notify.errback(failure) else: notify.callback(None) self.closeNotifications = [] def notifyOnClose(self): """This will return a Deferred that will be fired when the connection is closed 'normally', i.e. in response to handle.close() . If the connection is lost in any other way (because the browser navigated to another page, the browser was shut down, the network connection was lost, or the timeout was reached), this will errback instead.""" d = defer.Deferred() self.closeNotifications.append(d) return d def close(self, executeScriptBeforeClose=""): if DEBUG: print "CLOSE WAS CALLED" d = self.notifyOnClose() self.send(js.nevow_closeLive(executeScriptBeforeClose)) return d def set(self, where, what): self.send(js.nevow_setNode(where, what)) def prepend(self, where, what): self.send(js.nevow_prependNode(where, what)) def append(self, where, what): self.send(js.nevow_appendNode(where, what)) def alert(self, what): self.send(js.alert(what)) def call(self, what, *args): self.send(js(what)(*args)) def sendScript(self, string): warnings.warn( "[0.5] nevow.livepage.ClientHandle.sendScript is deprecated, use send instead.", DeprecationWarning, 2) self.send(string) class DefaultClientHandleFactory(object): clientHandleClass = ClientHandle def __init__(self): self.clientHandles = {} self.handleCounter = itertools.count().next def newClientHandle(self, livePage, refreshInterval, targetTimeoutCount): handleid = str(self.handleCounter()) handle = self.clientHandleClass( livePage, handleid, refreshInterval, targetTimeoutCount) self.clientHandles[handleid] = handle # MD : Modify to not remove handles automatically so they can be resumed #modification for eXe online: the session will take care of deleting old clients #this module should not have been used since 6 years anyway... so mod #can live here for now until migration to athena #handle.notifyOnClose().addBoth(lambda ign: self.deleteHandle(handleid)) return handle def deleteHandle(self, handleid): del self.clientHandles[handleid] def getHandleForId(self, handleId): """Override this to restore old handles on demand. 
""" return self.clientHandles[handleId] theDefaultClientHandleFactory = DefaultClientHandleFactory() class OutputHandlerResource: implements(inevow.IResource) def __init__(self, clientHandle): self.clientHandle = clientHandle def locateChild(self, ctx, segments): raise NotImplementedError() def renderHTTP(self, ctx): request = inevow.IRequest(ctx) neverEverCache(request) activeChannel(request) ctx.remember(jsExceptionHandler, inevow.ICanHandleException) request.channel._savedTimeOut = None # XXX TODO d = defer.Deferred() request.notifyFinish().addErrback(self.clientHandle.outputGone, d) jsContext = JavascriptContext(ctx, tags.invisible()) self.clientHandle.livePage.rememberStuff(jsContext) jsContext.remember(self.clientHandle, IClientHandle) if self.clientHandle.firstTime: self.clientHandle.livePage.goingLive(jsContext, self.clientHandle) self.clientHandle.firstTime = False self.clientHandle.setOutput(jsContext, d) return d class InputHandlerResource: implements(inevow.IResource) def __init__(self, clientHandle): self.clientHandle = clientHandle def locateChild(self, ctx, segments): raise NotImplementedError() def renderHTTP(self, ctx): self.clientHandle.timeoutCount = 0 request = inevow.IRequest(ctx) neverEverCache(request) activeChannel(request) ctx.remember(self.clientHandle, IClientHandle) ctx.remember(jsExceptionHandler, inevow.ICanHandleException) self.clientHandle.livePage.rememberStuff(ctx) handlerName = request.args['handler-name'][0] arguments = request.args.get('arguments', ()) jslog(">>>>>>\n%s %s\n" % (handlerName, arguments)) if handlerName.startswith('--transient.'): handler = self.clientHandle.popTransient(handlerName.split('.')[-1]) else: handler = self.clientHandle.livePage.locateHandler( ctx, request.args['handler-path'], handlerName) jsContext = JavascriptContext(ctx, tags.invisible[handler]) towrite = [] def writer(r): jslog("WRITE ", r) towrite.append(r) def finisher(r): jslog("FINISHED", r) writestr = ''.join(towrite) jslog("<><><>\n%s\n" % (writestr, )) request.write(writestr) request.finish() return r result = handler(jsContext, *arguments) jslog("RESULT ", result) if result is None: return defer.succeed('') return self.clientHandle.livePage.flattenFactory(result, jsContext, writer, finisher) class DefaultClientHandlesResource(object): implements(inevow.IResource) clientResources = { 'input': InputHandlerResource, 'output': OutputHandlerResource, } clientFactory = theDefaultClientHandleFactory def locateChild(self, ctx, segments): handleId = segments[0] handlerType = segments[1] #client = self.clientFactory.clientHandles[handleId] client = clientFactory.clientHandles[handleId] return self.clientResources[handlerType](client), segments[2:] theDefaultClientHandlesResource = DefaultClientHandlesResource() class attempt(defer.Deferred): """ Attempt to do 'stuff' in the browser. callback on the server if 'stuff' executes without raising an exception. errback on the server if 'stuff' raises a JavaScript exception in the client. 
Used like this:: def printIt(what): print "Woo!", what C = IClientHandle(ctx) C.send( attempt(js("1+1")).addCallback(printIt)) C.send( attempt(js("thisWillFail")).addErrback(printIt)) """ def __init__(self, stuff): self.stuff = stuff defer.Deferred.__init__(self) def flattenAttemptDeferred(d, ctx): def attemptComplete(ctx, result, reason=None): if result == 'success': d.callback(None) else: d.errback(ClientSideException(reason)) transient = IClientHandle(ctx).transient(attemptComplete) return flat.serialize([ _js("""try { """), d.stuff, _js(""" """), transient('success'), _js(""" } catch (e) { """), transient('failure', js.e), _js("}") ], ctx) flat.registerFlattener(flattenAttemptDeferred, attempt) class IOutputEvent(Interface): pass class IInputEvent(Interface): pass class ExceptionHandler(object): def renderHTTP_exception(self, ctx, failure): log.msg("Exception during input event:") log.err(failure) request = inevow.IRequest(ctx) request.write("throw new Error('Server side error: %s')" % (failure.getErrorMessage().replace("'", "\\'").replace("\n", "\\\n"), )) request.finish() def renderInlineException(self, ctx, reason): """TODO: I don't even think renderInlineException is ever called by anybody """ pass jsExceptionHandler = ExceptionHandler() def neverEverCache(request): """ Set headers to indicate that the response to this request should never, ever be cached. """ request.setHeader('Cache-Control', 'no-store, no-cache, must-revalidate') request.setHeader('Pragma', 'no-cache') def activeChannel(request): """Mark this connection as a 'live' channel by setting the Connection: close header and flushing all headers immediately. """ request.setHeader("Connection", "close") request.write('') class LivePage(rend.Page): """ A Page which is Live provides asynchronous, bidirectional RPC between Python on the server and JavaScript in the client browser. A LivePage must include the "liveglue" JavaScript which includes a unique identifier which is assigned to every page render of a LivePage and the JavaScript required for the client to communicate asynchronously with the server. A LivePage grants the client browser the capability of calling server-side Python methods using a small amount of JavaScript code. There are two types of Python handler methods, persistent handlers and transient handlers. - To grant the client the capability to call a persistent handler over and over as many times as it wishes, subclass LivePage and provide handle_foo methods. The client can then call handle_foo by executing the following JavaScript:: server.handle('foo') handle_foo will be invoked because the default implementation of locateHandler looks for a method prefixed handle_*. To change this, override locateHandler to do what you wish. - To grant the client the capability of calling a handler once and exactly once, use ClientHandle.transient to register a callable and embed the return result in a page to render JavaScript which will invoke the transient handler when executed. For example:: def render_clickable(self, ctx, data): def hello(ctx): return livepage.alert(\"Hello, world. You can only click me once.\") return ctx.tag(onclick=IClientHandle(ctx).transient(hello)) The return result of transient can also be called to pass additional arguments to the transient handler. 
For example:: def render_choice(self, ctx, data): def chosen(ctx, choseWhat): return livepage.set( \"choosable\", [\"Thanks for choosing \", choseWhat]) chooser = IClientHandle(ctx).transient(chosen) return span(id=\"choosable\")[ \"Choose one:\", p(onclick=chooser(\"one\"))[\"One\"], p(onclick=chooser(\"two\"))[\"Two\"]] Note that the above situation displays temporary UI to the user. When the user invokes the chosen handler, the UI which allowed the user to invoke the chosen handler is removed from the client. Thus, it is important that the transient registration is deleted once it is invoked, otherwise uncollectable garbage would accumulate in the handler dictionary. It is also important that either the one or the two button consume the same handler, since it is an either/or choice. If two handlers were registered, the untaken choice would be uncollectable garbage. """ refreshInterval = 30 targetTimeoutCount = 3 clientFactory = theDefaultClientHandleFactory def renderHTTP(self, ctx): if not self.cacheable: neverEverCache(inevow.IRequest(ctx)) return rend.Page.renderHTTP(self, ctx) def locateHandler(self, ctx, path, name): ### XXX TODO: Handle path return getattr(self, 'handle_%s' % (name, )) def goingLive(self, ctx, handle): """This particular LivePage instance is 'going live' from the perspective of the ClientHandle 'handle'. Override this to get notified when a new browser window observes this page. This means that a new user is now looking at the page, an old user has refreshed the page, or an old user has opened a new window or tab onto the page. This is the first time the ClientHandle instance is available for general use by the server. This Page may wish to keep track of the ClientHandle instances depending on how your application is set up. """ pass def child_livepage_client(self, ctx): return theDefaultClientHandlesResource # child_nevow_glue.js = static.File # see below def render_liveid(self, ctx, data): warnings.warn("You don't need a liveid renderer any more; just liveglue is fine.", DeprecationWarning) return '' cacheable = False # Set this to true to use ***HIGHLY*** # EXPERIMENTAL lazy ID allocation feature, # which will allow your LivePage instances to # be cached by clients. def render_liveglue(self, ctx, data): if not self.cacheable: handleId = "'", self.clientFactory.newClientHandle( self, self.refreshInterval, self.targetTimeoutCount).handleId, "'" else: handleId = 'null' return [ tags.script(type="text/javascript")[ "var nevow_clientHandleId = ", handleId ,";"], tags.script(type="text/javascript", src=url.here.child('nevow_glue.js')) ] setattr( LivePage, 'child_nevow_glue.js', static.File( util.resource_filename('nevow', 'liveglue.js'), 'text/javascript')) glue = tags.directive('liveglue') ##### BACKWARDS COMPATIBILITY CODE ctsTemplate = "nevow_clientToServerEvent('%s',this,''%s)%s" handledEventPostlude = '; return false;' class handler(object): callme = None args = () identifier = None def __init__(self, *args, **kw): """**DEPRECATED** [0.5] Handler is now deprecated. To expose server-side code to the client to be called by JavaScript, read the LivePage docstring. 
""" warnings.warn( "[0.5] livepage.handler is deprecated; Provide handle_foo methods (or override locateHandler) on your LivePage and use (in javascript) server.handle('foo'), or use ClientHandle.transient to register a one-shot handler capability.", DeprecationWarning, 2) ## Handle working like a 2.4 decorator where calling handler returns a decorator if not callable(args[0]) or isinstance(args[0], _js): self.args = args return self.callme = args[0] self(*args[1:], **kw) def __call__(self, *args, **kw): if self.callme is None: self.callme = args[0] args = args[1:] self.args += args self.outsideAttribute = kw.get('outsideAttribute') bubble = kw.get('bubble') if bubble: self.postlude = ';' else: self.postlude = handledEventPostlude if 'identifier' in kw: self.identifier = kw['identifier'] return self content = property(lambda self: flt(self)) def flattenHandler(handler, ctx): client = IClientHandle(ctx) iden = handler.identifier if iden is None: iden = client.nextId() iden = '--handler-%s' % (iden, ) ## TODO this should be the IHandlerFactory instead of IResource setattr(IHandlerFactory(ctx), 'handle_%s' % (iden, ), handler.callme) isAttrib = not handler.outsideAttribute new = JavascriptContext(ctx, tags.invisible[handler.args], isAttrib=isAttrib) rv = flat.flatten( js.nevow_clientToServerEvent(*(iden, this, '') + handler.args), new) rv += handler.postlude return tags.xml(rv) flat.registerFlattener(flattenHandler, handler) def flt(stan, quote=True, client=None, handlerFactory=None): """Flatten some stan to a string suitable for embedding in a javascript string. If quote is True, apostrophe, quote, and newline will be quoted """ warnings.warn("[0.5] livepage.flt is deprecated. Don't use it.", DeprecationWarning, 2) from nevow import testutil fr = testutil.FakeRequest() ctx = context.RequestContext(tag=fr) ctx.remember(client, IClientHandle) ctx.remember(handlerFactory, IHandlerFactory) ctx.remember(None, inevow.IData) fl = flat.flatten(stan, ctx=ctx) if quote: fl = fl.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n') return fl
UstadMobile/exelearning-ustadmobile-work
nevow/livepage.py
Python
gpl-2.0
30,194
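As a companion to the locateHandler convention above (handle_<name> methods looked up by name), here is a minimal hedged sketch of a LivePage subclass exposing one such handler. The class name, handler name, and element id are illustrative only; livepage.set is the same client command used in the docstring example, and the (ctx, args...) handler signature is an assumption based on how locateHandler's result is invoked.

from nevow import livepage


class EchoPage(livepage.LivePage):
    # docFactory (a template) would normally be set here; omitted in this sketch.

    def handle_echo(self, ctx, text):
        # Resolved by locateHandler as 'handle_' + 'echo' when the client
        # calls server.handle('echo', ...); pushes the text into the element
        # with id="output" on the client.
        return livepage.set('output', text)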
# Enter your code here. Read input from STDIN. Print output to STDOUT
N = int(raw_input())                        # number of values (only consumed, not otherwise used)
nums = map(int, raw_input().split(" "))     # the values themselves
nums = sorted(nums, reverse=True)           # largest value first
for i in xrange(1, len(nums)):
    if nums[i] < nums[0]:                   # first value strictly below the maximum
        print nums[i]
        break
shree-shubham/Unitype
Find the Second Largest Number.py
Python
gpl-3.0
248
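The HackerRank snippet above is Python 2 (raw_input, xrange, print statement). A minimal Python 3 sketch of the same second-largest idea, with the stdin handling factored out; the function name and test values are made up:

def second_largest(nums):
    ordered = sorted(nums, reverse=True)
    for value in ordered[1:]:
        if value < ordered[0]:   # first value strictly below the maximum
            return value
    return None                  # all elements equal: no second largest


if __name__ == "__main__":
    print(second_largest([2, 6, 9, 9, 5]))  # -> 6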
"""Config flow for Vizio.""" import copy import logging import socket from typing import Any, Dict, Optional from pyvizio import VizioAsync, async_guess_device_type from pyvizio.const import APP_HOME import voluptuous as vol from homeassistant import config_entries from homeassistant.components.media_player import DEVICE_CLASS_SPEAKER, DEVICE_CLASS_TV from homeassistant.config_entries import ( SOURCE_IGNORE, SOURCE_IMPORT, SOURCE_ZEROCONF, ConfigEntry, ) from homeassistant.const import ( CONF_ACCESS_TOKEN, CONF_DEVICE_CLASS, CONF_EXCLUDE, CONF_HOST, CONF_INCLUDE, CONF_NAME, CONF_PIN, CONF_PORT, CONF_TYPE, ) from homeassistant.core import callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.typing import DiscoveryInfoType from homeassistant.util.network import is_ip_address from .const import ( CONF_APPS, CONF_APPS_TO_INCLUDE_OR_EXCLUDE, CONF_INCLUDE_OR_EXCLUDE, CONF_VOLUME_STEP, DEFAULT_DEVICE_CLASS, DEFAULT_NAME, DEFAULT_VOLUME_STEP, DEVICE_ID, DOMAIN, ) _LOGGER = logging.getLogger(__name__) def _get_config_schema(input_dict: Dict[str, Any] = None) -> vol.Schema: """ Return schema defaults for init step based on user input/config dict. Retain info already provided for future form views by setting them as defaults in schema. """ if input_dict is None: input_dict = {} return vol.Schema( { vol.Required( CONF_NAME, default=input_dict.get(CONF_NAME, DEFAULT_NAME) ): str, vol.Required(CONF_HOST, default=input_dict.get(CONF_HOST)): str, vol.Required( CONF_DEVICE_CLASS, default=input_dict.get(CONF_DEVICE_CLASS, DEFAULT_DEVICE_CLASS), ): vol.All(str, vol.Lower, vol.In([DEVICE_CLASS_TV, DEVICE_CLASS_SPEAKER])), vol.Optional( CONF_ACCESS_TOKEN, default=input_dict.get(CONF_ACCESS_TOKEN, "") ): str, }, extra=vol.REMOVE_EXTRA, ) def _get_pairing_schema(input_dict: Dict[str, Any] = None) -> vol.Schema: """ Return schema defaults for pairing data based on user input. Retain info already provided for future form views by setting them as defaults in schema. 
""" if input_dict is None: input_dict = {} return vol.Schema( {vol.Required(CONF_PIN, default=input_dict.get(CONF_PIN, "")): str} ) def _host_is_same(host1: str, host2: str) -> bool: """Check if host1 and host2 are the same.""" host1 = host1.split(":")[0] host1 = host1 if is_ip_address(host1) else socket.gethostbyname(host1) host2 = host2.split(":")[0] host2 = host2 if is_ip_address(host2) else socket.gethostbyname(host2) return host1 == host2 class VizioOptionsConfigFlow(config_entries.OptionsFlow): """Handle Vizio options.""" def __init__(self, config_entry: ConfigEntry) -> None: """Initialize vizio options flow.""" self.config_entry = config_entry async def async_step_init( self, user_input: Dict[str, Any] = None ) -> Dict[str, Any]: """Manage the vizio options.""" if user_input is not None: if user_input.get(CONF_APPS_TO_INCLUDE_OR_EXCLUDE): user_input[CONF_APPS] = { user_input[CONF_INCLUDE_OR_EXCLUDE]: user_input[ CONF_APPS_TO_INCLUDE_OR_EXCLUDE ].copy() } user_input.pop(CONF_INCLUDE_OR_EXCLUDE) user_input.pop(CONF_APPS_TO_INCLUDE_OR_EXCLUDE) return self.async_create_entry(title="", data=user_input) options = vol.Schema( { vol.Optional( CONF_VOLUME_STEP, default=self.config_entry.options.get( CONF_VOLUME_STEP, DEFAULT_VOLUME_STEP ), ): vol.All(vol.Coerce(int), vol.Range(min=1, max=10)) } ) if self.config_entry.data[CONF_DEVICE_CLASS] == DEVICE_CLASS_TV: default_include_or_exclude = ( CONF_EXCLUDE if self.config_entry.options and CONF_EXCLUDE in self.config_entry.options.get(CONF_APPS, {}) else CONF_INCLUDE ) options = options.extend( { vol.Optional( CONF_INCLUDE_OR_EXCLUDE, default=default_include_or_exclude.title(), ): vol.All( vol.In([CONF_INCLUDE.title(), CONF_EXCLUDE.title()]), vol.Lower ), vol.Optional( CONF_APPS_TO_INCLUDE_OR_EXCLUDE, default=self.config_entry.options.get(CONF_APPS, {}).get( default_include_or_exclude, [] ), ): cv.multi_select( [ APP_HOME["name"], *[ app["name"] for app in self.hass.data[DOMAIN][CONF_APPS].data ], ] ), } ) return self.async_show_form(step_id="init", data_schema=options) class VizioConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a Vizio config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL @staticmethod @callback def async_get_options_flow(config_entry: ConfigEntry) -> VizioOptionsConfigFlow: """Get the options flow for this handler.""" return VizioOptionsConfigFlow(config_entry) def __init__(self) -> None: """Initialize config flow.""" self._user_schema = None self._must_show_form = None self._ch_type = None self._pairing_token = None self._data = None self._apps = {} async def _create_entry(self, input_dict: Dict[str, Any]) -> Dict[str, Any]: """Create vizio config entry.""" # Remove extra keys that will not be used by entry setup input_dict.pop(CONF_APPS_TO_INCLUDE_OR_EXCLUDE, None) input_dict.pop(CONF_INCLUDE_OR_EXCLUDE, None) if self._apps: input_dict[CONF_APPS] = self._apps return self.async_create_entry(title=input_dict[CONF_NAME], data=input_dict) async def async_step_user( self, user_input: Dict[str, Any] = None ) -> Dict[str, Any]: """Handle a flow initialized by the user.""" assert self.hass errors = {} if user_input is not None: # Store current values in case setup fails and user needs to edit self._user_schema = _get_config_schema(user_input) if self.unique_id is None: unique_id = await VizioAsync.get_unique_id( user_input[CONF_HOST], user_input[CONF_DEVICE_CLASS], session=async_get_clientsession(self.hass, False), ) # Check if unique ID was found, set unique ID, and abort if a flow with 
# the same unique ID is already in progress if not unique_id: errors[CONF_HOST] = "cannot_connect" elif ( await self.async_set_unique_id( unique_id=unique_id, raise_on_progress=True ) is not None ): errors[CONF_HOST] = "existing_config_entry_found" if not errors: # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 if self._must_show_form and self.context["source"] == SOURCE_ZEROCONF: # Discovery should always display the config form before trying to # create entry so that user can update default config options self._must_show_form = False elif user_input[ CONF_DEVICE_CLASS ] == DEVICE_CLASS_SPEAKER or user_input.get(CONF_ACCESS_TOKEN): # Ensure config is valid for a device if not await VizioAsync.validate_ha_config( user_input[CONF_HOST], user_input.get(CONF_ACCESS_TOKEN), user_input[CONF_DEVICE_CLASS], session=async_get_clientsession(self.hass, False), ): errors["base"] = "cannot_connect" if not errors: return await self._create_entry(user_input) # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 elif self._must_show_form and self.context["source"] == SOURCE_IMPORT: # Import should always display the config form if CONF_ACCESS_TOKEN # wasn't included but is needed so that the user can choose to update # their configuration.yaml or to proceed with config flow pairing. We # will also provide contextual message to user explaining why _LOGGER.warning( "Couldn't complete configuration.yaml import: '%s' key is " "missing. Either provide '%s' key in configuration.yaml or " "finish setup by completing configuration via frontend", CONF_ACCESS_TOKEN, CONF_ACCESS_TOKEN, ) self._must_show_form = False else: self._data = copy.deepcopy(user_input) return await self.async_step_pair_tv() schema = self._user_schema or _get_config_schema() # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 if errors and self.context["source"] == SOURCE_IMPORT: # Log an error message if import config flow fails since otherwise failure is silent _LOGGER.error( "configuration.yaml import failure: %s", ", ".join(errors.values()) ) return self.async_show_form(step_id="user", data_schema=schema, errors=errors) async def async_step_import(self, import_config: Dict[str, Any]) -> Dict[str, Any]: """Import a config entry from configuration.yaml.""" # Check if new config entry matches any existing config entries for entry in self.hass.config_entries.async_entries(DOMAIN): # If source is ignore bypass host check and continue through loop if entry.source == SOURCE_IGNORE: continue if await self.hass.async_add_executor_job( _host_is_same, entry.data[CONF_HOST], import_config[CONF_HOST] ): updated_options = {} updated_data = {} remove_apps = False if entry.data[CONF_HOST] != import_config[CONF_HOST]: updated_data[CONF_HOST] = import_config[CONF_HOST] if entry.data[CONF_NAME] != import_config[CONF_NAME]: updated_data[CONF_NAME] = import_config[CONF_NAME] # Update entry.data[CONF_APPS] if import_config[CONF_APPS] differs, and # pop entry.data[CONF_APPS] if import_config[CONF_APPS] is not specified if entry.data.get(CONF_APPS) != import_config.get(CONF_APPS): if not import_config.get(CONF_APPS): remove_apps = True else: updated_options[CONF_APPS] = import_config[CONF_APPS] if entry.data.get(CONF_VOLUME_STEP) != import_config[CONF_VOLUME_STEP]: updated_options[CONF_VOLUME_STEP] = import_config[CONF_VOLUME_STEP] if updated_options or updated_data or remove_apps: new_data = entry.data.copy() new_options = entry.options.copy() if remove_apps: new_data.pop(CONF_APPS) 
new_options.pop(CONF_APPS) if updated_data: new_data.update(updated_data) # options are stored in entry options and data so update both if updated_options: new_data.update(updated_options) new_options.update(updated_options) self.hass.config_entries.async_update_entry( entry=entry, data=new_data, options=new_options ) return self.async_abort(reason="updated_entry") return self.async_abort(reason="already_configured_device") self._must_show_form = True # Store config key/value pairs that are not configurable in user step so they # don't get lost on user step if import_config.get(CONF_APPS): self._apps = copy.deepcopy(import_config[CONF_APPS]) return await self.async_step_user(user_input=import_config) async def async_step_zeroconf( self, discovery_info: Optional[DiscoveryInfoType] = None ) -> Dict[str, Any]: """Handle zeroconf discovery.""" assert self.hass # If host already has port, no need to add it again if ":" not in discovery_info[CONF_HOST]: discovery_info[ CONF_HOST ] = f"{discovery_info[CONF_HOST]}:{discovery_info[CONF_PORT]}" # Set default name to discovered device name by stripping zeroconf service # (`type`) from `name` num_chars_to_strip = len(discovery_info[CONF_TYPE]) + 1 discovery_info[CONF_NAME] = discovery_info[CONF_NAME][:-num_chars_to_strip] discovery_info[CONF_DEVICE_CLASS] = await async_guess_device_type( discovery_info[CONF_HOST] ) # Set unique ID early for discovery flow so we can abort if needed unique_id = await VizioAsync.get_unique_id( discovery_info[CONF_HOST], discovery_info[CONF_DEVICE_CLASS], session=async_get_clientsession(self.hass, False), ) await self.async_set_unique_id(unique_id=unique_id, raise_on_progress=True) self._abort_if_unique_id_configured() # Form must be shown after discovery so user can confirm/update configuration # before ConfigEntry creation. self._must_show_form = True return await self.async_step_user(user_input=discovery_info) async def async_step_pair_tv( self, user_input: Dict[str, Any] = None ) -> Dict[str, Any]: """ Start pairing process for TV. Ask user for PIN to complete pairing process. 
""" errors = {} # Start pairing process if it hasn't already started if not self._ch_type and not self._pairing_token: dev = VizioAsync( DEVICE_ID, self._data[CONF_HOST], self._data[CONF_NAME], None, self._data[CONF_DEVICE_CLASS], session=async_get_clientsession(self.hass, False), ) pair_data = await dev.start_pair() if pair_data: self._ch_type = pair_data.ch_type self._pairing_token = pair_data.token return await self.async_step_pair_tv() return self.async_show_form( step_id="user", data_schema=_get_config_schema(self._data), errors={"base": "cannot_connect"}, ) # Complete pairing process if PIN has been provided if user_input and user_input.get(CONF_PIN): dev = VizioAsync( DEVICE_ID, self._data[CONF_HOST], self._data[CONF_NAME], None, self._data[CONF_DEVICE_CLASS], session=async_get_clientsession(self.hass, False), ) pair_data = await dev.pair( self._ch_type, self._pairing_token, user_input[CONF_PIN] ) if pair_data: self._data[CONF_ACCESS_TOKEN] = pair_data.auth_token self._must_show_form = True # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167 if self.context["source"] == SOURCE_IMPORT: # If user is pairing via config import, show different message return await self.async_step_pairing_complete_import() return await self.async_step_pairing_complete() # If no data was retrieved, it's assumed that the pairing attempt was not # successful errors[CONF_PIN] = "complete_pairing_failed" return self.async_show_form( step_id="pair_tv", data_schema=_get_pairing_schema(user_input), errors=errors, ) async def _pairing_complete(self, step_id: str) -> Dict[str, Any]: """Handle config flow completion.""" if not self._must_show_form: return await self._create_entry(self._data) self._must_show_form = False return self.async_show_form( step_id=step_id, data_schema=vol.Schema({}), description_placeholders={"access_token": self._data[CONF_ACCESS_TOKEN]}, ) async def async_step_pairing_complete( self, user_input: Dict[str, Any] = None ) -> Dict[str, Any]: """ Complete non-import sourced config flow. Display final message to user confirming pairing. """ return await self._pairing_complete("pairing_complete") async def async_step_pairing_complete_import( self, user_input: Dict[str, Any] = None ) -> Dict[str, Any]: """ Complete import sourced config flow. Display final message to user confirming pairing and displaying access token. """ return await self._pairing_complete("pairing_complete_import")
sdague/home-assistant
homeassistant/components/vizio/config_flow.py
Python
apache-2.0
18,537
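The _host_is_same helper in the config flow above is easy to exercise on its own. A stand-alone sketch of the same idea (strip an optional port, resolve names, compare), without Home Assistant's is_ip_address shortcut; the hostnames are placeholders:

import socket


def hosts_match(host1: str, host2: str) -> bool:
    # Drop an optional ":port" suffix, then compare resolved IPv4 addresses.
    host1 = socket.gethostbyname(host1.split(":")[0])
    host2 = socket.gethostbyname(host2.split(":")[0])
    return host1 == host2


print(hosts_match("localhost:7345", "127.0.0.1"))  # typically True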
# -*- coding: utf-8 -*- # Generated by Django 1.9.2 on 2016-03-19 20:03 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20160307_1903'), ] operations = [ migrations.AddField( model_name='userexamination', name='must_finished_at', field=models.DateTimeField(blank=True, null=True, verbose_name='Обязан закончить до'), ), ]
telminov/personnel-testing
core/migrations/0003_userexamination_must_finished_at.py
Python
mit
529
from questionnaire.features.pages.base import PageObject class ThemePage(PageObject): url = "/themes/"
eJRF/ejrf
questionnaire/features/pages/theme.py
Python
bsd-3-clause
108
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2018. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """A module for monitoring jobs, backends, etc. """ from .job_monitor import job_monitor from .backend_overview import backend_monitor, backend_overview
QISKit/qiskit-sdk-py
qiskit/tools/monitor/__init__.py
Python
apache-2.0
663
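A hedged usage sketch for the two entry points re-exported above; the circuit is illustrative and the BasicAer simulator is used so nothing requires IBMQ credentials:

from qiskit import BasicAer, QuantumCircuit, execute
from qiskit.tools.monitor import job_monitor

qc = QuantumCircuit(1, 1)
qc.h(0)
qc.measure(0, 0)

backend = BasicAer.get_backend("qasm_simulator")
job = execute(qc, backend, shots=100)
job_monitor(job)                        # prints "Job Status: ..." until the job finishes
print(job.result().get_counts(qc))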
# coding: utf-8 # pylint: disable=invalid-name, protected-access, too-many-arguments, global-statement """Symbolic configuration API.""" from __future__ import absolute_import as _abs import ctypes from ..base import _LIB from ..base import c_array, c_str, mx_uint from ..base import SymbolHandle from ..base import check_call from ..name import NameManager from .common import CachedOp # pylint: disable=unused-import _symbol_cls = None class SymbolBase(object): """Symbol is symbolic graph.""" __slots__ = ["handle"] # pylint: disable=no-member def __init__(self, handle): """Initialize the function with handle Parameters ---------- handle : SymbolHandle the handle to the underlying C++ Symbol """ self.handle = handle def __del__(self): check_call(_LIB.NNSymbolFree(self.handle)) def _compose(self, *args, **kwargs): """Compose symbol on inputs. This call mutates the current symbol. Parameters ---------- args: provide positional arguments kwargs: provide keyword arguments Returns ------- the resulting symbol """ name = kwargs.pop('name', None) if name: name = c_str(name) if len(args) != 0 and len(kwargs) != 0: raise TypeError('compose only accept input Symbols \ either as positional or keyword arguments, not both') for arg in args: if not isinstance(arg, SymbolBase): raise TypeError('Compose expect `Symbol` as arguments') for val in kwargs.values(): if not isinstance(val, SymbolBase): raise TypeError('Compose expect `Symbol` as arguments') num_args = len(args) + len(kwargs) if len(kwargs) != 0: keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs]) args = c_array(SymbolHandle, [s.handle for s in kwargs.values()]) else: keys = None args = c_array(SymbolHandle, [s.handle for s in args]) check_call(_LIB.NNSymbolCompose( self.handle, name, num_args, keys, args)) def _set_attr(self, **kwargs): """Set the attribute of the symbol. Parameters ---------- **kwargs The attributes to set """ keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs]) vals = c_array(ctypes.c_char_p, [c_str(str(val)) for val in kwargs.values()]) num_args = mx_uint(len(kwargs)) check_call(_LIB.MXSymbolSetAttrs( self.handle, num_args, keys, vals)) def _set_handle(self, handle): """Set handle.""" self.handle = handle def __reduce__(self): return (_symbol_cls, (None,), self.__getstate__()) def _set_symbol_class(cls): """Set the symbolic class to be cls""" global _symbol_cls _symbol_cls = cls def invoke(cached_op, args, name=None): """Call cached symbolic operator""" ret = SymbolHandle() hint = cached_op.op.lower() name = c_str(NameManager.current.get(name, hint)) check_call(_LIB.MXCachedCreateSymbol( cached_op.handle, name, mx_uint(len(args)), c_array(SymbolHandle, [s.handle for s in args]), ctypes.byref(ret))) return _symbol_cls(ret) def _symbol_creator(handle, args, kwargs, keys, vals, name): sym_handle = SymbolHandle() check_call(_LIB.MXSymbolCreateAtomicSymbol( ctypes.c_void_p(handle), mx_uint(len(keys)), c_array(ctypes.c_char_p, [c_str(i) for i in keys]), c_array(ctypes.c_char_p, [c_str(str(i)) for i in vals]), ctypes.byref(sym_handle))) if args and kwargs: raise TypeError( 'Operators with variable length input can only accept input' 'Symbols either as positional or keyword arguments, not both') s = _symbol_cls(sym_handle) if args: s._compose(*args, name=name) elif kwargs: s._compose(name=name, **kwargs) else: s._compose(name=name) return s
danithaca/mxnet
python/mxnet/_ctypes/symbol.py
Python
apache-2.0
4,211
"""Order a duplicate block storage volume.""" # :license: MIT, see LICENSE for more details. import click import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import exceptions CONTEXT_SETTINGS = {'token_normalize_func': lambda x: x.upper()} @click.command(context_settings=CONTEXT_SETTINGS) @click.argument('origin-volume-id') @click.option('--origin-snapshot-id', '-o', type=int, help="ID of an origin volume snapshot to use for duplcation.") @click.option('--duplicate-size', '-c', type=int, help='Size of duplicate block volume in GB. ' '***If no size is specified, the size of ' 'the origin volume will be used.***\n' 'Potential Sizes: [20, 40, 80, 100, 250, ' '500, 1000, 2000, 4000, 8000, 12000] ' 'Minimum: [the size of the origin volume]') @click.option('--duplicate-iops', '-i', type=int, help='Performance Storage IOPS, between 100 and 6000 in ' 'multiples of 100 [only used for performance volumes] ' '***If no IOPS value is specified, the IOPS value of the ' 'origin volume will be used.***\n' 'Requirements: [If IOPS/GB for the origin volume is less ' 'than 0.3, IOPS/GB for the duplicate must also be less ' 'than 0.3. If IOPS/GB for the origin volume is greater ' 'than or equal to 0.3, IOPS/GB for the duplicate must ' 'also be greater than or equal to 0.3.]') @click.option('--duplicate-tier', '-t', help='Endurance Storage Tier (IOPS per GB) [only used for ' 'endurance volumes] ***If no tier is specified, the tier ' 'of the origin volume will be used.***\n' 'Requirements: [If IOPS/GB for the origin volume is 0.25, ' 'IOPS/GB for the duplicate must also be 0.25. If IOPS/GB ' 'for the origin volume is greater than 0.25, IOPS/GB ' 'for the duplicate must also be greater than 0.25.]', type=click.Choice(['0.25', '2', '4', '10'])) @click.option('--duplicate-snapshot-size', '-s', type=int, help='The size of snapshot space to order for the duplicate. ' '***If no snapshot space size is specified, the snapshot ' 'space size of the origin block volume will be used.***\n' 'Input "0" for this parameter to order a duplicate volume ' 'with no snapshot space.') @click.option('--billing', type=click.Choice(['hourly', 'monthly']), default='monthly', help="Optional parameter for Billing rate (default to monthly)") @click.option('--dependent-duplicate', type=click.BOOL, default=False, show_default=True, help='Whether or not this duplicate will be a dependent duplicate ' 'of the origin volume.') @environment.pass_env def cli(env, origin_volume_id, origin_snapshot_id, duplicate_size, duplicate_iops, duplicate_tier, duplicate_snapshot_size, billing, dependent_duplicate): """Order a duplicate block storage volume.""" block_manager = SoftLayer.BlockStorageManager(env.client) hourly_billing_flag = False if billing.lower() == "hourly": hourly_billing_flag = True if duplicate_tier is not None: duplicate_tier = float(duplicate_tier) try: order = block_manager.order_duplicate_volume( origin_volume_id, origin_snapshot_id=origin_snapshot_id, duplicate_size=duplicate_size, duplicate_iops=duplicate_iops, duplicate_tier_level=duplicate_tier, duplicate_snapshot_size=duplicate_snapshot_size, hourly_billing_flag=hourly_billing_flag, dependent_duplicate=dependent_duplicate ) except ValueError as ex: raise exceptions.ArgumentError(str(ex)) if 'placedOrder' in order.keys(): click.echo("Order #{0} placed successfully!".format( order['placedOrder']['id'])) for item in order['placedOrder']['items']: click.echo(" > %s" % item['description']) else: click.echo("Order could not be placed! 
Please verify your options " + "and try again.")
softlayer/softlayer-python
SoftLayer/CLI/block/duplicate.py
Python
mit
4,531
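For comparison, roughly the same order can be placed through the manager API that the command above wraps. The volume id and sizes below are placeholders, and create_client_from_env assumes credentials are already configured in ~/.softlayer or environment variables:

import SoftLayer

client = SoftLayer.create_client_from_env()
block = SoftLayer.BlockStorageManager(client)
order = block.order_duplicate_volume(
    "12345678",                 # origin volume id (placeholder)
    duplicate_size=250,
    duplicate_snapshot_size=0,  # no snapshot space on the duplicate
    hourly_billing_flag=True,
)
print(order.get("placedOrder", {}).get("id"))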
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import argparse
import os
import sys

parser = argparse.ArgumentParser(description='Calibre Web is a web app'
                                 ' providing an interface for browsing, reading and downloading eBooks\n', prog='cps.py')
parser.add_argument('-p', metavar='path', help='path and name to settings db, e.g. /opt/cw.db')
parser.add_argument('-g', metavar='path', help='path and name to gdrive db, e.g. /opt/gd.db')
parser.add_argument('-c', metavar='path',
                    help='path and name to SSL certfile, e.g. /opt/test.cert, works only in combination with keyfile')
parser.add_argument('-k', metavar='path',
                    help='path and name to SSL keyfile, e.g. /opt/test.key, works only in combination with certfile')
args = parser.parse_args()

generalPath = os.path.normpath(os.getenv("CALIBRE_DBPATH",
                               os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep))
if args.p:
    settingspath = args.p
else:
    settingspath = os.path.join(generalPath, "app.db")

if args.g:
    gdpath = args.g
else:
    gdpath = os.path.join(generalPath, "gdrive.db")

certfilepath = None
keyfilepath = None
if args.c:
    if os.path.isfile(args.c):
        certfilepath = args.c
    else:
        print("Certfilepath is invalid. Exiting...")
        sys.exit(1)

if args.c == "":
    certfilepath = ""

if args.k:
    if os.path.isfile(args.k):
        keyfilepath = args.k
    else:
        print("Keyfilepath is invalid. Exiting...")
        sys.exit(1)

if args.k == "":
    keyfilepath = ""
issmirnov/calibre-web
cps/cli.py
Python
gpl-3.0
1,534
""" Tests for the Split Testing Module """ import ddt import lxml from mock import Mock, patch from fs.memoryfs import MemoryFS from xmodule.partitions.tests.test_partitions import StaticPartitionService, PartitionTestCase, MockUserPartitionScheme from xmodule.tests.xml import factories as xml from xmodule.tests.xml import XModuleXmlImportTest from xmodule.tests import get_test_system from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW from xmodule.validation import StudioValidationMessage from xmodule.split_test_module import SplitTestDescriptor, SplitTestFields, get_split_user_partitions from xmodule.partitions.partitions import Group, UserPartition class SplitTestModuleFactory(xml.XmlImportFactory): """ Factory for generating SplitTestModules for testing purposes """ tag = 'split_test' class SplitTestUtilitiesTest(PartitionTestCase): """ Tests for utility methods related to split_test module. """ def test_split_user_partitions(self): """ Tests the get_split_user_partitions helper method. """ first_random_partition = UserPartition( 0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')], self.random_scheme ) second_random_partition = UserPartition( 0, 'second_partition', 'Second Partition', [Group("4", 'zeta'), Group("5", 'omega')], self.random_scheme ) all_partitions = [ first_random_partition, # Only UserPartitions with scheme "random" will be returned as available options. UserPartition( 1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')], self.non_random_scheme ), second_random_partition ] self.assertEqual( [first_random_partition, second_random_partition], get_split_user_partitions(all_partitions) ) class SplitTestModuleTest(XModuleXmlImportTest, PartitionTestCase): """ Base class for all split_module tests. 
""" def setUp(self): super(SplitTestModuleTest, self).setUp() self.course_id = 'test_org/test_course_number/test_run' # construct module course = xml.CourseFactory.build() sequence = xml.SequenceFactory.build(parent=course) split_test = SplitTestModuleFactory( parent=sequence, attribs={ 'user_partition_id': '0', 'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}' } ) xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0') xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1') self.course = self.process_xml(course) self.course_sequence = self.course.get_children()[0] self.module_system = get_test_system() self.module_system.descriptor_runtime = self.course._runtime # pylint: disable=protected-access self.course.runtime.export_fs = MemoryFS() user = Mock(username='ma', email='ma@edx.org', is_staff=False, is_active=True) self.partitions_service = StaticPartitionService( [ self.user_partition, UserPartition( 1, 'second_partition', 'Second Partition', [Group("0", 'abel'), Group("1", 'baker'), Group("2", 'charlie')], MockUserPartitionScheme() ) ], user=user, course_id=self.course.id, track_function=Mock(name='track_function'), ) self.module_system._services['partitions'] = self.partitions_service # pylint: disable=protected-access self.split_test_module = self.course_sequence.get_children()[0] self.split_test_module.bind_for_student( self.module_system, self.split_test_module._field_data, # pylint: disable=protected-access user.id ) @ddt.ddt class SplitTestModuleLMSTest(SplitTestModuleTest): """ Test the split test module """ @ddt.data((0, 'split_test_cond0'), (1, 'split_test_cond1')) @ddt.unpack def test_child(self, user_tag, child_url_name): self.user_partition.scheme.current_group = self.user_partition.groups[user_tag] # pylint: disable=no-member self.assertEquals(self.split_test_module.child_descriptor.url_name, child_url_name) @ddt.data((0, 'HTML FOR GROUP 0'), (1, 'HTML FOR GROUP 1')) @ddt.unpack def test_get_html(self, user_tag, child_content): self.user_partition.scheme.current_group = self.user_partition.groups[user_tag] # pylint: disable=no-member self.assertIn( child_content, self.module_system.render(self.split_test_module, STUDENT_VIEW).content ) @ddt.data(0, 1) def test_child_missing_tag_value(self, _user_tag): # If user_tag has a missing value, we should still get back a valid child url self.assertIn(self.split_test_module.child_descriptor.url_name, ['split_test_cond0', 'split_test_cond1']) @ddt.data(100, 200, 300, 400, 500, 600, 700, 800, 900, 1000) def test_child_persist_new_tag_value_when_tag_missing(self, _user_tag): # If a user_tag has a missing value, a group should be saved/persisted for that user. # So, we check that we get the same url_name when we call on the url_name twice. # We run the test ten times so that, if our storage is failing, we'll be most likely to notice it. self.assertEquals(self.split_test_module.child_descriptor.url_name, self.split_test_module.child_descriptor.url_name) # Patch the definition_to_xml for the html children. @patch('xmodule.html_module.HtmlDescriptor.definition_to_xml') def test_export_import_round_trip(self, def_to_xml): # The HtmlDescriptor definition_to_xml tries to write to the filesystem # before returning an xml object. Patch this to just return the xml. 
def_to_xml.return_value = lxml.etree.Element('html') # Mock out the process_xml # Expect it to return a child descriptor for the SplitTestDescriptor when called. self.module_system.process_xml = Mock() # Write out the xml. xml_obj = self.split_test_module.definition_to_xml(MemoryFS()) self.assertEquals(xml_obj.get('user_partition_id'), '0') self.assertIsNotNone(xml_obj.get('group_id_to_child')) # Read the xml back in. fields, children = SplitTestDescriptor.definition_from_xml(xml_obj, self.module_system) self.assertEquals(fields.get('user_partition_id'), '0') self.assertIsNotNone(fields.get('group_id_to_child')) self.assertEquals(len(children), 2) class SplitTestModuleStudioTest(SplitTestModuleTest): """ Unit tests for how split test interacts with Studio. """ @patch('xmodule.split_test_module.SplitTestDescriptor.group_configuration_url', return_value='http://example.com') def test_render_author_view(self, group_configuration_url): """ Test the rendering of the Studio author view. """ def create_studio_context(root_xblock): """ Context for rendering the studio "author_view". """ return { 'reorderable_items': set(), 'root_xblock': root_xblock, } # The split_test module should render both its groups when it is the root context = create_studio_context(self.split_test_module) html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content self.assertIn('HTML FOR GROUP 0', html) self.assertIn('HTML FOR GROUP 1', html) # When rendering as a child, it shouldn't render either of its groups context = create_studio_context(self.course_sequence) html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content self.assertNotIn('HTML FOR GROUP 0', html) self.assertNotIn('HTML FOR GROUP 1', html) # The "Create Missing Groups" button should be rendered when groups are missing context = create_studio_context(self.split_test_module) self.split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')]) ] html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content self.assertIn('HTML FOR GROUP 0', html) self.assertIn('HTML FOR GROUP 1', html) def test_group_configuration_url(self): """ Test creation of correct Group Configuration URL. """ mocked_course = Mock(advanced_modules=['split_test']) mocked_modulestore = Mock() mocked_modulestore.get_course.return_value = mocked_course self.split_test_module.system.modulestore = mocked_modulestore self.split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')]) ] expected_url = '/group_configurations/edX/xml_test_course/101#0' self.assertEqual(expected_url, self.split_test_module.group_configuration_url) def test_editable_settings(self): """ Test the setting information passed back from editable_metadata_fields. """ editable_metadata_fields = self.split_test_module.editable_metadata_fields self.assertIn(SplitTestDescriptor.display_name.name, editable_metadata_fields) self.assertNotIn(SplitTestDescriptor.due.name, editable_metadata_fields) self.assertNotIn(SplitTestDescriptor.user_partitions.name, editable_metadata_fields) # user_partition_id will always appear in editable_metadata_settings, regardless # of the selected value. self.assertIn(SplitTestDescriptor.user_partition_id.name, editable_metadata_fields) def test_non_editable_settings(self): """ Test the settings that are marked as "non-editable". 
""" non_editable_metadata_fields = self.split_test_module.non_editable_metadata_fields self.assertIn(SplitTestDescriptor.due, non_editable_metadata_fields) self.assertIn(SplitTestDescriptor.user_partitions, non_editable_metadata_fields) self.assertNotIn(SplitTestDescriptor.display_name, non_editable_metadata_fields) def test_available_partitions(self): """ Tests that the available partitions are populated correctly when editable_metadata_fields are called """ self.assertEqual([], SplitTestDescriptor.user_partition_id.values) # user_partitions is empty, only the "Not Selected" item will appear. self.split_test_module.user_partition_id = SplitTestFields.no_partition_selected['value'] self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement partitions = SplitTestDescriptor.user_partition_id.values self.assertEqual(1, len(partitions)) self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value']) # Populate user_partitions and call editable_metadata_fields again self.split_test_module.user_partitions = [ UserPartition( 0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')], self.random_scheme ), # Only UserPartitions with scheme "random" will be returned as available options. UserPartition( 1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')], self.non_random_scheme ) ] self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement partitions = SplitTestDescriptor.user_partition_id.values self.assertEqual(2, len(partitions)) self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value']) self.assertEqual(0, partitions[1]['value']) self.assertEqual("first_partition", partitions[1]['display_name']) # Try again with a selected partition and verify that there is no option for "No Selection" self.split_test_module.user_partition_id = 0 self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement partitions = SplitTestDescriptor.user_partition_id.values self.assertEqual(1, len(partitions)) self.assertEqual(0, partitions[0]['value']) self.assertEqual("first_partition", partitions[0]['display_name']) # Finally try again with an invalid selected partition and verify that "No Selection" is an option self.split_test_module.user_partition_id = 999 self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement partitions = SplitTestDescriptor.user_partition_id.values self.assertEqual(2, len(partitions)) self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value']) self.assertEqual(0, partitions[1]['value']) self.assertEqual("first_partition", partitions[1]['display_name']) def test_active_and_inactive_children(self): """ Tests the active and inactive children returned for different split test configurations. """ split_test_module = self.split_test_module children = split_test_module.get_children() # Verify that a split test has no active children if it has no specified user partition. 
split_test_module.user_partition_id = -1 [active_children, inactive_children] = split_test_module.active_and_inactive_children() self.assertEqual(active_children, []) self.assertEqual(inactive_children, children) # Verify that all the children are returned as active for a correctly configured split_test split_test_module.user_partition_id = 0 split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')]) ] [active_children, inactive_children] = split_test_module.active_and_inactive_children() self.assertEqual(active_children, children) self.assertEqual(inactive_children, []) # Verify that a split_test does not return inactive children in the active children self.split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha')]) ] [active_children, inactive_children] = split_test_module.active_and_inactive_children() self.assertEqual(active_children, [children[0]]) self.assertEqual(inactive_children, [children[1]]) # Verify that a split_test ignores misconfigured children self.split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("2", 'gamma')]) ] [active_children, inactive_children] = split_test_module.active_and_inactive_children() self.assertEqual(active_children, [children[0]]) self.assertEqual(inactive_children, [children[1]]) # Verify that a split_test referring to a non-existent user partition has no active children self.split_test_module.user_partition_id = 2 [active_children, inactive_children] = split_test_module.active_and_inactive_children() self.assertEqual(active_children, []) self.assertEqual(inactive_children, children) def test_validation_messages(self): """ Test the validation messages produced for different split test configurations. """ split_test_module = self.split_test_module def verify_validation_message(message, expected_message, expected_message_type, expected_action_class=None, expected_action_label=None, expected_action_runtime_event=None): """ Verify that the validation message has the expected validation message and type. """ self.assertEqual(message.text, expected_message) self.assertEqual(message.type, expected_message_type) if expected_action_class: self.assertEqual(message.action_class, expected_action_class) else: self.assertFalse(hasattr(message, "action_class")) if expected_action_label: self.assertEqual(message.action_label, expected_action_label) else: self.assertFalse(hasattr(message, "action_label")) if expected_action_runtime_event: self.assertEqual(message.action_runtime_event, expected_action_runtime_event) else: self.assertFalse(hasattr(message, "action_runtime_event")) def verify_summary_message(general_validation, expected_message, expected_message_type): """ Verify that the general validation message has the expected validation message and type. 
""" self.assertEqual(general_validation.text, expected_message) self.assertEqual(general_validation.type, expected_message_type) # Verify the messages for an unconfigured user partition split_test_module.user_partition_id = -1 validation = split_test_module.validate() self.assertEqual(len(validation.messages), 0) verify_validation_message( validation.summary, u"The experiment is not associated with a group configuration.", StudioValidationMessage.NOT_CONFIGURED, 'edit-button', u"Select a Group Configuration", ) # Verify the messages for a correctly configured split_test split_test_module.user_partition_id = 0 split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')]) ] validation = split_test_module.validate_split_test() self.assertTrue(validation) self.assertIsNone(split_test_module.general_validation_message(), None) # Verify the messages for a split test with too few groups split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')]) ] validation = split_test_module.validate() self.assertEqual(len(validation.messages), 1) verify_validation_message( validation.messages[0], u"The experiment does not contain all of the groups in the configuration.", StudioValidationMessage.ERROR, expected_action_runtime_event='add-missing-groups', expected_action_label=u"Add Missing Groups" ) verify_summary_message( validation.summary, u"This content experiment has issues that affect content visibility.", StudioValidationMessage.ERROR ) # Verify the messages for a split test with children that are not associated with any group split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha')]) ] validation = split_test_module.validate() self.assertEqual(len(validation.messages), 1) verify_validation_message( validation.messages[0], u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.", StudioValidationMessage.WARNING ) verify_summary_message( validation.summary, u"This content experiment has issues that affect content visibility.", StudioValidationMessage.WARNING ) # Verify the messages for a split test with both missing and inactive children split_test_module.user_partitions = [ UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("2", 'gamma')]) ] validation = split_test_module.validate() self.assertEqual(len(validation.messages), 2) verify_validation_message( validation.messages[0], u"The experiment does not contain all of the groups in the configuration.", StudioValidationMessage.ERROR, expected_action_runtime_event='add-missing-groups', expected_action_label=u"Add Missing Groups" ) verify_validation_message( validation.messages[1], u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.", StudioValidationMessage.WARNING ) # With two messages of type error and warning priority given to error. verify_summary_message( validation.summary, u"This content experiment has issues that affect content visibility.", StudioValidationMessage.ERROR ) # Verify the messages for a split test referring to a non-existent user partition split_test_module.user_partition_id = 2 validation = split_test_module.validate() self.assertEqual(len(validation.messages), 1) verify_validation_message( validation.messages[0], u"The experiment uses a deleted group configuration. 
" u"Select a valid group configuration or delete this experiment.", StudioValidationMessage.ERROR ) verify_summary_message( validation.summary, u"This content experiment has issues that affect content visibility.", StudioValidationMessage.ERROR ) # Verify the message for a split test referring to a non-random user partition split_test_module.user_partitions = [ UserPartition( 10, 'incorrect_partition', 'Non Random Partition', [Group("0", 'alpha'), Group("2", 'gamma')], scheme=self.non_random_scheme ) ] split_test_module.user_partition_id = 10 validation = split_test_module.validate() self.assertEqual(len(validation.messages), 1) verify_validation_message( validation.messages[0], u"The experiment uses a group configuration that is not supported for experiments. " u"Select a valid group configuration or delete this experiment.", StudioValidationMessage.ERROR ) verify_summary_message( validation.summary, u"This content experiment has issues that affect content visibility.", StudioValidationMessage.ERROR )
beni55/edx-platform
common/lib/xmodule/xmodule/tests/test_split_test_module.py
Python
agpl-3.0
23,315
from _common import * _truths = ['nprongs'] truths = {_truths[x]:x for x in xrange(len(_truths))} def get_dims(coll): coll.objects['train']['particles'].load(memory=False) dims = coll.objects['train']['particles'].data.data.shape dims = (dims[0], dims[1], dims[2]-1) # need to exclude the last column if config.limit is not None and config.limit < dims[1]: dims = (dims[0], config.limit, dims[2]) return (None, dims[1], 4), (None, dims[1], dims[2]-4) def make_coll(fpath): coll = obj.GenCollection() coll.add_categories(['singletons','particles'], fpath) return coll def generate(collections, partition='train', batch=32, repartition=True, mask=False, decorr_mass=False, decorr_pt=False, learn_mass=False, learn_pt=False, normalize=False): small_batch = max(1, int(batch / len(collections))) generators = {c:c.generator(components=['singletons', 'particles', c.weight,'truth'], partition=partition, batch=small_batch, repartition=repartition, normalize=normalize) for c in collections} msd_index = config.gen_singletons['msd'] pt_index = config.gen_singletons['pt'] msd_norm_factor = 1. / config.max_mass pt_norm_factor = 1. / (config.max_pt - config.min_pt) def xform_mass(x): binned = (np.minimum(x, config.max_mass) * msd_norm_factor * (config.n_decorr_bins - 1)).astype(np.int) onehot = np_utils.to_categorical(binned, config.n_decorr_bins) return onehot def xform_pt(x): binned = (np.minimum(x-config.min_pt, config.max_pt-config.min_pt) * pt_norm_factor * (config.n_decorr_bins - 1) ).astype(np.int) onehot = np_utils.to_categorical(binned, config.n_decorr_bins) return onehot while True: inputs = [] outputs = [] weights = [] for c in collections: data = {k:v.data for k,v in next(generators[c]).iteritems()} # the last element of the particle feature vector is really truth info - do not train! if config.limit: x = data['particles'][:,:config.limit,:-1] else: x = data['particles'][:,:,:-1] i = [x[:,:,:4], x[:,:,4:]] if learn_mass: i.append(data['singletons'][:,msd_index] * msd_norm_factor) if learn_pt: i.append((data['singletons'][:,pt_index] - config.min_pt) * pt_norm_factor) inputs.append(i) nprongs = np_utils.to_categorical( np.clip( data['truth'][:,truths['nprongs']].astype(np.int), 0, config.n_truth ), config.n_truth ) o = [nprongs] w = [data[c.weight]] if decorr_mass: mass = xform_mass(data['singletons'][:,msd_index]) o.append(mass) w.append(w[0] * nprongs[:,config.adversary_mask]) if decorr_pt: pt = xform_pt(data['singletons'][:,pt_index]) o.append(pt) w.append(w[0] * nprongs[:,config.adversary_mask]) outputs.append(o) weights.append(w) merged_inputs = [] NINPUTS = 2 + int(learn_mass) + int(learn_pt) for j in xrange(NINPUTS): merged_inputs.append(np.concatenate([v[j] for v in inputs], axis=0)) merged_outputs = [] merged_weights = [] NOUTPUTS = 1 + int(decorr_pt) + int(decorr_mass) for j in xrange(NOUTPUTS): merged_outputs.append(np.concatenate([v[j] for v in outputs], axis=0)) merged_weights.append(np.concatenate([v[j] for v in weights], axis=0)) if config.weights_scale is not None: for j in xrange(NOUTPUTS): merged_weights[j] *= np.dot(merged_outputs[0], config.weights_scale) yield merged_inputs, merged_outputs, merged_weights
sidnarayanan/BAdNet
python/subtlenet/generators/gen_4vec.py
Python
mit
4,220
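The xform_mass/xform_pt closures above just bin a continuous variable and one-hot encode the bin index. A stand-alone numpy sketch of the same transform, where max_mass and n_bins stand in for the config values and np.eye replaces keras' np_utils.to_categorical:

import numpy as np

max_mass = 400.0   # placeholder for config.max_mass
n_bins = 10        # placeholder for config.n_decorr_bins


def xform_mass(x):
    # Clip at max_mass, map to a bin index in [0, n_bins - 1], then one-hot encode.
    binned = (np.minimum(x, max_mass) / max_mass * (n_bins - 1)).astype(int)
    return np.eye(n_bins)[binned]


print(xform_mass(np.array([0.0, 120.0, 999.0])))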
import random


def seed(z):
    """
    Sets all seeds used to generate random number streams.
    Currently contains:
      - random library
    Previously contained:
      - numpy
    """
    random.seed(z)


def random_choice(array, probs=None):
    """
    This function takes in an array of values to make a choice from,
    and a pdf corresponding to those values. It returns a random choice
    from that array, using the probs as weights.
    """
    # If no pdf provided, assume uniform dist:
    if probs is None:
        index = int(random.random() * len(array))
        return array[index]

    # A common case, guaranteed to reach the Exit node;
    # No need to sample for this:
    if (set(probs[:-1]) == set([0.0])) and (probs[-1] == 1.0):
        return array[-1]

    # Sample a random value using the pdf
    rdm_num = random.random()
    i, p = 0, probs[0]
    while rdm_num > p:
        i += 1
        p += probs[i]
    return array[i]


def truncated_normal(mean, sd):
    """
    Sample from a Normal distribution, with mean and standard deviation (sd).
    This truncates the distribution at 0 (lower bound of 0). If samples less
    than 0 are sampled, they are resampled until a positive value is sampled.
    """
    sample = random.normalvariate(mean, sd)
    while sample <= 0.0:
        sample = random.normalvariate(mean, sd)
    return sample


def flatten_list(list_of_lists):
    flat = []
    for a_list in list_of_lists:
        flat += a_list
    return flat


def no_routing(ind):
    """
    Process-based routing function that sends the customer straight
    to the exit node. It is a placeholder for when NoArrivals is used.
    """
    return []
CiwPython/Ciw
ciw/auxiliary.py
Python
mit
1,525
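A quick usage sketch for the helpers above, assuming the module is importable as ciw.auxiliary as its path suggests:

import ciw.auxiliary as aux

aux.seed(7)
print(aux.random_choice(['A', 'B', 'C'], probs=[0.2, 0.5, 0.3]))  # weighted pick
print(aux.random_choice(['A', 'B', 'C']))                         # uniform pick
print(aux.truncated_normal(mean=1.0, sd=2.0))                     # always > 0.0
print(aux.flatten_list([[1, 2], [3], []]))                        # [1, 2, 3]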
"""Enum values for HSA Note that Python namespacing could be used to avoid the C-like prefixing, but we choose to keep the same names as found in the C enums, in order to match the documentation. """ import ctypes HSA_LARGE_MODEL = ctypes.sizeof(ctypes.c_void_p) == 8 # hsa_status_t # The function has been executed successfully. HSA_STATUS_SUCCESS = 0x0 # A traversal over a list of elements has been interrupted by the # application before completing. HSA_STATUS_INFO_BREAK = 0x1 # A generic error has occurred. HSA_STATUS_ERROR = 0x1000 # One of the actual arguments does not meet a precondition stated in the # documentation of the corresponding formal argument. HSA_STATUS_ERROR_INVALID_ARGUMENT = 0x1001 # The requested queue creation is not valid. HSA_STATUS_ERROR_INVALID_QUEUE_CREATION = 0x1002 # The requested allocation is not valid. HSA_STATUS_ERROR_INVALID_ALLOCATION = 0x1003 # The agent is invalid. HSA_STATUS_ERROR_INVALID_AGENT = 0x1004 # The memory region is invalid. HSA_STATUS_ERROR_INVALID_REGION = 0x1005 # The signal is invalid. HSA_STATUS_ERROR_INVALID_SIGNAL = 0x1006 # The queue is invalid. HSA_STATUS_ERROR_INVALID_QUEUE = 0x1007 # The HSA runtime failed to allocate the necessary resources. This error # may also occur when the HSA runtime needs to spawn threads or create # internal OS-specific events. HSA_STATUS_ERROR_OUT_OF_RESOURCES = 0x1008 # The AQL packet is malformed. HSA_STATUS_ERROR_INVALID_PACKET_FORMAT = 0x1009 # An error has been detected while releasing a resource. HSA_STATUS_ERROR_RESOURCE_FREE = 0x100A # An API other than ::hsa_init has been invoked while the reference count # of the HSA runtime is 0. HSA_STATUS_ERROR_NOT_INITIALIZED = 0x100B # The maximum reference count for the object has been reached. HSA_STATUS_ERROR_REFCOUNT_OVERFLOW = 0x100C # The arguments passed to a functions are not compatible. HSA_STATUS_ERROR_INCOMPATIBLE_ARGUMENTS = 0x100D # The index is invalid.\ HSA_STATUS_ERROR_INVALID_INDEX = 0x100E # The instruction set architecture is invalid. HSA_STATUS_ERROR_INVALID_ISA = 0x100F, # The instruction set architecture name is invalid. HSA_STATUS_ERROR_INVALID_ISA_NAME = 0x1017 # The code object is invalid. HSA_STATUS_ERROR_INVALID_CODE_OBJECT = 0x1010 # The executable is invalid. HSA_STATUS_ERROR_INVALID_EXECUTABLE = 0x1011 # The executable is frozen. HSA_STATUS_ERROR_FROZEN_EXECUTABLE = 0x1012 # There is no symbol with the given name. HSA_STATUS_ERROR_INVALID_SYMBOL_NAME = 0x1013 # The variable is already defined. HSA_STATUS_ERROR_VARIABLE_ALREADY_DEFINED = 0x1014 # The variable is undefined. HSA_STATUS_ERROR_VARIABLE_UNDEFINED = 0x1015 # An HSAIL operation resulted on a hardware exception. HSA_STATUS_ERROR_EXCEPTION = 0x1016 # hsa_packet_type_t HSA_PACKET_TYPE_VENDOR_SPECIFIC = 0 # The packet has been processed in the past, but has not been reassigned to # the packet processor. A packet processor must not process a packet of this # type. All queues support this packet type. HSA_PACKET_TYPE_INVALID = 1 # Packet used by agents for dispatching jobs to kernel agents. Not all # queues support packets of this type (see ::hsa_queue_feature_t). HSA_PACKET_TYPE_KERNEL_DISPATCH = 2 # Packet used by agents to delay processing of subsequent packets, and to # express complex dependencies between multiple packets. All queues support # this packet type. HSA_PACKET_TYPE_BARRIER_AND = 3 # Packet used by agents for dispatching jobs to agents. Not all # queues support packets of this type (see ::hsa_queue_feature_t). 
HSA_PACKET_TYPE_AGENT_DISPATCH = 4 # Packet used by agents to delay processing of subsequent packets, and to # express complex dependencies between multiple packets. All queues support # this packet type. HSA_PACKET_TYPE_BARRIER_OR = 5 # hsa_queue_type_t HSA_QUEUE_TYPE_MULTI = 0 HSA_QUEUE_TYPE_SINGLE = 1 # hsa_queue_feature_t HSA_QUEUE_FEATURE_KERNEL_DISPATCH = 1 HSA_QUEUE_FEATURE_AGENT_DISPATCH = 2 # hsa_fence_scope_t HSA_FENCE_SCOPE_NONE = 0 HSA_FENCE_SCOPE_AGENT = 1 HSA_FENCE_SCOPE_SYSTEM = 2 # hsa_wait_state_t # The application thread may be rescheduled while waiting on the signal. HSA_WAIT_STATE_BLOCKED = 0 # The application thread stays active while waiting on a signal. HSA_WAIT_STATE_ACTIVE = 1 # hsa_signal_condition_t HSA_SIGNAL_CONDITION_EQ = 0 HSA_SIGNAL_CONDITION_NE = 1 HSA_SIGNAL_CONDITION_LT = 2 HSA_SIGNAL_CONDITION_GTE = 3 # # hsa_dim_t # HSA_DIM_X = 0 # HSA_DIM_Y = 1 # HSA_DIM_Z = 2 # hsa_extension_t HSA_EXTENSION_FINALIZER = 0 HSA_EXTENSION_IMAGES = 1 HSA_EXTENSION_AMD_PROFILER = 2 # hsa_agent_feature_t HSA_AGENT_FEATURE_KERNEL_DISPATCH = 1 HSA_AGENT_FEATURE_AGENT_DISPATCH = 2 # hsa_device_type_t HSA_DEVICE_TYPE_CPU = 0 HSA_DEVICE_TYPE_GPU = 1 HSA_DEVICE_TYPE_DSP = 2 # hsa_system_info_t HSA_SYSTEM_INFO_VERSION_MAJOR = 0 HSA_SYSTEM_INFO_VERSION_MINOR = 1 HSA_SYSTEM_INFO_TIMESTAMP = 2 HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY = 3 HSA_SYSTEM_INFO_SIGNAL_MAX_WAIT = 4 HSA_SYSTEM_INFO_ENDIANNESS = 5 HSA_SYSTEM_INFO_MACHINE_MODEL = 6 HSA_SYSTEM_INFO_EXTENSIONS = 7 # hsa_agent_info_t # Agent name. The type of this attribute is a NUL-terminated char[64]. If # the name of the agent uses less than 63 characters, the rest of the # array must be filled with NULs. HSA_AGENT_INFO_NAME = 0 # Name of vendor. The type of this attribute is a NUL-terminated char[64]. If # the name of the vendor uses less than 63 characters, the rest of the array # must be filled with NULs. HSA_AGENT_INFO_VENDOR_NAME = 1 # Agent capability. The type of this attribute is ::hsa_agent_feature_t. HSA_AGENT_INFO_FEATURE = 2 # Machine model supported by the agent. The type of this attribute is # ::hsa_machine_model_t. HSA_AGENT_INFO_MACHINE_MODEL = 3 # Profile supported by the agent. The type of this attribute is # ::hsa_profile_t. HSA_AGENT_INFO_PROFILE = 4 # Default floating-point rounding mode. The type of this attribute is # ::hsa_default_float_rounding_mode_t, but the value # ::HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT is not allowed. HSA_AGENT_INFO_DEFAULT_FLOAT_ROUNDING_MODE = 5 # Default floating-point rounding modes supported by the agent in the Base # profile. The type of this attribute is a mask of # ::hsa_default_float_rounding_mode_t. The default floating-point rounding # mode (::HSA_AGENT_INFO_DEFAULT_FLOAT_ROUNDING_MODE) bit must not be set. HSA_AGENT_INFO_BASE_PROFILE_DEFAULT_FLOAT_ROUNDING_MODES = 23 # Flag indicating that the f16 HSAIL operation is at least as fast as the # f32 operation in the current agent. The value of this attribute is # undefined if the agent is not a kernel agent. The type of this # attribute is bool. HSA_AGENT_INFO_FAST_F16_OPERATION = 24 # Number of work-items in a wavefront. Must be a power of 2 in the range # [1,256]. The value of this attribute is undefined if the agent is not # a kernel agent. The type of this attribute is uint32_t. HSA_AGENT_INFO_WAVEFRONT_SIZE = 6 # Maximum number of work-items of each dimension of a work-group. Each # maximum must be greater than 0. No maximum can exceed the value of # ::HSA_AGENT_INFO_WORKGROUP_MAX_SIZE. 
The value of this attribute is # undefined if the agent is not a kernel agent. The type of this # attribute is uint16_t[3]. HSA_AGENT_INFO_WORKGROUP_MAX_DIM = 7 # Maximum total number of work-items in a work-group. The value of this # attribute is undefined if the agent is not a kernel agent. The type # of this attribute is uint32_t. HSA_AGENT_INFO_WORKGROUP_MAX_SIZE = 8 # Maximum number of work-items of each dimension of a grid. Each maximum must # be greater than 0, and must not be smaller than the corresponding value in # ::HSA_AGENT_INFO_WORKGROUP_MAX_DIM. No maximum can exceed the value of # ::HSA_AGENT_INFO_GRID_MAX_SIZE. The value of this attribute is undefined if # the agent is not a kernel agent. The type of this attribute is # ::hsa_dim3_t. HSA_AGENT_INFO_GRID_MAX_DIM = 9 # Maximum total number of work-items in a grid. The value of this attribute # is undefined if the agent is not a kernel agent. The type of this # attribute is uint32_t. HSA_AGENT_INFO_GRID_MAX_SIZE = 10 # Maximum number of fbarriers per work-group. Must be at least 32. The value # of this attribute is undefined if the agent is not a kernel agent. The # type of this attribute is uint32_t. HSA_AGENT_INFO_FBARRIER_MAX_SIZE = 11 # Maximum number of queues that can be active (created but not destroyed) at # one time in the agent. The type of this attribute is uint32_t. HSA_AGENT_INFO_QUEUES_MAX = 12 # Minimum number of packets that a queue created in the agent # can hold. Must be a power of 2 greater than 0. Must not exceed # the value of ::HSA_AGENT_INFO_QUEUE_MAX_SIZE. The type of this # attribute is uint32_t. HSA_AGENT_INFO_QUEUE_MIN_SIZE = 13 # Maximum number of packets that a queue created in the agent can # hold. Must be a power of 2 greater than 0. The type of this attribute # is uint32_t. HSA_AGENT_INFO_QUEUE_MAX_SIZE = 14 # Type of a queue created in the agent. The type of this attribute is # ::hsa_queue_type_t. HSA_AGENT_INFO_QUEUE_TYPE = 15 # Identifier of the NUMA node associated with the agent. The type of this # attribute is uint32_t. HSA_AGENT_INFO_NODE = 16 # Type of hardware device associated with the agent. The type of this # attribute is ::hsa_device_type_t. HSA_AGENT_INFO_DEVICE = 17 # Array of data cache sizes (L1..L4). Each size is expressed in bytes. A size # of 0 for a particular level indicates that there is no cache information # for that level. The type of this attribute is uint32_t[4]. HSA_AGENT_INFO_CACHE_SIZE = 18 # Instruction set architecture of the agent. The type of this attribute # is ::hsa_isa_t. HSA_AGENT_INFO_ISA = 19 # Bit-mask indicating which extensions are supported by the agent. An # extension with an ID of @p i is supported if the bit at position @p i is # set. The type of this attribute is uint8_t[128]. HSA_AGENT_INFO_EXTENSIONS = 20 # Major version of the HSA runtime specification supported by the # agent. The type of this attribute is uint16_t. HSA_AGENT_INFO_VERSION_MAJOR = 21 # Minor version of the HSA runtime specification supported by the # agent. The type of this attribute is uint16_t. HSA_AGENT_INFO_VERSION_MINOR = 22 # hsa_region_segment_t # Global segment. Used to hold data that is shared by all agents. HSA_REGION_SEGMENT_GLOBAL = 0 # Read-only segment. Used to hold data that remains constant during the # execution of a kernel. HSA_REGION_SEGMENT_READONLY = 1 # Private segment. Used to hold data that is local to a single work-item. HSA_REGION_SEGMENT_PRIVATE = 2 # Group segment. Used to hold data that is shared by the work-items of a # work-group. 
HSA_REGION_SEGMENT_GROUP = 3 # hsa_region_global_flag_t # The application can use memory in the region to store kernel arguments, and # provide the values for the kernarg segment of a kernel dispatch. If this # flag is set, then ::HSA_REGION_GLOBAL_FLAG_FINE_GRAINED must be set. HSA_REGION_GLOBAL_FLAG_KERNARG = 1 # Updates to memory in this region are immediately visible to all the # agents under the terms of the HSA memory model. If this # flag is set, then ::HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED must not be set. HSA_REGION_GLOBAL_FLAG_FINE_GRAINED = 2 # Updates to memory in this region can be performed by a single agent at # a time. If a different agent in the system is allowed to access the # region, the application must explicitely invoke ::hsa_memory_assign_agent # in order to transfer ownership to that agent for a particular buffer. HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED = 4 # hsa_region_info_t # Segment where memory in the region can be used. The type of this # attribute is ::hsa_region_segment_t. HSA_REGION_INFO_SEGMENT = 0 # Flag mask. The value of this attribute is undefined if the value of # ::HSA_REGION_INFO_SEGMENT is not ::HSA_REGION_SEGMENT_GLOBAL. The type of # this attribute is uint32_t, a bit-field of ::hsa_region_global_flag_t # values. HSA_REGION_INFO_GLOBAL_FLAGS = 1 # Size of this region, in bytes. The type of this attribute is size_t. HSA_REGION_INFO_SIZE = 2 # Maximum allocation size in this region, in bytes. Must not exceed the value # of ::HSA_REGION_INFO_SIZE. The type of this attribute is size_t. # # If the region is in the global or readonly segments, this is the maximum # size that the application can pass to ::hsa_memory_allocate. If the region # is in the group segment, this is the maximum size (per work-group) that can # be requested for a given kernel dispatch. If the region is in the private # segment, this is the maximum size (per work-item) that can be request for a # specific kernel dispatch. HSA_REGION_INFO_ALLOC_MAX_SIZE = 4 # Indicates whether memory in this region can be allocated using # ::hsa_memory_allocate. The type of this attribute is bool. # # The value of this flag is always false for regions in the group and private # segments. HSA_REGION_INFO_RUNTIME_ALLOC_ALLOWED = 5 # Allocation granularity of buffers allocated by ::hsa_memory_allocate in # this region. The size of a buffer allocated in this region is a multiple of # the value of this attribute. The value of this attribute is only defined if # ::HSA_REGION_INFO_RUNTIME_ALLOC_ALLOWED is true for this region. The type # of this attribute is size_t. HSA_REGION_INFO_RUNTIME_ALLOC_GRANULE = 6 # Alignment of buffers allocated by ::hsa_memory_allocate in this region. The # value of this attribute is only defined if # ::HSA_REGION_INFO_RUNTIME_ALLOC_ALLOWED is true for this region, and must # be a power of 2. The type of this attribute is size_t. HSA_REGION_INFO_RUNTIME_ALLOC_ALIGNMENT = 7 # hsa_profile_t HSA_PROFILE_BASE = 0 HSA_PROFILE_FULL = 1 # hsa_machine_model_t HSA_MACHINE_MODEL_SMALL = 0 HSA_MACHINE_MODEL_LARGE = 1 # hsa_executable_symbol_info_t # The kind of the symbol. The type of this attribute is ::hsa_symbol_kind_t. HSA_EXECUTABLE_SYMBOL_INFO_TYPE = 0 # The length of the symbol name. The type of this attribute is uint32_t. HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH = 1 # The name of the symbol. 
The type of this attribute is character array with # the length equal to the value of ::HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH # attribute HSA_EXECUTABLE_SYMBOL_INFO_NAME = 2 # The length of the module name to which this symbol belongs if this symbol # has module linkage, otherwise 0 is returned. The type of this attribute is # uint32_t. HSA_EXECUTABLE_SYMBOL_INFO_MODULE_NAME_LENGTH = 3 # The module name to which this symbol belongs if this symbol has module # linkage, otherwise empty string is returned. The type of this attribute is # character array with the length equal to the value of # ::HSA_EXECUTABLE_SYMBOL_INFO_MODULE_NAME_LENGTH attribute. HSA_EXECUTABLE_SYMBOL_INFO_MODULE_NAME = 4 # Agent associated with this symbol. If the symbol is a variable, the # value of this attribute is only defined if # ::HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ALLOCATION is # ::HSA_VARIABLE_ALLOCATION_AGENT. The type of this attribute is hsa_agent_t. HSA_EXECUTABLE_SYMBOL_INFO_AGENT = 20 # The address of the variable. The value of this attribute is undefined if # the symbol is not a variable. The type of this attribute is uint64_t. # If executable's state is ::HSA_EXECUTABLE_STATE_UNFROZEN, then 0 is # returned. HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS = 21 # The linkage kind of the symbol. The type of this attribute is # ::hsa_symbol_linkage_t. HSA_EXECUTABLE_SYMBOL_INFO_LINKAGE = 5 # Indicates whether the symbol corresponds to a definition. The type of this # attribute is bool. HSA_EXECUTABLE_SYMBOL_INFO_IS_DEFINITION = 17 # The allocation kind of the variable. The value of this attribute is # undefined if the symbol is not a variable. The type of this attribute is # ::hsa_variable_allocation_t. HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ALLOCATION = 6 # The segment kind of the variable. The value of this attribute is undefined # if the symbol is not a variable. The type of this attribute is # ::hsa_variable_segment_t. HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SEGMENT = 7 # Alignment of the variable. The value of this attribute is undefined if # the symbol is not a variable. The type of this attribute is uint32_t. HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ALIGNMENT = 8 # Size of the variable. The value of this attribute is undefined if # the symbol is not a variable. The type of this attribute is uint32_t. # # A value of 0 is returned if the variable is an external variable and has an # unknown dimension. HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE = 9 # Indicates whether the variable is constant. The value of this attribute is # undefined if the symbol is not a variable. The type of this attribute is # bool. HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_IS_CONST = 10 # Kernel object handle, used in the kernel dispatch packet. The value of this # attribute is undefined if the symbol is not a kernel. The type of this # attribute is uint64_t. # # If the state of the executable is ::HSA_EXECUTABLE_STATE_UNFROZEN, then 0 # is returned. HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT = 22 # Size of kernarg segment memory that is required to hold the values of the # kernel arguments, in bytes. The value of this attribute is undefined if the # symbol is not a kernel. The type of this attribute is uint32_t. HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE = 11 # Alignment (in bytes) of the buffer used to pass arguments to the kernel, # which is the maximum of 16 and the maximum alignment of any of the kernel # arguments. The value of this attribute is undefined if the symbol is not a # kernel. The type of this attribute is uint32_t. 
HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_ALIGNMENT = 12 # Size of static group segment memory required by the kernel (per # work-group), in bytes. The value of this attribute is undefined # if the symbol is not a kernel. The type of this attribute is uint32_t. # # The reported amount does not include any dynamically allocated group # segment memory that may be requested by the application when a kernel is # dispatched. HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE = 13 # Size of static private, spill, and arg segment memory required by # this kernel (per work-item), in bytes. The value of this attribute is # undefined if the symbol is not a kernel. The type of this attribute is # uint32_t. # # If the value of ::HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_DYNAMIC_CALLSTACK is # true, the kernel may use more private memory than the reported value, and # the application must add the dynamic call stack usage to @a # private_segment_size when populating a kernel dispatch packet. HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE = 14 # Dynamic callstack flag. The value of this attribute is undefined if the # symbol is not a kernel. The type of this attribute is bool. # # If this flag is set (the value is true), the kernel uses a dynamically # sized call stack. This can happen if recursive calls, calls to indirect # functions, or the HSAIL alloca instruction are present in the kernel. HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_DYNAMIC_CALLSTACK = 15 # Indirect function object handle. The value of this attribute is undefined # if the symbol is not an indirect function, or the associated agent does # not support the Full Profile. The type of this attribute depends on the # machine model: if machine model is small, then the type is uint32_t, if # machine model is large, then the type is uint64_t. # # If the state of the executable is ::HSA_EXECUTABLE_STATE_UNFROZEN, then 0 # is returned. HSA_EXECUTABLE_SYMBOL_INFO_INDIRECT_FUNCTION_OBJECT = 23 # Call convention of the indirect function. The value of this attribute is # undefined if the symbol is not an indirect function, or the associated # agent does not support the Full Profile. The type of this attribute is # uint32_t. HSA_EXECUTABLE_SYMBOL_INFO_INDIRECT_FUNCTION_CALL_CONVENTION = 16 # hsa_default_float_rounding_mode_t # Use a default floating-point rounding mode specified elsewhere. HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT = 0 # Operations that specify the default floating-point mode are rounded to zero # by default. HSA_DEFAULT_FLOAT_ROUNDING_MODE_ZERO = 1 # Operations that specify the default floating-point mode are rounded to the # nearest representable number and that ties should be broken by selecting # the value with an even least significant bit. HSA_DEFAULT_FLOAT_ROUNDING_MODE_NEAR = 2 # hsa_code_object_type_t HSA_CODE_OBJECT_TYPE_PROGRAM = 0 # hsa_executable_state_t # Executable state, which allows the user to load code objects and define # external variables. Variable addresses, kernel code handles, and # indirect function code handles are not available in query operations until # the executable is frozen (zero always returned). HSA_EXECUTABLE_STATE_UNFROZEN = 0 # Executable state, which allows the user to query variable addresses, # kernel code handles, and indirect function code handles using query # operation. Loading new code objects, as well as defining external variables # is not allowed in this state. 
HSA_EXECUTABLE_STATE_FROZEN = 1 # hsa_kernel_dispatch_packet_setup_t HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS = 0 # hsa_packet_header_t HSA_PACKET_HEADER_TYPE = 0 HSA_PACKET_HEADER_BARRIER = 8 HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE = 9 HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE = 11
jriehl/numba
numba/roc/hsadrv/enums.py
Python
bsd-2-clause
22,011
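The HSA_PACKET_HEADER_* values above are bit positions within the 16-bit AQL packet header rather than standalone codes. A small, hedged sketch of how they could be combined follows; the build_packet_header helper is illustrative and not part of numba's HSA driver, and only the HSA_* constant values are taken from the enums file.

# Illustrative helper, not part of numba's HSA driver. The constant values are
# copied from the enums above; the header bit positions select where each field
# lands in the uint16 AQL packet header.
HSA_PACKET_TYPE_BARRIER_OR = 5
HSA_PACKET_HEADER_TYPE = 0
HSA_PACKET_HEADER_BARRIER = 8
HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE = 9
HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE = 11
HSA_FENCE_SCOPE_SYSTEM = 2


def build_packet_header(packet_type, barrier=0,
                        acquire=HSA_FENCE_SCOPE_SYSTEM,
                        release=HSA_FENCE_SCOPE_SYSTEM):
    """Pack the per-field values into a single uint16 packet header."""
    return (packet_type << HSA_PACKET_HEADER_TYPE
            | barrier << HSA_PACKET_HEADER_BARRIER
            | acquire << HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE
            | release << HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE)


print(hex(build_packet_header(HSA_PACKET_TYPE_BARRIER_OR, barrier=1)))  # 0x1505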
## For Python v3.5.2 ## ## N=3: 40 configurations ## N=4: 454 configurations ## N=5: 13,098 configurations (0.56 seconds) ## N=6: 1,214,975 configurations (89.5 seconds) ## N=7: 110,483,315 configurations (3.60 hours) ## N=8: ## from timeit import default_timer as timer BigN=8 tile_count=int(BigN*(BigN+1)/2) board_size=int(BigN*(BigN+1)/2) attempts=0 solutions=0 start=0 tile_list=list() board=list() color_flag=False CSI_BEG="\x1B[" CSI_END="m" CSI_OFF=CSI_BEG+"0m" CSI_black=CSI_BEG+"0;30;40"+CSI_END CSI_red=CSI_BEG+"0;30;41"+CSI_END CSI_green=CSI_BEG+"0;30;42"+CSI_END CSI_blue=CSI_BEG+"0;30;44"+CSI_END CSI_yellow=CSI_BEG+"0;30;43"+CSI_END CSI_purple=CSI_BEG+"0;30;45"+CSI_END CSI_teal=CSI_BEG+"0;30;46"+CSI_END CSI_grey=CSI_BEG+"0;30;47"+CSI_END CSI_COLORS=[CSI_black, CSI_red, CSI_green, CSI_blue, CSI_yellow, CSI_purple, CSI_teal, CSI_grey] def build_tile_list(): for outer in range(BigN): for inner in range(outer+1): tile=list() tile.append(outer+1) tile.append(False) tile_list.append(tile) def unused_tiles(): for index in range(tile_count): if tile_list[index][1]==False: return True return False def init_board(): for row in range(board_size): for col in range(board_size): position=list() position.append(row+1) position.append(col+1) position.append(0) position.append(False) board.append(position) def show_board(): for row in range(board_size-1, -1, -1): for col in range(board_size): position=board[(row*board_size)+col] if position[2]==0: print(".", end=" ") else: if color_flag==True: print(CSI_COLORS[position[2]-1]+str(position[2])+" "+CSI_OFF, end="") else: print(position[2], end=" ") print() print() def add_drop_tile(tile_size, pos_row, pos_col, add_flag): for row in range(pos_row, pos_row+tile_size): for col in range(pos_col, pos_col+tile_size): board[(row*board_size)+col][3]=add_flag if add_flag==True: board[(row*board_size)+col][2]=tile_size else: board[(row*board_size)+col][2]=0 def can_fit(tile_size, pos_row, pos_col): if pos_row+tile_size>board_size or pos_col+tile_size>board_size: return False for row in range(pos_row, pos_row+tile_size): for col in range(pos_col, pos_col+tile_size): if board[(row*board_size)+col][3]==True: return False return True def go_deep(): global attempts attempts+=1 global solutions for row in range(board_size): for col in range(board_size): if board[(row*board_size)+col][3]==False: last_tile_used=0 for index in range(tile_count): tile=tile_list[index] if tile[1]==False and tile[0]!=last_tile_used: # unused tile and a size we haven't already tried last_tile_used=tile[0] if can_fit(tile[0], row, col)==True: tile_list[index][1]=True add_drop_tile(tile[0], row, col, True) if unused_tiles()==True: go_deep() else: solutions+=1 print("Solution #{:,} is Configuration #{:,} found in {:,.2f} minutes".format(solutions, attempts, (timer()-start)/60)) show_board() # don't return keep; looking for more solutions add_drop_tile(tile[0], row, col, False) tile_list[index][1]=False return # bail if we've tried all tiles def part_num(): global start start=timer() build_tile_list() init_board() go_deep() print("Tried {:,} configurations in {:,.2f} hours".format(attempts, (timer()-start)/(60*60)))
Munklar/Partridge-Number
PartNum-Py3.5.2.py
Python
mit
4,257
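The excerpt ends at the definition of part_num() without showing the top-level call, so how the script is started is an assumption; a minimal runner might look like the sketch below. Per the timing table in the header comment, lowering BigN to 5 or 6 gives a run that finishes in seconds to minutes rather than hours.

# Hedged runner for the solver above: the entry point is assumed, since the
# excerpt stops at part_num(). For a quick check, set BigN = 5 or 6 at the top
# of the module (see the timing table in the header comment).
if __name__ == '__main__':
    part_num()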
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class Error(Model): """Error. :param code: :type code: str :param message: :type message: str :param target: :type target: str :param details: :type details: list[~azure.mgmt.network.v2016_09_01.models.ErrorDetails] :param inner_error: :type inner_error: str """ _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetails]'}, 'inner_error': {'key': 'innerError', 'type': 'str'}, } def __init__(self, *, code: str=None, message: str=None, target: str=None, details=None, inner_error: str=None, **kwargs) -> None: super(Error, self).__init__(**kwargs) self.code = code self.message = message self.target = target self.details = details self.inner_error = inner_error
lmazuel/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/error_py3.py
Python
mit
1,474
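A brief, hedged usage sketch of the keyword-only model above; the import path is assumed from the file's location in the SDK.

# Illustrative only: the import path is assumed from the file's location.
from azure.mgmt.network.v2016_09_01.models import Error

err = Error(code='InvalidParameter',
            message='Subnet prefix is malformed',
            target='subnets/0')
print(err.code, err.message, err.details)   # details defaults to None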
from respite.serializers.jsonserializer import JSONSerializer


class JSONPSerializer(JSONSerializer):
    """Serialize to JSONP: the JSON payload wrapped in a callback function call."""

    def serialize(self, request):
        # Let the parent JSON serializer build the payload first.
        data = super(JSONPSerializer, self).serialize(request)

        # Use the callback name supplied by the client (?callback=...),
        # falling back to the conventional name 'callback'.
        callback = request.GET.get('callback', 'callback')

        return '%s(%s)' % (callback, data)
jgorset/django-respite
respite/serializers/jsonpserializer.py
Python
mit
391
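A standalone, hedged illustration of the padding this serializer produces; the real class works on a Django request, while here the callback name and the JSON payload are plain values for clarity.

# Standalone illustration of the JSONP padding produced by the serializer above.
import json

def jsonp_wrap(data, callback='callback'):
    return '%s(%s)' % (callback, json.dumps(data))

print(jsonp_wrap({'id': 1, 'name': 'example'}, callback='handleUsers'))
# handleUsers({"id": 1, "name": "example"})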
import sqlite3


class database:
    """Thin wrapper around an sqlite3 connection/cursor pair."""

    def __init__(self, filename):
        self.db = sqlite3.connect(filename)
        self.cur = self.db.cursor()

    def __del__(self):
        self.close()

    def commit(self):
        self.db.commit()

    def close(self):
        if hasattr(self, "db"):
            self.db.close()

    def create_table(self, schema, tablename):
        """Run the SQL script in the `schema` file, substituting {table} with `tablename`."""
        with open(schema) as sql:
            script = sql.read().format(table=tablename)
        self.cur.executescript(script)
        self.commit()

    def initTable(self, schema, tablename, iplist):
        """Create the table and seed it with one row per IP address."""
        self.create_table(schema, tablename)
        sql = "INSERT INTO `%s` (ip) VALUES (?)" % tablename
        ips = [(i,) for i in iplist]
        self.cur.executemany(sql, ips)
        self.commit()


# Don't use this yet: it relies on a module-level `config` dict that is not
# defined in this file, and expects input lines of the form ip|mac|comment|name.
def update(db, tablename):
    with open(config['INFO'], 'r') as f:
        rows = []
        for l in f.readlines():
            ip, mac, comment, name = l.decode('utf-8')[:-1].split('|')
            rows.append((mac, comment, name, ip))
    sql = "UPDATE `%s` SET mac=?,comment=?,name=? WHERE ip=?" % tablename
    db.cur.executemany(sql, rows)
    db.commit()
xavierskip/LANinfo
db.py
Python
mit
1,171
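A hedged usage sketch for the class above, assuming it is importable as db.database; the schema file must contain a {table} placeholder (per create_table's format call), and the extra columns are inferred from the update() helper.

# Hedged usage sketch for the class above (assumed importable as db.database).
with open('hosts.sql', 'w') as f:
    f.write("CREATE TABLE IF NOT EXISTS `{table}` ("
            "ip TEXT PRIMARY KEY, mac TEXT, comment TEXT, name TEXT);")

from db import database

store = database('lan.sqlite3')
store.initTable('hosts.sql', 'hosts', ['192.168.1.%d' % i for i in range(1, 5)])
store.cur.execute('SELECT count(*) FROM hosts')
print(store.cur.fetchone()[0])   # 4
store.close()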
import warnings from django import template from django.utils.safestring import mark_safe from wagtail.wagtailembeds.embeds import get_embed register = template.Library() @register.filter def embed(url, max_width=None): embed = get_embed(url, max_width=max_width) try: if embed is not None: return mark_safe(embed.html) else: return '' except: return ''
jorge-marques/wagtail
wagtail/wagtailembeds/templatetags/wagtailembeds_tags.py
Python
bsd-3-clause
420
#!/usr/bin/env python # -*- coding: utf8 -*- from itertools import product, chain from ..misc.misc import indent, compose import copy import inspect class Function(object): r""" Define el arreglo n dimensional que se usan para tener operaciones y relaciones n-arias. Necesariamente toma numeros desde 0 Tambien puede tomar una funcion directamente >>> sum_mod3=Function({(0,0):0, (0,1):1, (0,2):2, (1,0):1, (1,1):2, (1,2):0, (2,0):2, (2,1):0, (2,2):1,}) >>> sum_mod3mas3=Function({(0,0):3, (0,1):4, (0,2):5, (1,0):4, (1,1):5, (1,2):3, (2,0):5, (2,1):3, (2,2):4,}) >>> sum_mod3.table() [[0, 0, 0], [0, 1, 1], [0, 2, 2], [1, 0, 1], [1, 1, 2], [1, 2, 0], [2, 0, 2], [2, 1, 0], [2, 2, 1]] >>> sum_mod3 Function( [0, 0] -> 0, [0, 1] -> 1, [0, 2] -> 2, [1, 0] -> 1, [1, 1] -> 2, [1, 2] -> 0, [2, 0] -> 2, [2, 1] -> 0, [2, 2] -> 1, ) >>> sorted(list(sum_mod3.domain())) [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)] >>> sum_mod3.arity() 2 >>> sum_mod3 == sum_mod3mas3 False >>> sum_mod3.map_in_place(lambda x: x+3) >>> sum_mod3 == sum_mod3mas3 True >>> sum_mod3(1,2) 3 >>> sum_mod3(2,2) 4 >>> sum_mod3.table() [[0, 0, 3], [0, 1, 4], [0, 2, 5], [1, 0, 4], [1, 1, 5], [1, 2, 3], [2, 0, 5], [2, 1, 3], [2, 2, 4]] >>> sum_mod3=Function(lambda x,y:(x+y)%3,d_universe=[0,1,2]) >>> sum_mod3.table() [[0, 0, 0], [0, 1, 1], [0, 2, 2], [1, 0, 1], [1, 1, 2], [1, 2, 0], [2, 0, 2], [2, 1, 0], [2, 2, 1]] >>> sum_mod3 Function( [0, 0] -> 0, [0, 1] -> 1, [0, 2] -> 2, [1, 0] -> 1, [1, 1] -> 2, [1, 2] -> 0, [2, 0] -> 2, [2, 1] -> 0, [2, 2] -> 1, ) >>> sorted(list(sum_mod3.domain())) [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)] >>> sum_mod3.arity() 2 >>> sum_mod3 == sum_mod3mas3 False >>> sum_mod3.map_in_place(lambda x: x+3) >>> sum_mod3 == sum_mod3mas3 True >>> sum_mod3(1,2) 3 >>> sum_mod3(2,2) 4 >>> sum_mod3.table() [[0, 0, 3], [0, 1, 4], [0, 2, 5], [1, 0, 4], [1, 1, 5], [1, 2, 3], [2, 0, 5], [2, 1, 3], [2, 2, 4]] """ def __init__(self, d, arity=None, d_universe=None): # assert issubclass(type(l),list) self.func = None self.dict = None self.d_universe = d_universe if callable(d): assert d_universe, d_universe self.func = d elif isinstance(d, list): self.dict = self.__list_to_dict(d) else: self.dict = d if self.dict: assert all(isinstance(t, tuple) for t in list(self.dict.keys())) if arity: self.arityval = arity else: if self.func: # la aridad es la aridad de func self.arityval = len(inspect.getargspec(self.func).args) else: try: self.arityval = len(list(self.dict.keys())[0]) except IndexError: raise ValueError("Arity is not defined") if not all(len(k) == self.arityval for k in self.dict.keys()): raise ValueError("Inconsistent arity") self.relation = False # maneja si la funcion es booleana if not self.d_universe: self.d_universe = list(set(chain(*list(self.domain())))) def copy(self): """ Devuelve una copia de si mismo """ result = copy.copy(self) if self.func: result.d_universe= list(result.d_universe) else: result.dict = self.dict.copy() return result def domain(self): """ Un generador del dominio """ if self.relation or self.func: return product(self.d_universe, repeat=self.arity()) else: return iter(self.dict.keys()) def image(self): """ Un generador de la imagen """ if self.func: return iter(set(self.func(*t) for t in self.domain())) else: return iter(set(self.dict.values())) def arity(self): """ Devuelve la aridad de la funcion, revisando la 'primer' tupla del diccionario. 
""" return self.arityval def map_in_place(self, f): """ Funciona como un map, pero respeta la estructura de la matriz. """ if self.func: self.func = compose(f,self.func) else: self.dict = self.dict.copy() for key in self.dict: self.dict[key] = f(self.dict[key]) def restrict(self, subuniverse): """ Restringe la funcion a un subconjunto. """ result = self.copy() if result.func: result.d_universe = subuniverse else: for t in self.dict: if any(e not in subuniverse for e in t): del result.dict[t] return result def vector_call(self, vector): """ Aplica la funcion a un vector de elementos del dominio. """ return type(vector)(list(map(self, vector))) def __call__(self, *args): if not len(args) == self.arity(): raise ValueError( "Arity is %s, not %s. Do you need use vector_call?" % (self.arity(), len(args))) try: if self.func: if all(x in self.d_universe for x in args): result = self.func(*args) else: raise KeyError else: result = self.dict[args] except KeyError: if self.relation and all(x in self.d_universe for x in args): return False raise ValueError("Value '%s' not in domain of '%s'" % (str(args), repr(self))) if self.relation: return bool(result) else: return result def __lasfen__(self): """ Devuelve la cardinalidad del conjunto de partida. """ return len(self.array) def __eq__(self, other): """ Dos funciones son iguales si tienen el mismo dominio y el mismo comportamiento. """ if self.func: return frozenset(map(tuple,self.table())) == frozenset(map(tuple,other.table())) else: # basta con revisar el arreglo, ya que contiene el dominio y el # comportamiento return self.dict == other.dict def __ne__(self, other): return not self.__eq__(other) def __hash__(self): """ Hash de las funciones para manejar funciones en conjuntos. No es muy rapida. >>> f=Function({(0,0):0, (0,1):1, (0,2):2, (1,0):1, (1,1):2, (1,2):0, (2,0):2, (2,1):0, (2,2):1,}) >>> g=Function({(2,0):2, (0,1):1, (0,2):2, (1,0):1, (0,0):0, (1,1):2, (1,2):0, (2,1):0, (2,2):1,}) >>> h=Function({(2,0):1, (0,1):1, (0,2):2, (1,0):1, (0,0):0, (1,1):2, (1,2):0, (2,1):0, (2,2):1,}) >>> hash(f)==hash(g) True >>> hash(f)==hash(h) False """ if self.func: return hash(frozenset([self.d_universe,self.func])) else: return hash(frozenset(self.dict.items())) def table(self): """ Devuelve una lista de listas con la tabla que representa a la relacion/operacion """ if self.func: result = sorted((t,self.func(*t)) for t in self.domain()) else: result = sorted(self.dict.items()) if self.relation: result = [k_v for k_v in result if k_v[1]] result = [list(k_v1[0]) for k_v1 in result] else: result = [list(k_v2[0]) + [k_v2[1]] for k_v2 in result] return result def __list_to_dict(self, l): """ Convierte una matriz del modo anterior de generar funcionciones en un diccionario """ from itertools import product import numpy as np l = np.array(l, dtype=np.dtype(object)) arity = l.ndim result = {} for t in product(list(range(len(l))), repeat=arity): if l.item(*t) is not None: result[t] = l.item(*t) return result def __repr__(self): if self.relation: result = "Relation(\n" table = ["%s," % x for x in self.table()] else: if self.arity(): result = "Function(\n" table = ["%s -> %s," % (x[:-1], x[-1]) for x in self.table()] else: result = "Constant(\n" table = str(self.table()[0][0]) table = indent("\n".join(table)) return result + table + ")" def __iter__(self): """ Vuelve a las funciones iterables a partir de su grafico o a las relaciones directamente desde su conjunto de tuplas. """ return iter(self.table()) if __name__ == "__main__": import doctest doctest.testmod()
pablogventura/sagepkg
definability/functions/functions.py
Python
gpl-3.0
9,180
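A hedged sketch of two methods the doctests above do not exercise, vector_call and restrict; the import path is assumed from the file's location in the package.

# Hedged sketch; import path assumed from the file's location in the package.
from definability.functions.functions import Function

succ_mod3 = Function({(0,): 1, (1,): 2, (2,): 0})
print(succ_mod3.arity())                   # 1
print(succ_mod3.vector_call((0, 1, 2)))    # (1, 2, 0)

restricted = succ_mod3.restrict([0, 1])
print(sorted(restricted.domain()))         # [(0,), (1,)]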
import time import io from collections import ChainMap from jinja2 import Undefined, FileSystemLoader, environment from jinja2.utils import missing, object_type_repr from jinja2.exceptions import TemplateNotFound, TemplateSyntaxError import os import sqlite3 database_cursor = None class Environment(environment.Environment): def _generate(self, source, name, filename, defer_init=False): # With a normal template with FileSystemLoader, the name is the name of the file. If it is BytesIO, # it's from the test case body, and will be named as such. # TODO: get the name from varmap.mapped.id somehow? if type(name) == io.BytesIO: name = 'Embedded Test Case Template' return super(Environment, self)._generate(source, name, filename, defer_init=defer_init) class KeepUndefined(Undefined): # Jinja renders undefined variables to empty string by default. This overrides that behavior to have it # return the original {{ variable }} so it can be replaced by ninja, as with assigned variables through the # test run. def __str__(self): if self._undefined_hint is None: if self._undefined_obj is missing: return u'{{ %s }}' % self._undefined_name # here -- what to do here.. nested dictionary with a value that isn't originally in the varmap/testData. # TODO: be cleverer... return u'{{ %s[%r] }}' % (object_type_repr(self._undefined_obj), self._undefined_name) return u'{{ undefined value printed: %s }}' % self._undefined_hint def render_string_with_jinja(string_value, jinja_env, jinja_map): print(type(string_value), string_value) if isinstance(string_value, str): bytes_template = io.BytesIO() bytes_template.write(string_value.encode('utf-8')) bytes_template.seek(0) else: print('bytes templ', string_value) bytes_template = string_value template = jinja_env.get_template(bytes_template) rendered_string = template.render(jinja_map) print('rendered string is: {}'.format(rendered_string)) return rendered_string
shankj3/flask_version
render_with_jinja/test.py
Python
mit
2,183
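A standalone, hedged sketch of the behaviour KeepUndefined is after: unresolved placeholders survive the first render so a later pass can substitute them (jinja2's built-in DebugUndefined covers the simple case in a similar way).

# Standalone sketch; only the simple "plain undefined variable" case is shown.
from jinja2 import Environment, Undefined
from jinja2.utils import missing

class KeepSimpleUndefined(Undefined):
    def __str__(self):
        # Echo the placeholder back instead of rendering an empty string.
        if self._undefined_hint is None and self._undefined_obj is missing:
            return u'{{ %s }}' % self._undefined_name
        return u''

env = Environment(undefined=KeepSimpleUndefined)
template = env.from_string('host={{ host }} port={{ port }}')
print(template.render(host='localhost'))
# host=localhost port={{ port }}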
import json import stripe from datetime import datetime, timedelta from django.conf import settings from django.core import mail from django.test import Client, TestCase from django.utils.timezone import now from cl.donate.management.commands.cl_send_donation_reminders import Command from cl.donate.models import Donation # From: https://stripe.com/docs/testing#cards stripe_test_numbers = { 'good': { 'visa': '4242424242424242', }, 'bad': { 'cvc_fail': '4000000000000127', } } class EmailCommandTest(TestCase): fixtures = ['donate_test_data.json'] def test_sending_an_email(self): """Do we send emails correctly?""" # Set this value since the JSON will get stale and can't have dynamic # dates. Note that we need to get hours involved because this way we can # be sure that our donation happens in the middle of the period of time # when the alert script will check for donations. about_a_year_ago = now() - timedelta(days=354, hours=12) Donation.objects.filter(pk=1).update(date_created=about_a_year_ago) comm = Command() comm.handle() self.assertEqual(len(mail.outbox), 1) self.assertIn('you donated $1', mail.outbox[0].body) self.assertIn('you donated $1', mail.outbox[0].alternatives[0][0]) class DonationFormSubmissionTest(TestCase): def setUp(self): self.client = Client() self.params = { 'address1': "123 Sesame St.", 'city': 'New York', 'state': 'NY', 'zip_code': '12345', 'wants_newsletter': True, 'first_name': 'Elmo', 'last_name': 'Muppet', 'email': 'pandora@courtlistener.com', 'send_annual_reminder': True, 'payment_provider': 'paypal', } def test_paypal_with_other_value_as_anonymous(self): """Can a paypal donation go through using the "Other" field?""" self.params.update({ 'amount': 'other', 'amount_other': '1', }) r = self.client.post( '/donate/', self.params, follow=True, ) self.assertEqual(r.redirect_chain[0][1], 302) def test_paypal_with_regular_value_as_anonymous(self): """Can a stripe donation go through using the "Other" field?""" self.params.update({ 'amount': '10', }) r = self.client.post( '/donate/', self.params, follow=True, ) self.assertEqual(r.redirect_chain[0][1], 302) class StripeTest(TestCase): def setUp(self): self.client = Client() def make_a_donation(self, cc_number, amount, amount_other=''): stripe.api_key = settings.STRIPE_SECRET_KEY # Create a stripe token (this would normally be done via javascript in # the front end when the submit button was pressed) token = stripe.Token.create( card={ 'number': cc_number, 'exp_month': '6', 'exp_year': str(datetime.today().year + 1), 'cvc': '123', } ) # Place a donation as an anonymous (not logged in) person using the # token we just got r = self.client.post('/donate/', data={ 'amount': amount, 'amount_other': amount_other, 'payment_provider': 'cc', 'first_name': 'Barack', 'last_name': 'Obama', 'address1': '1600 Pennsylvania Ave.', 'address2': 'The Whitehouse', 'city': 'DC', 'state': 'DC', 'zip_code': '20500', 'email': 'barack@freelawproject.org', 'referrer': 'tests.py', 'stripeToken': token.id, }) return token, r def get_stripe_event(self, fingerprint): """ Get the stripe event so we can post it to the webhook """ # We don't know the event ID, so we have to get the latest ones, then # filter... 
events = stripe.Event.all() event = None for obj in events.data: if obj.data.object.card.fingerprint == fingerprint: event = obj break return event def assertEventPostsCorrectly(self, token): event = self.get_stripe_event(token.card.fingerprint) self.assertIsNotNone( event, msg="Unable to find correct event for token: %s" % token.card.fingerprint ) r = self.client.post('/donate/callbacks/stripe/', data=json.dumps(event), content_type='application/json') # Does it return properly? self.assertEqual(r.status_code, 200) def test_making_a_donation_and_getting_the_callback(self): """These two tests must live together because they need to be done sequentially. First, we place a donation using the client. Then we send a mock callback to our webhook, to make sure it accepts it properly. """ token, r = self.make_a_donation( stripe_test_numbers['good']['visa'], amount='25', ) self.assertEqual(r.status_code, 302) # 302 (redirect after a post) self.assertEventPostsCorrectly(token) def test_making_a_donation_with_a_bad_card(self): """Do we do the right thing when bad credentials are provided?""" stripe.api_key = settings.STRIPE_SECRET_KEY # Create a stripe token (this would normally be done via javascript in # the front end when the submit button was pressed) token, r = self.make_a_donation( stripe_test_numbers['bad']['cvc_fail'], amount='25', ) self.assertIn("Your card's security code is incorrect.", r.content) self.assertEventPostsCorrectly(token) def test_making_a_donation_with_a_decimal_value(self): """Do things work when people choose to donate with a decimal instead of an int? """ stripe.api_key = settings.STRIPE_SECRET_KEY token, r = self.make_a_donation( stripe_test_numbers['good']['visa'], amount='other', amount_other='10.00', ) self.assertEqual(r.status_code, 302) # 302 (redirect after a post) self.assertEventPostsCorrectly(token)
brianwc/courtlistener
cl/donate/tests.py
Python
agpl-3.0
6,484
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Date : 2013-06-25 21:56:55 # @Author : Xero # @Link : https://github.com/Johnzero # @Version : $Id$ import socket,threading,struct,sys,base64,hashlib from time import sleep # If flash Socket The policy that is sent to the clients. POLICY = """<cross-domain-policy><allow-access-from domain="*" to-ports="*" /></cross-domain-policy>\0""" # The string the client has to send in order to receive the policy. POLICYREQUEST = "<policy-file-request/>" clientpool = [] IP = "192.168.1.13" #启动websocket server class InitWebSocketServer(object) : def __init__(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #tcp 链接 try: sock.bind((IP,8080)) #绑定本地地址 sock.listen(10) except: print("Server is already running,quit") sys.exit() while 1: #创建一个死循环,接受客户端 connection,address = sock.accept() print "Connection from : ",address if(self.handshake(connection) != False): #如果握手失败,不启动任务 t = threading.Thread(target=self.DoRemoteCommand,args=(connection,)) t.start() #连接成功后回应给客户端进行握手 def handshake(self,client): headers = {} shake = client.recv(1024) if not len(shake): return False if shake.startswith(POLICYREQUEST): client.send(POLICY) return True header, data = shake.split('\r\n\r\n', 1) for line in header.split("\r\n")[1:]: key, value = line.split(": ", 1) headers[key] = value if(headers.has_key("Sec-WebSocket-Key") == False): print("this socket is not websocket,close") client.close() return False szKey = base64.b64encode(hashlib.sha1(headers["Sec-WebSocket-Key"] + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11').digest()) szHost = headers["Host"] our_handshake = "HTTP/1.1 101 Switching Protocols\r\n" \ "Upgrade:websocket\r\n"\ "Connection: Upgrade\r\n"\ "Sec-WebSocket-Accept:"+ szKey + "\r\n" \ "WebSocket-Origin:" + "localhost" + "\r\n" \ "WebSocket-Location: ws://" + szHost + "/WebManagerSocket\r\n" \ "WebSocket-Protocol:WebManagerSocket\r\n\r\n" state = client.send(our_handshake) if state: clientpool.append(client) # self.SendData("Welcome to WebSocket!\nThis messsage is from server!",client) return True #接收客户端发送过来的消息,并且解包 def RecvData(self,nNum,client): try: pData = client.recv(nNum) fi = open(r"C:\Users\Administrator\Desktop\temp6.temp","wb") fi.write(pData) fi.close() if not len(pData): return False except: return False else: code_length = ord(pData[1]) & 127 if code_length == 126: masks = pData[4:8] data = pData[8:] elif code_length == 127: masks = pData[10:14] data = pData[14:] else: masks = pData[2:6] data = pData[6:] raw_str = "" i = 0 for d in data: print ord(masks[i%4]) raw_str += chr(ord(d) ^ ord(masks[i%4])) i += 1 return raw_str #这算是客户端一个循环接受数据并且处理数据的线程 def DoRemoteCommand(self,connection): while 1: szBuf = self.RecvData(65550,connection) if(szBuf == False): try : clientpool.remove(connection) for connect in clientpool: self.SendData(str(connection.getpeername())+" quit!",connect) except ValueError:pass break else: head = '\x81' if len(szBuf) < 126: head += struct.pack('B', len(szBuf)) elif len(szBuf) <= 0xFFFF: head += struct.pack('!BH', 126, len(szBuf)) else: head += struct.pack('!BQ', 127, len(szBuf)) # while 1: # for connect in clientpool: # connect.sendall(head+szBuf) # sleep(5) for connect in clientpool: connect.sendall(head+szBuf) #打包发送数据给客户端 def SendData(self,pData,client): if(pData == False): return False else: pData = str(pData) token = "\x81" length = len(pData) if length < 126: token += struct.pack("B", length) elif length <= 0xFFFF: token += struct.pack("!BH", 126, length) else: token += struct.pack("!BQ", 127, length) pData = '%s%s' % 
(token,pData) client.send(pData) return True if __name__ == '__main__': websocket = InitWebSocketServer()
Johnzero/titanium-websocket
flask/InitTestWebSocketServer.py
Python
apache-2.0
5,551
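A hedged client sketch using the third-party websocket-client package; whether the hand-rolled handshake above satisfies a strict client library is an assumption (the response writes "Sec-WebSocket-Accept:" with no space before the value), and the address comes from the script's IP and port.

# Hedged client sketch; interoperability with the server's handshake is assumed.
from websocket import create_connection

ws = create_connection('ws://192.168.1.13:8080/WebManagerSocket')
ws.send('hello from client')
print(ws.recv())   # the server broadcasts each received frame to every client
ws.close()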
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import datetime, timedelta from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp import SUPERUSER_ID class event_type(osv.osv): """ Event Type """ _name = 'event.type' _description = __doc__ _columns = { 'name': fields.char('Event Type', size=64, required=True), 'default_reply_to': fields.char('Default Reply-To', size=64,help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one." ), 'default_email_event': fields.many2one('email.template','Event Confirmation Email', help="It will select this default confirmation event mail value when you choose this event"), 'default_email_registration': fields.many2one('email.template','Registration Confirmation Email', help="It will select this default confirmation registration mail value when you choose this event"), 'default_registration_min': fields.integer('Default Minimum Registration', help="It will select this default minimum value when you choose this event"), 'default_registration_max': fields.integer('Default Maximum Registration', help="It will select this default maximum value when you choose this event"), } _defaults = { 'default_registration_min': 0, 'default_registration_max': 0, } class event_event(osv.osv): """Event""" _name = 'event.event' _description = __doc__ _order = 'date_begin' _inherit = ['mail.thread', 'ir.needaction_mixin'] def name_get(self, cr, uid, ids, context=None): if not ids: return [] if isinstance(ids, (long, int)): ids = [ids] res = [] for record in self.browse(cr, uid, ids, context=context): date = record.date_begin.split(" ")[0] date_end = record.date_end.split(" ")[0] if date != date_end: date += ' - ' + date_end display_name = record.name + ' (' + date + ')' res.append((record['id'], display_name)) return res def copy(self, cr, uid, id, default=None, context=None): """ Reset the state and the registrations while copying an event """ if not default: default = {} default.update({ 'state': 'draft', 'registration_ids': False, }) return super(event_event, self).copy(cr, uid, id, default=default, context=context) def button_draft(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'draft'}, context=context) def button_cancel(self, cr, uid, ids, context=None): registration = self.pool.get('event.registration') reg_ids = registration.search(cr, uid, [('event_id','in',ids)], context=context) for event_reg in registration.browse(cr,uid,reg_ids,context=context): if 
event_reg.state == 'done': raise osv.except_osv(_('Error!'),_("You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event.") ) registration.write(cr, uid, reg_ids, {'state': 'cancel'}, context=context) return self.write(cr, uid, ids, {'state': 'cancel'}, context=context) def button_done(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'done'}, context=context) def confirm_event(self, cr, uid, ids, context=None): register_pool = self.pool.get('event.registration') for event in self.browse(cr, uid, ids, context=context): if event.email_confirmation_id: #send reminder that will confirm the event for all the people that were already confirmed reg_ids = register_pool.search(cr, uid, [ ('event_id', '=', event.id), ('state', 'not in', ['draft', 'cancel'])], context=context) register_pool.mail_user_confirm(cr, uid, reg_ids) return self.write(cr, uid, ids, {'state': 'confirm'}, context=context) def button_confirm(self, cr, uid, ids, context=None): """ Confirm Event and send confirmation email to all register peoples """ return self.confirm_event(cr, uid, isinstance(ids, (int, long)) and [ids] or ids, context=context) def _get_seats(self, cr, uid, ids, fields, args, context=None): """Get reserved, available, reserved but unconfirmed and used seats. @return: Dictionary of function field values. """ res = dict([(id, {}) for id in ids]) for event in self.browse(cr, uid, ids, context=context): res[event.id]['seats_reserved'] = sum(reg.nb_register for reg in event.registration_ids if reg.state == "open") res[event.id]['seats_used'] = sum(reg.nb_register for reg in event.registration_ids if reg.state == "done") res[event.id]['seats_unconfirmed'] = sum(reg.nb_register for reg in event.registration_ids if reg.state == "draft") res[event.id]['seats_available'] = event.seats_max - \ (res[event.id]['seats_reserved'] + res[event.id]['seats_used']) \ if event.seats_max > 0 else None return res def _subscribe_fnc(self, cr, uid, ids, fields, args, context=None): """This functional fields compute if the current user (uid) is already subscribed or not to the event passed in parameter (ids) """ register_pool = self.pool.get('event.registration') res = {} for event in self.browse(cr, uid, ids, context=context): res[event.id] = False curr_reg_id = register_pool.search(cr, uid, [('user_id', '=', uid), ('event_id', '=' ,event.id)]) if curr_reg_id: for reg in register_pool.browse(cr, uid, curr_reg_id, context=context): if reg.state in ('open','done'): res[event.id]= True continue return res _columns = { 'name': fields.char('Event Name', size=64, required=True, translate=True, readonly=False, states={'done': [('readonly', True)]}), 'user_id': fields.many2one('res.users', 'Responsible User', readonly=False, states={'done': [('readonly', True)]}), 'type': fields.many2one('event.type', 'Type of Event', readonly=False, states={'done': [('readonly', True)]}), 'seats_max': fields.integer('Maximum Avalaible Seats', oldname='register_max', help="You can for each event define a maximum registration level. If you have too much registrations you are not able to confirm your event. (put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}), 'seats_min': fields.integer('Minimum Reserved Seats', oldname='register_min', help="You can for each event define a minimum registration level. If you do not enough registrations you are not able to confirm your event. 
(put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}), 'seats_reserved': fields.function(_get_seats, oldname='register_current', string='Reserved Seats', type='integer', multi='seats_reserved'), 'seats_available': fields.function(_get_seats, oldname='register_avail', string='Available Seats', type='integer', multi='seats_reserved'), 'seats_unconfirmed': fields.function(_get_seats, oldname='register_prospect', string='Unconfirmed Seat Reservations', type='integer', multi='seats_reserved'), 'seats_used': fields.function(_get_seats, oldname='register_attended', string='Number of Participations', type='integer', multi='seats_reserved'), 'registration_ids': fields.one2many('event.registration', 'event_id', 'Registrations', readonly=False, states={'done': [('readonly', True)]}), 'date_begin': fields.datetime('Start Date', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'date_end': fields.datetime('End Date', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'state': fields.selection([ ('draft', 'Unconfirmed'), ('cancel', 'Cancelled'), ('confirm', 'Confirmed'), ('done', 'Done')], 'Status', readonly=True, required=True, help='If event is created, the status is \'Draft\'.If event is confirmed for the particular dates the status is set to \'Confirmed\'. If the event is over, the status is set to \'Done\'.If event is cancelled the status is set to \'Cancelled\'.'), 'email_registration_id' : fields.many2one('email.template','Registration Confirmation Email', help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.'), 'email_confirmation_id' : fields.many2one('email.template','Event Confirmation Email', help="If you set an email template, each participant will receive this email announcing the confirmation of the event."), 'reply_to': fields.char('Reply-To Email', size=64, readonly=False, states={'done': [('readonly', True)]}, help="The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. 
You can also put the email address of your mail gateway if you use one."), 'address_id': fields.many2one('res.partner','Location', readonly=False, states={'done': [('readonly', True)]}), 'country_id': fields.related('address_id', 'country_id', type='many2one', relation='res.country', string='Country', readonly=False, states={'done': [('readonly', True)]}, store=True), 'description': fields.html( 'Description', readonly=False, states={'done': [('readonly', True)]}, oldname='note'), 'company_id': fields.many2one('res.company', 'Company', required=False, change_default=True, readonly=False, states={'done': [('readonly', True)]}), 'is_subscribed' : fields.function(_subscribe_fnc, type="boolean", string='Subscribed'), 'organizer_id': fields.many2one('res.partner', "Organizer"), } _defaults = { 'state': 'draft', 'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'event.event', context=c), 'user_id': lambda obj, cr, uid, context: uid, 'organizer_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, context=c).company_id.partner_id.id, 'address_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, context=c).company_id.partner_id.id } def _check_seats_limit(self, cr, uid, ids, context=None): print "event _check_seats_limit" for event in self.browse(cr, uid, ids, context=context): if event.seats_max and event.seats_available < 0: return False return True _constraints = [ (_check_seats_limit, 'No more available seats.', ['registration_ids','seats_max']), ] def subscribe_to_event(self, cr, uid, ids, context=None): register_pool = self.pool.get('event.registration') user_pool = self.pool.get('res.users') num_of_seats = int(context.get('ticket', 1)) user = user_pool.browse(cr, uid, uid, context=context) curr_reg_ids = register_pool.search(cr, uid, [('user_id', '=', user.id), ('event_id', '=' , ids[0])]) #the subscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to subscribe if not curr_reg_ids: curr_reg_ids = [register_pool.create(cr, SUPERUSER_ID, {'event_id': ids[0] ,'email': user.email, 'name':user.name, 'user_id': user.id, 'nb_register': num_of_seats})] else: register_pool.write(cr, uid, curr_reg_ids, {'nb_register': num_of_seats}, context=context) return register_pool.confirm_registration(cr, SUPERUSER_ID, curr_reg_ids, context=context) def unsubscribe_to_event(self, cr, uid, ids, context=None): register_pool = self.pool.get('event.registration') #the unsubscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to unsubscribe curr_reg_ids = register_pool.search(cr, SUPERUSER_ID, [('user_id', '=', uid), ('event_id', '=', ids[0])]) return register_pool.button_reg_cancel(cr, SUPERUSER_ID, curr_reg_ids, context=context) def _check_closing_date(self, cr, uid, ids, context=None): for event in self.browse(cr, uid, ids, context=context): if event.date_end < event.date_begin: return False return True _constraints = [ (_check_closing_date, 'Error ! 
Closing Date cannot be set before Beginning Date.', ['date_end']), ] def onchange_event_type(self, cr, uid, ids, type_event, context=None): if type_event: type_info = self.pool.get('event.type').browse(cr,uid,type_event,context) dic ={ 'reply_to': type_info.default_reply_to, 'email_registration_id': type_info.default_email_registration.id, 'email_confirmation_id': type_info.default_email_event.id, 'seats_min': type_info.default_registration_min, 'seats_max': type_info.default_registration_max, } return {'value': dic} def onchange_start_date(self, cr, uid, ids, date_begin=False, date_end=False, context=None): res = {'value':{}} if date_end: return res if date_begin and isinstance(date_begin, str): date_begin = datetime.strptime(date_begin, "%Y-%m-%d %H:%M:%S") date_end = date_begin + timedelta(hours=1) res['value'] = {'date_end': date_end.strftime("%Y-%m-%d %H:%M:%S")} return res class event_registration(osv.osv): """Event Registration""" _name= 'event.registration' _description = __doc__ _inherit = ['mail.thread', 'ir.needaction_mixin'] _columns = { 'id': fields.integer('ID'), 'origin': fields.char('Source Document', size=124,readonly=True,help="Reference of the sales order which created the registration"), 'nb_register': fields.integer('Number of Participants', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'event_id': fields.many2one('event.event', 'Event', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)]}), 'create_date': fields.datetime('Creation Date' , readonly=True), 'date_closed': fields.datetime('Attended Date', readonly=True), 'date_open': fields.datetime('Registration Date', readonly=True), 'reply_to': fields.related('event_id','reply_to',string='Reply-to Email', type='char', size=128, readonly=True,), 'log_ids': fields.one2many('mail.message', 'res_id', 'Logs', domain=[('model','=',_name)]), 'event_end_date': fields.related('event_id','date_end', type='datetime', string="Event End Date", readonly=True), 'event_begin_date': fields.related('event_id', 'date_begin', type='datetime', string="Event Start Date", readonly=True), 'user_id': fields.many2one('res.users', 'User', states={'done': [('readonly', True)]}), 'company_id': fields.related('event_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True, states={'draft':[('readonly',False)]}), 'state': fields.selection([('draft', 'Unconfirmed'), ('cancel', 'Cancelled'), ('open', 'Confirmed'), ('done', 'Attended')], 'Status', size=16, readonly=True), 'email': fields.char('Email', size=64), 'phone': fields.char('Phone', size=64), 'name': fields.char('Name', size=128, select=True), } _defaults = { 'nb_register': 1, 'state': 'draft', } _order = 'name, create_date desc' def _check_seats_limit(self, cr, uid, ids, context=None): for registration in self.browse(cr, uid, ids, context=context): if registration.event_id.seats_max and \ registration.event_id.seats_available < (registration.state == 'draft' and registration.nb_register or 0): return False return True _constraints = [ (_check_seats_limit, 'No more available seats.', ['event_id','nb_register','state']), ] def do_draft(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'draft'}, context=context) def confirm_registration(self, cr, uid, ids, context=None): for reg in self.browse(cr, uid, ids, context=context or {}): self.pool.get('event.event').message_post(cr, uid, 
[reg.event_id.id], body=_('New registration confirmed: %s.') % (reg.name or '', ),subtype="event.mt_event_registration", context=context) return self.write(cr, uid, ids, {'state': 'open'}, context=context) def registration_open(self, cr, uid, ids, context=None): """ Open Registration """ res = self.confirm_registration(cr, uid, ids, context=context) self.mail_user(cr, uid, ids, context=context) return res def button_reg_close(self, cr, uid, ids, context=None): """ Close Registration """ if context is None: context = {} today = fields.datetime.now() for registration in self.browse(cr, uid, ids, context=context): if today >= registration.event_id.date_begin: values = {'state': 'done', 'date_closed': today} self.write(cr, uid, ids, values) else: raise osv.except_osv(_('Error!'), _("You must wait for the starting day of the event to do this action.")) return True def button_reg_cancel(self, cr, uid, ids, context=None, *args): return self.write(cr, uid, ids, {'state': 'cancel'}) def mail_user(self, cr, uid, ids, context=None): """ Send email to user with email_template when registration is done """ for registration in self.browse(cr, uid, ids, context=context): if registration.event_id.state == 'confirm' and registration.event_id.email_confirmation_id.id: self.mail_user_confirm(cr, uid, ids, context=context) else: template_id = registration.event_id.email_registration_id.id if template_id: mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id) return True def mail_user_confirm(self, cr, uid, ids, context=None): """ Send email to user when the event is confirmed """ for registration in self.browse(cr, uid, ids, context=context): template_id = registration.event_id.email_confirmation_id.id if template_id: mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id) return True def onchange_contact_id(self, cr, uid, ids, contact, partner, context=None): if not contact: return {} addr_obj = self.pool.get('res.partner') contact_id = addr_obj.browse(cr, uid, contact, context=context) return {'value': { 'email':contact_id.email, 'name':contact_id.name, 'phone':contact_id.phone, }} def onchange_partner_id(self, cr, uid, ids, part, context=None): res_obj = self.pool.get('res.partner') data = {} if not part: return {'value': data} addr = res_obj.address_get(cr, uid, [part]).get('default', False) if addr: d = self.onchange_contact_id(cr, uid, ids, addr, part, context) data.update(d['value']) return {'value': data} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
trabacus-softapps/openerp-8.0-cc
openerp/addons/event/event.py
Python
agpl-3.0
21,331
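A hedged sketch of driving the event model above over XML-RPC with OpenERP 7-style endpoints; the URL, database name, and credentials are placeholders.

# Hedged sketch: OpenERP 7-style XML-RPC access to the event model above.
# The URL, database name and credentials are placeholders.
import xmlrpclib

url, db, user, pwd = 'http://localhost:8069', 'mydb', 'admin', 'admin'
uid = xmlrpclib.ServerProxy(url + '/xmlrpc/common').login(db, user, pwd)
models = xmlrpclib.ServerProxy(url + '/xmlrpc/object')

event_ids = models.execute(db, uid, pwd, 'event.event', 'search',
                           [('state', '=', 'confirm')])
print(models.execute(db, uid, pwd, 'event.event', 'read',
                     event_ids, ['name', 'seats_available']))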
# -*- coding: utf-8 -*- CooMax= 10 import xc_base import geom import xc import math from model.sets import sets_mng as sUtils __author__= "Luis C. Pérez Tato (LCPT)" __copyright__= "Copyright 2014, LCPT" __license__= "GPL" __version__= "3.0" __email__= "l.pereztato@gmail.com" feProblem= xc.FEProblem() preprocessor= feProblem.getPreprocessor points= preprocessor.getMultiBlockTopology.getPoints pt1= points.newPntIDPos3d(1,geom.Pos3d(0.0,0.0,0.0)) pt2= points.newPntIDPos3d(2,geom.Pos3d(CooMax/2,CooMax/2,CooMax/2)) pt3= points.newPntIDPos3d(3,geom.Pos3d(CooMax,CooMax,CooMax)) s1= preprocessor.getSets.defSet("S1") sUtils.append_points(s1,[pt1,pt2]) s2= preprocessor.getSets.defSet("S2") sUtils.append_points(s2,[pt2,pt3]) s3= s1+s2 sz3= s3.getPoints.size s4= s1-s2 sz4= s4.getPoints.size s5= s1*s2 sz5= s5.getPoints.size #for p in pnts: # print codigo #print "sz= ", sz import os from miscUtils import LogMessages as lmsg fname= os.path.basename(__file__) if (sz3==3) and (sz4==1) and (sz5==1): print "test ",fname,": ok." else: lmsg.error(fname+' ERROR.')
lcpt/xc
verif/tests/preprocessor/sets/sets_boolean_operations_01.py
Python
gpl-3.0
1,078
"""Component to count within automations.""" from __future__ import annotations import logging import voluptuous as vol from homeassistant.const import ( ATTR_EDITABLE, CONF_ICON, CONF_ID, CONF_MAXIMUM, CONF_MINIMUM, CONF_NAME, ) from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import collection import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.storage import Store from homeassistant.helpers.typing import ConfigType _LOGGER = logging.getLogger(__name__) ATTR_INITIAL = "initial" ATTR_STEP = "step" ATTR_MINIMUM = "minimum" ATTR_MAXIMUM = "maximum" VALUE = "value" CONF_INITIAL = "initial" CONF_RESTORE = "restore" CONF_STEP = "step" DEFAULT_INITIAL = 0 DEFAULT_STEP = 1 DOMAIN = "counter" ENTITY_ID_FORMAT = DOMAIN + ".{}" SERVICE_DECREMENT = "decrement" SERVICE_INCREMENT = "increment" SERVICE_RESET = "reset" SERVICE_CONFIGURE = "configure" STORAGE_KEY = DOMAIN STORAGE_VERSION = 1 CREATE_FIELDS = { vol.Optional(CONF_ICON): cv.icon, vol.Optional(CONF_INITIAL, default=DEFAULT_INITIAL): cv.positive_int, vol.Required(CONF_NAME): vol.All(cv.string, vol.Length(min=1)), vol.Optional(CONF_MAXIMUM, default=None): vol.Any(None, vol.Coerce(int)), vol.Optional(CONF_MINIMUM, default=None): vol.Any(None, vol.Coerce(int)), vol.Optional(CONF_RESTORE, default=True): cv.boolean, vol.Optional(CONF_STEP, default=DEFAULT_STEP): cv.positive_int, } UPDATE_FIELDS = { vol.Optional(CONF_ICON): cv.icon, vol.Optional(CONF_INITIAL): cv.positive_int, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_MAXIMUM): vol.Any(None, vol.Coerce(int)), vol.Optional(CONF_MINIMUM): vol.Any(None, vol.Coerce(int)), vol.Optional(CONF_RESTORE): cv.boolean, vol.Optional(CONF_STEP): cv.positive_int, } def _none_to_empty_dict(value): if value is None: return {} return value CONFIG_SCHEMA = vol.Schema( { DOMAIN: cv.schema_with_slug_keys( vol.All( _none_to_empty_dict, { vol.Optional(CONF_ICON): cv.icon, vol.Optional( CONF_INITIAL, default=DEFAULT_INITIAL ): cv.positive_int, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_MAXIMUM, default=None): vol.Any( None, vol.Coerce(int) ), vol.Optional(CONF_MINIMUM, default=None): vol.Any( None, vol.Coerce(int) ), vol.Optional(CONF_RESTORE, default=True): cv.boolean, vol.Optional(CONF_STEP, default=DEFAULT_STEP): cv.positive_int, }, ) ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up the counters.""" component = EntityComponent(_LOGGER, DOMAIN, hass) id_manager = collection.IDManager() yaml_collection = collection.YamlCollection( logging.getLogger(f"{__name__}.yaml_collection"), id_manager ) collection.sync_entity_lifecycle( hass, DOMAIN, DOMAIN, component, yaml_collection, Counter.from_yaml ) storage_collection = CounterStorageCollection( Store(hass, STORAGE_VERSION, STORAGE_KEY), logging.getLogger(f"{__name__}.storage_collection"), id_manager, ) collection.sync_entity_lifecycle( hass, DOMAIN, DOMAIN, component, storage_collection, Counter ) await yaml_collection.async_load( [{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()] ) await storage_collection.async_load() collection.StorageCollectionWebsocket( storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS ).async_setup(hass) component.async_register_entity_service(SERVICE_INCREMENT, {}, "async_increment") 
component.async_register_entity_service(SERVICE_DECREMENT, {}, "async_decrement") component.async_register_entity_service(SERVICE_RESET, {}, "async_reset") component.async_register_entity_service( SERVICE_CONFIGURE, { vol.Optional(ATTR_MINIMUM): vol.Any(None, vol.Coerce(int)), vol.Optional(ATTR_MAXIMUM): vol.Any(None, vol.Coerce(int)), vol.Optional(ATTR_STEP): cv.positive_int, vol.Optional(ATTR_INITIAL): cv.positive_int, vol.Optional(VALUE): cv.positive_int, }, "async_configure", ) return True class CounterStorageCollection(collection.StorageCollection): """Input storage based collection.""" CREATE_SCHEMA = vol.Schema(CREATE_FIELDS) UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS) async def _process_create_data(self, data: dict) -> dict: """Validate the config is valid.""" return self.CREATE_SCHEMA(data) @callback def _get_suggested_id(self, info: dict) -> str: """Suggest an ID based on the config.""" return info[CONF_NAME] async def _update_data(self, data: dict, update_data: dict) -> dict: """Return a new updated data object.""" update_data = self.UPDATE_SCHEMA(update_data) return {**data, **update_data} class Counter(RestoreEntity): """Representation of a counter.""" def __init__(self, config: dict) -> None: """Initialize a counter.""" self._config: dict = config self._state: int | None = config[CONF_INITIAL] self.editable: bool = True @classmethod def from_yaml(cls, config: dict) -> Counter: """Create counter instance from yaml config.""" counter = cls(config) counter.editable = False counter.entity_id = ENTITY_ID_FORMAT.format(config[CONF_ID]) return counter @property def should_poll(self) -> bool: """If entity should be polled.""" return False @property def name(self) -> str | None: """Return name of the counter.""" return self._config.get(CONF_NAME) @property def icon(self) -> str | None: """Return the icon to be used for this entity.""" return self._config.get(CONF_ICON) @property def state(self) -> int | None: """Return the current value of the counter.""" return self._state @property def extra_state_attributes(self) -> dict: """Return the state attributes.""" ret = { ATTR_EDITABLE: self.editable, ATTR_INITIAL: self._config[CONF_INITIAL], ATTR_STEP: self._config[CONF_STEP], } if self._config[CONF_MINIMUM] is not None: ret[CONF_MINIMUM] = self._config[CONF_MINIMUM] if self._config[CONF_MAXIMUM] is not None: ret[CONF_MAXIMUM] = self._config[CONF_MAXIMUM] return ret @property def unique_id(self) -> str | None: """Return unique id of the entity.""" return self._config[CONF_ID] def compute_next_state(self, state) -> int: """Keep the state within the range of min/max values.""" if self._config[CONF_MINIMUM] is not None: state = max(self._config[CONF_MINIMUM], state) if self._config[CONF_MAXIMUM] is not None: state = min(self._config[CONF_MAXIMUM], state) return state async def async_added_to_hass(self) -> None: """Call when entity about to be added to Home Assistant.""" await super().async_added_to_hass() # __init__ will set self._state to self._initial, only override # if needed. 
if self._config[CONF_RESTORE]: state = await self.async_get_last_state() if state is not None: self._state = self.compute_next_state(int(state.state)) self._config[CONF_INITIAL] = state.attributes.get(ATTR_INITIAL) self._config[CONF_MAXIMUM] = state.attributes.get(ATTR_MAXIMUM) self._config[CONF_MINIMUM] = state.attributes.get(ATTR_MINIMUM) self._config[CONF_STEP] = state.attributes.get(ATTR_STEP) @callback def async_decrement(self) -> None: """Decrement the counter.""" self._state = self.compute_next_state(self._state - self._config[CONF_STEP]) self.async_write_ha_state() @callback def async_increment(self) -> None: """Increment a counter.""" self._state = self.compute_next_state(self._state + self._config[CONF_STEP]) self.async_write_ha_state() @callback def async_reset(self) -> None: """Reset a counter.""" self._state = self.compute_next_state(self._config[CONF_INITIAL]) self.async_write_ha_state() @callback def async_configure(self, **kwargs) -> None: """Change the counter's settings with a service.""" new_state = kwargs.pop(VALUE, self._state) self._config = {**self._config, **kwargs} self._state = self.compute_next_state(new_state) self.async_write_ha_state() async def async_update_config(self, config: dict) -> None: """Change the counter's settings WS CRUD.""" self._config = config self._state = self.compute_next_state(self._state) self.async_write_ha_state()
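# --- Illustrative sketch (not part of the counter component above) ---
# Counter.compute_next_state() clamps the value into the configured
# [minimum, maximum] window, where None means "no bound". The helper below is
# a hypothetical, dependency-free restatement of that rule, included only to
# make the clamping behaviour concrete; it is not Home Assistant code.

def _clamp(state, minimum=None, maximum=None):
    """Keep state within the optional [minimum, maximum] range."""
    if minimum is not None:
        state = max(minimum, state)
    if maximum is not None:
        state = min(maximum, state)
    return state

assert _clamp(7, minimum=0, maximum=5) == 5    # capped at the maximum
assert _clamp(-3, minimum=0, maximum=5) == 0   # raised to the minimum
assert _clamp(2) == 2                          # no bounds configured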
aronsky/home-assistant
homeassistant/components/counter/__init__.py
Python
apache-2.0
9,477
import os import re import datetime from django.conf import settings from ietf.doc.models import Document, State, DocAlias, DocEvent, DocumentAuthor from ietf.doc.models import NewRevisionDocEvent, save_document_in_history from ietf.doc.models import RelatedDocument, DocRelationshipName from ietf.doc.utils import add_state_change_event, rebuild_reference_relations from ietf.doc.utils import set_replaces_for_document from ietf.doc.mails import send_review_possibly_replaces_request from ietf.group.models import Group from ietf.ietfauth.utils import has_role from ietf.name.models import StreamName from ietf.person.models import Person, Email from ietf.submit.mail import announce_to_lists, announce_new_version, announce_to_authors from ietf.submit.models import Submission, SubmissionEvent, Preapproval, DraftSubmissionStateName from ietf.utils import unaccent from ietf.utils.log import log from ietf.utils.pipe import pipe def check_idnits(path): #p = subprocess.Popen([self.idnits, '--submitcheck', '--nitcount', path], stdout=subprocess.PIPE) cmd = "%s --submitcheck --nitcount %s" % (settings.IDSUBMIT_IDNITS_BINARY, path) code, out, err = pipe(cmd) if code != 0: log("idnits error: %s:\n Error %s: %s" %( cmd, code, err)) return out def found_idnits(idnits_message): if not idnits_message: return False success_re = re.compile('\s+Summary:\s+0\s+|No nits found') if success_re.search(idnits_message): return True return False def validate_submission(submission): errors = {} if submission.state_id not in ("cancel", "posted"): for ext in submission.file_types.split(','): source = os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s%s' % (submission.name, submission.rev, ext)) if not os.path.exists(source): errors['files'] = '"%s" was not found in the staging area. We recommend you that you cancel this submission and upload your files again.' 
% os.path.basename(source) break if not submission.title: errors['title'] = 'Title is empty or was not found' if submission.group and submission.group.state_id != "active": errors['group'] = 'Group exists but is not an active group' if not submission.abstract: errors['abstract'] = 'Abstract is empty or was not found' if not submission.authors_parsed(): errors['authors'] = 'No authors found' # revision if submission.state_id != "posted": error = validate_submission_rev(submission.name, submission.rev) if error: errors['rev'] = error # draft date error = validate_submission_document_date(submission.submission_date, submission.document_date) if error: errors['document_date'] = error return errors def has_been_replaced_by(name): docs=Document.objects.filter(name=name) if docs: doc=docs[0] return doc.related_that("replaces") return None def validate_submission_rev(name, rev): if not rev: return 'Revision not found' try: rev = int(rev) except ValueError: return 'Revision must be a number' else: if not (0 <= rev <= 99): return 'Revision must be between 00 and 99' expected = 0 existing_revs = [int(i.rev) for i in Document.objects.filter(name=name)] if existing_revs: expected = max(existing_revs) + 1 if rev != expected: return 'Invalid revision (revision %02d is expected)' % expected replaced_by=has_been_replaced_by(name) if replaced_by: return 'This document has been replaced by %s' % ",".join(rd.name for rd in replaced_by) return None def validate_submission_document_date(submission_date, document_date): if not document_date: return 'Document date is empty or not in a proper format' elif abs(submission_date - document_date) > datetime.timedelta(days=3): return 'Document date must be within 3 days of submission date' return None def create_submission_event(request, submission, desc): by = None if request and request.user.is_authenticated(): try: by = request.user.person except Person.DoesNotExist: pass SubmissionEvent.objects.create(submission=submission, by=by, desc=desc) def post_submission(request, submission): system = Person.objects.get(name="(System)") try: draft = Document.objects.get(name=submission.name) save_document_in_history(draft) except Document.DoesNotExist: draft = Document(name=submission.name) draft.intended_std_level = None prev_rev = draft.rev draft.type_id = "draft" draft.time = datetime.datetime.now() draft.title = submission.title group = submission.group or Group.objects.get(type="individ") if not (group.type_id == "individ" and draft.group and draft.group.type_id == "area"): # don't overwrite an assigned area if it's still an individual # submission draft.group_id = group.pk draft.rev = submission.rev draft.pages = submission.pages draft.abstract = submission.abstract was_rfc = draft.get_state_slug() == "rfc" if not draft.stream: stream_slug = None if draft.name.startswith("draft-iab-"): stream_slug = "iab" elif draft.name.startswith("draft-irtf-"): stream_slug = "irtf" elif draft.name.startswith("draft-ietf-") and (draft.group.type_id != "individ" or was_rfc): stream_slug = "ietf" if stream_slug: draft.stream = StreamName.objects.get(slug=stream_slug) draft.expires = datetime.datetime.now() + datetime.timedelta(settings.INTERNET_DRAFT_DAYS_TO_EXPIRE) draft.save() submitter_parsed = submission.submitter_parsed() if submitter_parsed["name"] and submitter_parsed["email"]: submitter = ensure_person_email_info_exists(submitter_parsed["name"], submitter_parsed["email"]).person submitter_info = u'%s <%s>' % (submitter_parsed["name"], submitter_parsed["email"]) else: submitter = 
system submitter_info = system.name draft.set_state(State.objects.get(used=True, type="draft", slug="active")) DocAlias.objects.get_or_create(name=submission.name, document=draft) update_authors(draft, submission) trouble = rebuild_reference_relations(draft, filename=os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s.txt' % (submission.name, submission.rev))) if trouble: log('Rebuild_reference_relations trouble: %s'%trouble) # new revision event e = NewRevisionDocEvent(type="new_revision", doc=draft, rev=draft.rev) e.time = draft.time #submission.submission_date e.by = submitter e.desc = "New version available: <b>%s-%s.txt</b>" % (draft.name, draft.rev) e.save() if draft.stream_id == "ietf" and draft.group.type_id == "wg" and draft.rev == "00": # automatically set state "WG Document" draft.set_state(State.objects.get(used=True, type="draft-stream-%s" % draft.stream_id, slug="wg-doc")) if draft.get_state_slug("draft-iana-review") in ("ok-act", "ok-noact", "not-ok"): prev_state = draft.get_state("draft-iana-review") next_state = State.objects.get(used=True, type="draft-iana-review", slug="changed") draft.set_state(next_state) add_state_change_event(draft, submitter, prev_state, next_state) # clean up old files if prev_rev != draft.rev: from ietf.doc.expire import move_draft_files_to_archive move_draft_files_to_archive(draft, prev_rev) # automatic state changes state_change_msg = "" if not was_rfc and draft.tags.filter(slug="need-rev"): draft.tags.remove("need-rev") draft.tags.add("ad-f-up") e = DocEvent(type="changed_document", doc=draft) e.desc = "Sub state has been changed to <b>AD Followup</b> from <b>Revised ID Needed</b>" e.by = system e.save() state_change_msg = e.desc move_files_to_repository(submission) submission.state = DraftSubmissionStateName.objects.get(slug="posted") new_replaces, new_possibly_replaces = update_replaces_from_submission(request, submission, draft) announce_to_lists(request, submission) announce_new_version(request, submission, draft, state_change_msg) announce_to_authors(request, submission) if new_possibly_replaces: send_review_possibly_replaces_request(request, draft, submitter_info) submission.save() def update_replaces_from_submission(request, submission, draft): if not submission.replaces: return [], [] is_secretariat = has_role(request.user, "Secretariat") is_chair_of = [] if request.user.is_authenticated(): is_chair_of = list(Group.objects.filter(role__person__user=request.user, role__name="chair")) replaces = DocAlias.objects.filter(name__in=submission.replaces.split(",")).select_related("document", "document__group") existing_replaces = list(draft.related_that_doc("replaces")) existing_suggested = set(draft.related_that_doc("possibly-replaces")) submitter_email = submission.submitter_parsed()["email"] approved = [] suggested = [] for r in replaces: if r in existing_replaces: continue rdoc = r.document if rdoc == draft: continue # TODO - I think the .exists() is in the wrong place below.... 
if (is_secretariat or (draft.group in is_chair_of and (rdoc.group.type_id == "individ" or rdoc.group in is_chair_of)) or (submitter_email and rdoc.authors.filter(address__iexact=submitter_email)).exists()): approved.append(r) else: if r not in existing_suggested: suggested.append(r) by = request.user.person if request.user.is_authenticated() else Person.objects.get(name="(System)") set_replaces_for_document(request, draft, existing_replaces + approved, by, email_subject="%s replacement status set during submit by %s" % (draft.name, submission.submitter_parsed()["name"])) if suggested: possibly_replaces = DocRelationshipName.objects.get(slug="possibly-replaces") for r in suggested: RelatedDocument.objects.create(source=draft, target=r, relationship=possibly_replaces) DocEvent.objects.create(doc=draft, by=by, type="added_suggested_replaces", desc="Added suggested replacement relationships: %s" % ", ".join(d.name for d in suggested)) return approved, suggested def get_person_from_name_email(name, email): # try email if email: persons = Person.objects.filter(email__address=email).distinct() if len(persons) == 1: return persons[0] else: persons = Person.objects.none() if not persons: persons = Person.objects.all() # try full name p = persons.filter(alias__name=name).distinct() if p: return p[0] return None def ensure_person_email_info_exists(name, email): person = get_person_from_name_email(name, email) # make sure we have a person if not person: person = Person() person.name = name person.ascii = unaccent.asciify(person.name) person.save() # make sure we have an email address if email: addr = email.lower() else: # we're in trouble, use a fake one addr = u"unknown-email-%s" % person.name.replace(" ", "-") try: email = person.email_set.get(address=addr) except Email.DoesNotExist: try: # maybe it's pointing to someone else email = Email.objects.get(address=addr) except Email.DoesNotExist: # most likely we just need to create it email = Email(address=addr) email.active = True email.person = person email.save() return email def update_authors(draft, submission): authors = [] for order, author in enumerate(submission.authors_parsed()): email = ensure_person_email_info_exists(author["name"], author["email"]) a = DocumentAuthor.objects.filter(document=draft, author=email) if a: a = a[0] else: a = DocumentAuthor(document=draft, author=email) a.order = order a.save() authors.append(email) draft.documentauthor_set.exclude(author__in=authors).delete() def cancel_submission(submission): submission.state = DraftSubmissionStateName.objects.get(slug="cancel") submission.save() remove_submission_files(submission) def rename_submission_files(submission, prev_rev, new_rev): from ietf.submit.forms import SubmissionUploadForm for ext in SubmissionUploadForm.base_fields.keys(): source = os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s.%s' % (submission.name, prev_rev, ext)) dest = os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s.%s' % (submission.name, new_rev, ext)) if os.path.exists(source): os.rename(source, dest) def move_files_to_repository(submission): from ietf.submit.forms import SubmissionUploadForm for ext in SubmissionUploadForm.base_fields.keys(): source = os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s.%s' % (submission.name, submission.rev, ext)) dest = os.path.join(settings.IDSUBMIT_REPOSITORY_PATH, '%s-%s.%s' % (submission.name, submission.rev, ext)) if os.path.exists(source): os.rename(source, dest) else: if os.path.exists(dest): log("Intended to move '%s' to '%s', but found source 
missing while destination exists." % (source, dest))
            elif ext in submission.file_types.split(','):
                raise ValueError("Intended to move '%s' to '%s', but found source and destination missing." % (source, dest))


def remove_submission_files(submission):
    for ext in submission.file_types.split(','):
        source = os.path.join(settings.IDSUBMIT_STAGING_PATH, '%s-%s%s' % (submission.name, submission.rev, ext))
        if os.path.exists(source):
            os.unlink(source)


def approvable_submissions_for_user(user):
    if not user.is_authenticated():
        return []

    res = Submission.objects.filter(state="grp-appr").order_by('-submission_date')
    if has_role(user, "Secretariat"):
        return res

    # those we can reach as chair
    return res.filter(group__role__name="chair", group__role__person__user=user)


def preapprovals_for_user(user):
    if not user.is_authenticated():
        return []

    posted = Submission.objects.distinct().filter(state="posted").values_list('name', flat=True)
    res = Preapproval.objects.exclude(name__in=posted).order_by("-time").select_related('by')
    if has_role(user, "Secretariat"):
        return res

    acronyms = [g.acronym for g in Group.objects.filter(role__person__user=user, type__in=("wg", "rg"))]
    res = res.filter(name__regex="draft-[^-]+-(%s)-.*" % "|".join(acronyms))
    return res


def recently_approved_by_user(user, since):
    if not user.is_authenticated():
        return []

    res = Submission.objects.distinct().filter(state="posted", submission_date__gte=since, rev="00").order_by('-submission_date')
    if has_role(user, "Secretariat"):
        return res

    # those we can reach as chair
    return res.filter(group__role__name="chair", group__role__person__user=user)


def expirable_submissions(older_than_days):
    cutoff = datetime.date.today() - datetime.timedelta(days=older_than_days)
    return Submission.objects.exclude(state__in=("cancel", "posted")).filter(submission_date__lt=cutoff)


def expire_submission(submission, by):
    submission.state_id = "cancel"
    submission.save()

    SubmissionEvent.objects.create(submission=submission, by=by, desc="Canceled expired submission")
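# --- Illustrative sketch (not part of ietf/submit/utils.py) ---
# validate_submission_rev() above only accepts the next revision in sequence:
# max(existing revisions) + 1, within 00-99. The hypothetical helper below
# mirrors that arithmetic without the Django ORM, purely as an illustration.

def _expected_next_rev(existing_revs):
    """Return the two-digit revision string a new upload is expected to carry."""
    expected = max(existing_revs) + 1 if existing_revs else 0
    return '%02d' % expected

assert _expected_next_rev([]) == '00'          # brand new draft
assert _expected_next_rev([0, 1, 2]) == '03'   # next revision in sequence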
wpjesus/codematch
ietf/submit/utils.py
Python
bsd-3-clause
16,013
print("In Python, what do you call a 'box' used to store data?") answer = input() if answer == "variable": print(" :) ") else: print(" :( ") print("Thank you for playing!") print(''' Q1 - In Python, what do you call a 'box' used to store data? a - text b - variable c - a shoe box ''') answer = input().lower() if answer == "a": print(" Nope - text is a type of data :( ") elif answer == "b": print(" Correct!! :) ") elif answer == "c": print(" Don't be silly! :( ") else: print(" You didn't choose a, b or c :( ")
arve0/example_lessons
src/python/lessons/Quiz/Project Resources/Quiz.py
Python
cc0-1.0
553
#! /usr/bin/env python import vcsn from test import * algos = ['distance', 'inplace', 'separate'] # check INPUT EXP ALGORITHM # ------------------------- def check_algo(i, o, algo): i = vcsn.automaton(i) o = vcsn.automaton(o) print("using algorithm: ", algo) print("checking proper") # We call sort().strip() everywhere to avoid seeing differences caused by the # different numbering of the states between the algorithms CHECK_EQ(o.sort().strip(), i.proper(algo=algo).sort().strip()) # Since we remove only states that _become_ inaccessible, # i.proper(prune = False).accessible() is not the same as # i.proper(): in the former case we also removed the non-accessible # states. print("checking proper(prune = False)") CHECK_EQ(o.accessible(), i.proper(prune=False, algo=algo).accessible()) # FIXME: Because proper uses copy, state numbers are changed. # # FIXME: cannot use is_isomorphic because some of our test cases # have unreachable states, which is considered invalid by # is_isomorphic. print("checking idempotence") if i.proper(algo=algo).is_accessible(): CHECK_ISOMORPHIC(i.proper(algo=algo), i.proper(algo=algo).proper(algo=algo)) else: CHECK_EQ(i.proper(algo=algo).sort().strip(), i.proper(algo=algo).proper(algo=algo).sort().strip()) def check_fail_algo(aut, algo): a = vcsn.automaton(aut) try: a.proper(algo=algo) FAIL(r"invalid \\e-cycle not detected") except RuntimeError: PASS() def check(i, o, algs=algos): for algo in algs: check_algo(i, o, algo) def check_fail(i, algs=algos): for algo in algs: check_fail_algo(i, algo) ## --------------------------------------- ## ## lao, r: check the computation of star. ## ## --------------------------------------- ## check(r'''context = "lao, r" $ -> 0 <3> 0 -> 1 <5> 1 -> 1 <.5>\e 1 -> 2 <7>\e 2 -> $ <11> ''', ''' digraph { vcsn_context = "lao, r" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 } I0 -> 0 [label = "<3>"] 0 -> 1 [label = "<5>"] 1 -> F1 [label = "<154>"] } ''') ## -------------------------------------------- ## ## law_char, r: check the computation of star. ## ## -------------------------------------------- ## check(r'''digraph { vcsn_context = "law_char(ab), r" I -> 0 -> F 0 -> 0 [label = "<.5>\\e"] }''','''digraph { vcsn_context = "wordset<char_letters(ab)>, r" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F0 } { node [shape = circle, style = rounded, width = 0.5] 0 } I0 -> 0 0 -> F0 [label = "<2>"] }''') ## ------------- ## ## law_char, b. ## ## ------------- ## check(r'''digraph { vcsn_context = "law_char(ab), b" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 } I0 -> 0 0 -> 1 [label = "a"] 0 -> 2 [label = "\\e"] 1 -> F1 1 -> 0 [label = "\\e"] 1 -> 2 [label = "a"] 2 -> 0 [label = "a"] 2 -> 1 [label = "\\e"] }''', '''digraph { vcsn_context = "wordset<char_letters(ab)>, b" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F0 F1 F2 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 } I0 -> 0 0 -> F0 0 -> 0 [label = "a"] 0 -> 1 [label = "a"] 0 -> 2 [label = "a"] 1 -> F1 1 -> 0 [label = "a"] 1 -> 1 [label = "a"] 1 -> 2 [label = "a"] 2 -> F2 2 -> 0 [label = "a"] 2 -> 1 [label = "a"] 2 -> 2 [label = "a"] }''') ## ------------------------------------------------- ## ## law_char, z: invalid \e-cycle (weight is not 0). 
## ## ------------------------------------------------- ## check_fail(r'''digraph { vcsn_context = "law_char(ab), z" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 3 } I0 -> 0 0 -> 1 [label = "<2>a"] 0 -> 2 [label = "<-1>\\e"] 1 -> F1 1 -> 0 [label = "<-1>\\e"] 2 -> 1 [label = "<-1>\\e"] 2 -> 3 [label = "<2>a"] 3 -> 0 [label = "<2>a"] }''') ## ------------- ## ## law_char, z. ## ## ------------- ## check(r'''digraph { vcsn_context = "law_char(ab), z" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 3 } I0 -> 0 0 -> 1 [label = "<2>a"] 0 -> 2 [label = "<-1>a"] 1 -> F1 1 -> 0 [label = "<-1>\\e"] 2 -> 1 [label = "<-1>\\e"] 2 -> 3 [label = "<2>a"] 3 -> 0 [label = "<2>a"] }''', '''digraph { vcsn_context = "wordset<char_letters(ab)>, z" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 F2 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 3 } I0 -> 0 0 -> 1 [label = "<2>a"] 0 -> 2 [label = "<-1>a"] 1 -> F1 1 -> 1 [label = "<-2>a"] 1 -> 2 [label = "a"] 2 -> F2 [label = "<-1>"] 2 -> 1 [label = "<2>a"] 2 -> 2 [label = "<-1>a"] 2 -> 3 [label = "<2>a"] 3 -> 0 [label = "<2>a"] }''') ## ---------------------------------- ## ## law_char, zmin: invalid \e-cycle. ## ## ---------------------------------- ## check_fail(r'''digraph { vcsn_context = "law_char(ab), zmin" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 } I0 -> 0 [label = "<0>"] 0 -> 1 [label = "<2>a"] 0 -> 2 [label = "<-1>\\e"] 1 -> F1 [label = "<0>"] 1 -> 0 [label = "<-1>\\e"] 1 -> 2 [label = "<2>a"] 2 -> 0 [label = "<2>a"] 2 -> 1 [label = "<-1>\\e"] }''') ## ---------------------------- ## ## lan_char, zr: a long cycle. ## ## ---------------------------- ## # FIXME(ap): with distance, weights are equivalent but not the same check(r'''digraph { vcsn_context = "lan_char(z), expressionset<lal_char(abcd), q>" rankdir = LR node [shape = circle] { node [shape = point, width = 0] I F } { 0 1 2 3 4 } I -> 0 0 -> 1 [label = "<a>\\e"] 1 -> 2 [label = "<b>\\e"] 2 -> 3 [label = "<c>\\e"] 3 -> 0 [label = "<d>\\e"] 0 -> 4 [label = "z"] 4 -> F }''', r'''digraph { vcsn_context = "letterset<char_letters(z)>, expressionset<letterset<char_letters(abcd)>, q>" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 } I0 -> 0 0 -> 1 [label = "<(abcd)*>z"] 1 -> F1 }''', [algo for algo in algos if algo != 'distance']) ## ----------------------------------------- ## ## lan_char, zr: remove now-useless states. ## ## ----------------------------------------- ## # Check that we remove states that _end_ without incoming transitions, # but leave states that were inaccessible before the elimination of # the spontaneous transitions. 
# FIXME(ap): with distance, inaccessible states get pruned check(r'''digraph { vcsn_context = "lan_char(z), expressionset<lal_char(abcdefgh), q>" rankdir = LR node [shape = circle] { node [shape = point, width = 0] I F } { 0 1 2 3 4 5 6 7 8 9 } I -> 0 0 -> 3 [label = "<a>\\e"] 0 -> 5 [label = "<b>\\e"] 1 -> 2 [label = "<c>\\e"] 3 -> 4 [label = "<d>\\e"] 5 -> 6 [label = "<e>\\e"] 7 -> 8 [label = "<f>\\e"] 6 -> 9 [label = "<g>\\e"] 8 -> 9 [label = "<h>\\e"] 9 -> F }''', '''digraph { vcsn_context = "letterset<char_letters(z)>, expressionset<letterset<char_letters(abcdefgh)>, q>" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F0 F2 } { node [shape = circle, style = rounded, width = 0.5] 0 1 [color = DimGray] 2 [color = DimGray] } I0 -> 0 0 -> F0 [label = "<beg>"] 2 -> F2 [label = "<fh>", color = DimGray] }''', [algo for algo in algos if algo != 'distance']) ## ------------- ## ## lan_char, b. ## ## ------------- ## check(r'''digraph { vcsn_context = "lan_char(ab), b" I -> 0 0 -> 1 [label = "\\e"] 1 -> 0 [label = "\\e"] 0 -> 4 [label = "a"] 4 -> F }''', '''digraph { vcsn_context = "letterset<char_letters(ab)>, b" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 } I0 -> 0 0 -> 1 [label = "a"] 1 -> F1 }''') ## ---------------------------- ## ## lat<lan_char, lan_char>, b. ## ## ---------------------------- ## check(r'''digraph { vcsn_context = "lat<lan_char(ab),lan_char(xy)>, b" I0 -> 0 0 -> 1 [label = "(\\e,\\e)"] 0 -> 1 [label = "(a,x)"] 0 -> 2 [label = "(b,\\e)"] 1 -> F1 1 -> 2 [label = "(\\e,y)"] 2 -> F2 }''', r'''digraph { vcsn_context = "lat<nullableset<letterset<char_letters(ab)>>, nullableset<letterset<char_letters(xy)>>>, b" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F0 F1 F2 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 } I0 -> 0 0 -> F0 0 -> 1 [label = "(a,x)"] 0 -> 2 [label = "(\\e,y), (b,\\e)"] 1 -> F1 1 -> 2 [label = "(\\e,y)"] 2 -> F2 }''') ## ---------------------------- ## ## lat<lan_char, lal_char>, b. ## ## ---------------------------- ## check(r'''digraph { vcsn_context = "lat<lan_char(ab),lal_char(xy)>, b" I0 -> 0 0 -> 1 [label = "(a,x)"] 0 -> 2 [label = "(b,y)"] 1 -> F1 1 -> 2 [label = "(\\e,y)"] 2 -> F2 }''', r'''digraph { vcsn_context = "lat<nullableset<letterset<char_letters(ab)>>, letterset<char_letters(xy)>>, b" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 F1 F2 } { node [shape = circle, style = rounded, width = 0.5] 0 1 2 } I0 -> 0 0 -> 1 [label = "(a,x)"] 0 -> 2 [label = "(b,y)"] 1 -> F1 1 -> 2 [label = "(\\e,y)"] 2 -> F2 }''') ## ---------------------- ## ## Forward vs. backward. 
## ## ---------------------- ## for algo in algos: a = vcsn.context('lan_char(ab), b').expression('a*').thompson() CHECK_EQ(vcsn.automaton(r'''digraph { vcsn_context = "letterset<char_letters(ab)>, b" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I1 F0 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 } I1 -> 1 0 -> F0 0 -> 0 [label = "a"] 1 -> F1 1 -> 0 [label = "a"] }''').sort().strip(), a.proper(backward=True, algo=algo).sort().strip()) CHECK_EQ(vcsn.automaton(r'''digraph { vcsn_context = "letterset<char_letters(ab)>, b" rankdir = LR edge [arrowhead = vee, arrowsize = .6] { node [shape = point, width = 0] I0 I1 F1 } { node [shape = circle, style = rounded, width = 0.5] 0 1 } I0 -> 0 I1 -> 1 0 -> 0 [label = "a"] 0 -> 1 [label = "a"] 1 -> F1 }''').sort().strip(), a.proper(backward=False, algo=algo).sort().strip())
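# --- Illustrative usage note (not part of this test file) ---
# Everything above exercises proper(), which eliminates spontaneous (\e)
# transitions. Assuming the vcsn Python bindings are installed, a minimal
# interactive session mirroring these tests would look like:
#
#     import vcsn
#     a = vcsn.context('lan_char(ab), b').expression('a*').thompson()
#     a.proper()                  # backward elimination (the default)
#     a.proper(backward=False)    # forward elimination
#     a.proper(prune=False)       # keep states that become inaccessible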
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn
tests/python/proper.py
Python
gpl-3.0
11,408
# -*- coding: utf-8 -*-
"""
@author: López Ricardo Ezequiel
@license: GNU GENERAL PUBLIC LICENSE
@contact: mail@lopezezequiel.com
"""
lopezezequiel/fractalZE
FractalZE/__init__.py
Python
gpl-2.0
135
from django import http from django.views.decorators.csrf import csrf_exempt from django.utils.decorators import method_decorator from django.utils.cache import add_never_cache_headers from django.utils.translation import ugettext_lazy as _ from hyperadmin.links import Link class ConditionalAccessMixin(object): etag_function = None def check_etag(self, data): new_etag = self.etag_function and self.etag_function(data) if not new_etag: return if self.request.META.get('HTTP_IF_NONE_MATCH', None) == new_etag: raise http.HttpResponseNotModified() if self.request.META.get('HTTP_IF_MATCH', new_etag) != new_etag: raise http.HttpResponse(status=412) # Precondition Failed class EndpointViewMixin(ConditionalAccessMixin): #state = None global_state = None cacheable = False submit_methods = ['POST', 'PUT', 'DELETE'] template_name = None def get_template_names(self): if self.template_name: if isinstance(self.template_name, basestring): template_names = [self.template_name] else: template_names = self.template_name return self.expand_template_names(template_names) return None def get_request_form_kwargs(self): return self.api_request.payload def get_item(self): return None @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): """ Takes a django request object and builds an APIRequest object Calls dispatch_api with the api request :rtype: HttpResponse """ assert not self.api_request api_request = self.create_apirequest(request=request, url_args=args, url_kwargs=kwargs) endpoint = api_request.get_endpoint(self.get_url_name()) return endpoint.dispatch_api(api_request) def dispatch_api(self, api_request): ''' Execute the api request :rtype: HttpResponse ''' response = self.generate_api_response(api_request) return self.normalize_response(response) def generate_api_response(self, api_request): ''' Returns the result of executing a link :rtype: Link or HttpResponse ''' if api_request.method.lower() in self.http_method_names: handler = getattr(self, api_request.method.lower(), self.handle_link_submission) else: handler = self.http_method_not_allowed self.api_request = api_request self.args = api_request.url_args self.kwargs = api_request.url_kwargs self.initialize_state() assert self.state is not None self.common_state.update(self.get_common_state_data()) permission_response = self.api_permission_check(api_request, self) if permission_response is not None: return permission_response else: return handler(api_request) def normalize_response(self, response_or_link): ''' Converts a link response to an HttpResponse :rtype: HttpResponse ''' if isinstance(response_or_link, Link): #TODO TemplateResponse with a link response = self.generate_response(response_or_link) else: response = response_or_link if not self.cacheable and isinstance(response, http.HttpResponse): add_never_cache_headers(response) return response def get_common_state_data(self): """ Return state data that should be available at the resource level for processing the api request """ return {} def handle_link_submission(self, api_request): """ Looks up the appropriate link for the HTTP Method and returns the link. If Method is in `self.submit_methods` then return the result of submitting the link. 
""" method = api_request.method.upper() proto = self.get_link_prototype_for_method(method) if proto: if proto.show_link(): kwargs = {'use_request_url':True} if method in self.submit_methods: #TODO other kwargs may be added kwargs['form_kwargs'] = api_request.payload kwargs = self.get_link_kwargs(**kwargs) link = proto.get_link(**kwargs) if method in self.submit_methods: response_link = link.submit() return response_link return link else: return http.HttpResponseForbidden(_(u"You may not access this endpoint")) return http.HttpResponseBadRequest(_(u"Method %s is not allowed") % method) def options(self, api_request): links = self.get_available_links() return self.generate_options_response(links=links)
webcube/django-hyperadmin
hyperadmin/views.py
Python
bsd-3-clause
5,021
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from django.utils.translation import ugettext_lazy as _

from taiga.base import exceptions as exc
from taiga.base import response
from taiga.base.api.viewsets import GenericViewSet
from taiga.base.utils import json
from taiga.projects.models import Project

from .exceptions import ActionSyntaxException


class BaseWebhookApiViewSet(GenericViewSet):
    # We don't want rest framework to parse the request body and transform it
    # into a dict in request.DATA, we need it raw
    parser_classes = ()

    # This dict associates the event names we are listening for
    # with their responsible classes (extending event_hooks.BaseEventHook)
    event_hook_classes = {}

    def _validate_signature(self, project, request):
        raise NotImplementedError

    def _get_project(self, request):
        project_id = request.GET.get("project", None)
        try:
            project = Project.objects.get(id=project_id)
            return project
        except Project.DoesNotExist:
            return None

    def _get_payload(self, request):
        try:
            payload = json.loads(request.body.decode("utf-8"))
        except ValueError:
            raise exc.BadRequest(_("The payload is not a valid json"))
        return payload

    def _get_event_name(self, request):
        raise NotImplementedError

    def create(self, request, *args, **kwargs):
        project = self._get_project(request)
        if not project:
            raise exc.BadRequest(_("The project doesn't exist"))

        if not self._validate_signature(project, request):
            raise exc.BadRequest(_("Bad signature"))

        event_name = self._get_event_name(request)
        payload = self._get_payload(request)

        event_hook_class = self.event_hook_classes.get(event_name, None)
        if event_hook_class is not None:
            event_hook = event_hook_class(project, payload)
            try:
                event_hook.process_event()
            except ActionSyntaxException as e:
                raise exc.BadRequest(e)

        return response.NoContent()
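# --- Illustrative sketch (not part of taiga/hooks/api.py) ---
# A concrete webhook viewset fills in event_hook_classes and the two
# NotImplementedError hooks above. The subclass below is hypothetical and only
# shows the expected shape; real integrations ship their own event hook classes.

class _ExamplePushEventHook:
    """Hypothetical stand-in for an event_hooks.BaseEventHook subclass."""

    def __init__(self, project, payload):
        self.project = project
        self.payload = payload

    def process_event(self):
        pass  # a real hook would create issues, comments, etc. from the payload


class _ExampleWebhookApiViewSet(BaseWebhookApiViewSet):
    event_hook_classes = {"push": _ExamplePushEventHook}

    def _validate_signature(self, project, request):
        return True  # a real integration would verify an HMAC of request.body

    def _get_event_name(self, request):
        return request.META.get("HTTP_X_EXAMPLE_EVENT")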
Zaneh-/bearded-tribble-back
taiga/hooks/api.py
Python
agpl-3.0
2,881
#!/usr/bin/env python # -*- coding: utf-8 -*- # These imports are for python3 compatibility inside python2 from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import time import cachetools from apex.aprs import util as aprs_util from .util import echo_colorized_frame from .util import echo_colorized_warning class NonrepeatingBuffer(object): def __init__(self, base_tnc, base_name, base_port=None, echo_packets=True, buffer_size=10000, buffer_time=30): self.packet_cache = cachetools.TTLCache(buffer_size, buffer_time) self.lock = threading.Lock() self.base_tnc = base_tnc self.base_port = base_port self.base_name = base_name self.echo_packets = echo_packets @property def port(self): return self.base_port @property def name(self): return self.base_name def connect(self, *args, **kwargs): self.base_tnc.connect(*args, **kwargs) def close(self, *args, **kwargs): self.base_tnc.close(*args, **kwargs) def write(self, frame, *args, **kwargs): with self.lock: frame_hash = str(aprs_util.hash_frame(frame)) if frame_hash not in self.packet_cache: self.packet_cache[frame_hash] = frame if self.base_port: self.base_tnc.write(frame, self.base_port) else: self.base_tnc.write(frame) if self.echo_packets: echo_colorized_frame(frame, self.base_name, False) def read(self, *args, **kwargs): with self.lock: frame = self.base_tnc.read(*args, **kwargs) if not frame: return frame frame_hash = str(aprs_util.hash_frame(frame)) if frame_hash not in self.packet_cache: self.packet_cache[frame_hash] = frame if self.echo_packets: echo_colorized_frame(frame, self.base_name, True) return frame else: return None class ReconnectingPacketBuffer(object): STARTING_WAIT_TIME = 2 MAX_WAIT_TIME = 300 WAIT_TIME_MULTIPLIER = 2 MAX_INDEX = 1000000 def __init__(self, packet_layer): self.packet_layer = packet_layer self.to_packet_layer = cachetools.TTLCache(10, 30) self.current_index = 0 self.from_packet_layer = cachetools.TTLCache(10, 30) self.connect_thread = None self.lock = threading.Lock() self.running = False self.reconnect_wait_time = self.STARTING_WAIT_TIME self.last_connect_attempt = None self.connect_args = None self.connect_kwargs = None self.connected = False def __increment_wait_time(self): self.reconnect_wait_time *= self.WAIT_TIME_MULTIPLIER if self.reconnect_wait_time > self.MAX_WAIT_TIME: self.reconnect_wait_time = self.MAX_WAIT_TIME def __reset_wait_time(self): self.reconnect_wait_time = self.STARTING_WAIT_TIME def __run(self): while self.running: if not self.connected: if not self.last_connect_attempt or time.time() - self.last_connect_attempt > self.reconnect_wait_time: try: self.last_connect_attempt = time.time() self.packet_layer.connect(*self.connect_args, **self.connect_kwargs) self.connected = True except IOError: echo_colorized_warning('Could not connect, will reattempt.') try: self.packet_layer.close() except IOError: pass self.__increment_wait_time() else: time.sleep(1) else: io_occured = False # lets attempt to read in a packet try: read_packet = self.packet_layer.read() self.__reset_wait_time() if read_packet: with self.lock: self.from_packet_layer[str(aprs_util.hash_frame(read_packet))] = read_packet io_occured = True except IOError: echo_colorized_warning('Read failed. Will disconnect and attempt to reconnect.') try: self.packet_layer.close() except IOError: pass self.connected = False continue # lets try to write a packet, if any are waiting. 
                write_packet = None
                with self.lock:
                    if self.to_packet_layer:
                        write_packet = self.to_packet_layer.popitem()[1]

                if write_packet:
                    try:
                        self.packet_layer.write(write_packet)
                        io_occured = True
                        self.__reset_wait_time()
                    except IOError:
                        echo_colorized_warning('Write failed. Will disconnect and attempt to reconnect.')
                        # re-queue the frame whose write failed so it is retried after reconnect
                        self.to_packet_layer[str(aprs_util.hash_frame(write_packet))] = write_packet
                        try:
                            self.packet_layer.close()
                        except IOError:
                            pass
                        self.connected = False
                        continue

                if not io_occured:
                    time.sleep(1)

        try:
            self.packet_layer.close()
        except IOError:
            pass

    def connect(self, *args, **kwargs):
        with self.lock:
            if self.connect_thread:
                raise RuntimeError('already connected')
            self.running = True
            self.connect_args = args
            self.connect_kwargs = kwargs
            self.connect_thread = threading.Thread(target=self.__run)
            self.connect_thread.start()

    def close(self):
        with self.lock:
            if not self.connect_thread:
                raise RuntimeError('not connected')
            self.running = False
            self.connect_thread.join()
            self.connect_thread = None

    def read(self):
        with self.lock:
            if self.from_packet_layer:
                return self.from_packet_layer.popitem()[1]
            return None

    def write(self, packet):
        with self.lock:
            self.to_packet_layer[str(aprs_util.hash_frame(packet))] = packet
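# --- Illustrative sketch (not part of src/apex/buffers.py) ---
# NonrepeatingBuffer suppresses duplicate frames by remembering frame hashes in
# a cachetools.TTLCache, so a repeat seen within the TTL window is dropped.
# The snippet below shows that dedup-with-expiry idea in isolation, with
# hypothetical string keys instead of real APRS frames (cachetools is already
# imported at the top of this module).

_seen = cachetools.TTLCache(maxsize=10000, ttl=30)  # sizes mirror the defaults above

def _should_emit(frame_key):
    """Return True only the first time frame_key is seen within the TTL window."""
    if frame_key in _seen:
        return False
    _seen[frame_key] = True
    return True

assert _should_emit('frame-1') is True
assert _should_emit('frame-1') is False   # duplicate within 30 s is dropped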
Syncleus/apex
src/apex/buffers.py
Python
apache-2.0
6,760
#!/usr/bin/python2.4 # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import common import xml.dom import xml.dom.minidom #------------------------------------------------------------------------------ class Writer(object): """Visual Studio XML tool file writer.""" def __init__(self, tool_file_path): """Initializes the tool file. Args: tool_file_path: Path to the tool file. """ self.tool_file_path = tool_file_path self.doc = None def Create(self, name): """Creates the tool file document. Args: name: Name of the tool file. """ self.name = name # Create XML doc xml_impl = xml.dom.getDOMImplementation() self.doc = xml_impl.createDocument(None, 'VisualStudioToolFile', None) # Add attributes to root element self.n_root = self.doc.documentElement self.n_root.setAttribute('Version', '8.00') self.n_root.setAttribute('Name', self.name) # Add rules section self.n_rules = self.doc.createElement('Rules') self.n_root.appendChild(self.n_rules) def AddCustomBuildRule(self, name, cmd, description, additional_dependencies, outputs, extensions): """Adds a rule to the tool file. Args: name: Name of the rule. description: Description of the rule. cmd: Command line of the rule. additional_dependencies: other files which may trigger the rule. outputs: outputs of the rule. extensions: extensions handled by the rule. """ n_rule = self.doc.createElement('CustomBuildRule') n_rule.setAttribute('Name', name) n_rule.setAttribute('ExecutionDescription', description) n_rule.setAttribute('CommandLine', cmd) n_rule.setAttribute('Outputs', ';'.join(outputs)) n_rule.setAttribute('FileExtensions', ';'.join(extensions)) n_rule.setAttribute('AdditionalDependencies', ';'.join(additional_dependencies)) self.n_rules.appendChild(n_rule) def Write(self, writer=common.WriteOnDiff): """Writes the tool file.""" f = writer(self.tool_file_path) self.doc.writexml(f, encoding='Windows-1252', addindent=' ', newl='\r\n') f.close() #------------------------------------------------------------------------------
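# --- Illustrative usage sketch (not part of this module) ---
# Assuming gyp's pylib (including its `common` helper) is importable, the
# Writer above is driven roughly as follows: create the document, register a
# custom build rule, then serialize. The rule contents are invented purely for
# illustration, and `writer=` is overridden so the sketch does not rely on
# common.WriteOnDiff.
#
#   tool = Writer('example.rules')
#   tool.Create('ExampleRules')
#   tool.AddCustomBuildRule(name='Flex',
#                           cmd='flex -o$(OutDir)\\$(InputName).c $(InputPath)',
#                           description='Running flex on $(InputFileName)',
#                           additional_dependencies=[],
#                           outputs=['$(OutDir)\\$(InputName).c'],
#                           extensions=['l'])
#   tool.Write(writer=lambda path: open(path, 'w'))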
gvaf/breakpad
src/tools/gyp/pylib/gyp/MSVSToolFile.py
Python
bsd-3-clause
2,430
# Bayes' Theorem # P(A|B) = P(B|A) P(A) / P(B) # A = "like" # B = has key value B # P(A) = |liked| / |url| # P(B) = |key| / |url| # P(B|A) = |liked from B| / |liked| # P(A|B) = ((|liked from B| / |liked|) (|liked| / |url|)) / (|key| / |url|) # = (|liked from B| / |url|) / (|key| / |url|) = |liked from B| / |key| from counter import Counter from scoredValue import ScoredValue from itemCounters import getCounter SMOOTHING_SCORE = 0.01 # Only used for "established" accounts class BayesScorer: def __init__(self, votes, keyMaker, universeCountKey): self.keyMaker = keyMaker self.universeKeyCount = getCounter(universeCountKey) self.userCounters = {'more': Counter(), 'less': Counter()} # If this is an "established account" introduce some smoothing. if len(votes) > 100: self.init_score = SMOOTHING_SCORE else: # Don't introduce smoothing because then all items will have some score. # If we don't have legitimate recs to show new accounts, best to just show # them popular stuff. self.init_score = 0 for v in votes: for k in keyMaker(v.item): if k in self.userCounters[v.type]: self.userCounters[v.type][k] += 1 else: self.userCounters[v.type][k] = 1 # XXX: shouldn't take universe count def getScore(self, item, universeKeyCount): keys = self.keyMaker(item) if(len(keys) < 1): return ScoredValue(0.0, '') debug_text = [] score_sum = self.init_score for k in keys: cur_score = self.getKeyScore(k) if cur_score != 0: debug_text.append("[%s, %f]" % (str(k), cur_score)) score_sum += cur_score debug_text = '\n\t'.join(debug_text) + '\n' # Normalize score_sum = max(0, score_sum) score_sum = min(1, score_sum) return ScoredValue(score_sum, debug_text) def getKeyScore(self, k): # XXX: quick fix. This should be refactored if isinstance(k, long): universeKey = str(k) else: universeKey = k if ((k in self.userCounters['more'] or k in self.userCounters['less']) and (universeKey in self.universeKeyCount)): moreVoteCount = 0 if k in self.userCounters['more']: moreVoteCount = self.userCounters['more'][k] lessVoteCount = 0 if k in self.userCounters['less']: lessVoteCount = self.userCounters['less'][k] score = float(moreVoteCount - lessVoteCount) / self.universeKeyCount[universeKey] assert(-1 <= score <= 1) return score return 0 def report(self): sortedKeys = sorted(self.userCounters['more'].count, key=self.getKeyScore, reverse=True) for i in sortedKeys: print("%s, %s" % (i, self.getKeyScore(i)))
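# --- Illustrative sketch (not part of bayesScorer.py) ---
# Worked numbers for the derivation in the header comment: with 4 liked items
# out of 20 urls, 5 urls carrying key B, and 3 liked items carrying B, the
# Bayes expression collapses to |liked from B| / |key|, exactly as stated.

liked, urls, key, liked_from_B = 4.0, 20.0, 5.0, 3.0
p_A = liked / urls                   # P(like)
p_B = key / urls                     # P(has key B)
p_B_given_A = liked_from_B / liked   # P(B | like)
p_A_given_B = p_B_given_A * p_A / p_B
assert abs(p_A_given_B - liked_from_B / key) < 1e-9   # both equal 0.6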
TobyRoseman/PS4M
engine/analyzers/bayesScorer.py
Python
mit
3,041
from abc import ABCMeta, abstractmethod, abstractproperty from contextlib import contextmanager from functools import wraps import gzip from inspect import getargspec from itertools import ( combinations, count, product, ) import operator import os from os.path import abspath, dirname, join, realpath import shutil from sys import _getframe import tempfile from logbook import TestHandler from mock import patch from nose.tools import nottest from numpy.testing import assert_allclose, assert_array_equal import pandas as pd from six import itervalues, iteritems, with_metaclass from six.moves import filter, map from sqlalchemy import create_engine from testfixtures import TempDirectory from toolz import concat, curry from zipline.assets import AssetFinder, AssetDBWriter from zipline.assets.synthetic import make_simple_equity_info from zipline.data.data_portal import DataPortal from zipline.data.loader import get_benchmark_filename, INDEX_MAPPING from zipline.data.minute_bars import ( BcolzMinuteBarReader, BcolzMinuteBarWriter, US_EQUITIES_MINUTES_PER_DAY ) from zipline.data.us_equity_pricing import ( BcolzDailyBarReader, BcolzDailyBarWriter, SQLiteAdjustmentWriter, ) from zipline.finance.blotter import Blotter from zipline.finance.trading import TradingEnvironment from zipline.finance.order import ORDER_STATUS from zipline.lib.labelarray import LabelArray from zipline.pipeline.data import USEquityPricing from zipline.pipeline.engine import SimplePipelineEngine from zipline.pipeline.factors import CustomFactor from zipline.pipeline.loaders.testing import make_seeded_random_loader from zipline.utils import security_list from zipline.utils.calendars import get_calendar from zipline.utils.input_validation import expect_dimensions from zipline.utils.numpy_utils import as_column, isnat from zipline.utils.pandas_utils import timedelta_to_integral_seconds from zipline.utils.paths import ensure_directory from zipline.utils.sentinel import sentinel import numpy as np from numpy import float64 EPOCH = pd.Timestamp(0, tz='UTC') def seconds_to_timestamp(seconds): return pd.Timestamp(seconds, unit='s', tz='UTC') def to_utc(time_str): """Convert a string in US/Eastern time to UTC""" return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC') def str_to_seconds(s): """ Convert a pandas-intelligible string to (integer) seconds since UTC. 
>>> from pandas import Timestamp >>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds() 1388534400.0 >>> str_to_seconds('2014-01-01') 1388534400 """ return timedelta_to_integral_seconds(pd.Timestamp(s, tz='UTC') - EPOCH) def drain_zipline(test, zipline): output = [] transaction_count = 0 msg_counter = 0 # start the simulation for update in zipline: msg_counter += 1 output.append(update) if 'daily_perf' in update: transaction_count += \ len(update['daily_perf']['transactions']) return output, transaction_count def check_algo_results(test, results, expected_transactions_count=None, expected_order_count=None, expected_positions_count=None, sid=None): if expected_transactions_count is not None: txns = flatten_list(results["transactions"]) test.assertEqual(expected_transactions_count, len(txns)) if expected_positions_count is not None: raise NotImplementedError if expected_order_count is not None: # de-dup orders on id, because orders are put back into perf packets # whenever they a txn is filled orders = set([order['id'] for order in flatten_list(results["orders"])]) test.assertEqual(expected_order_count, len(orders)) def flatten_list(list): return [item for sublist in list for item in sublist] def assert_single_position(test, zipline): output, transaction_count = drain_zipline(test, zipline) if 'expected_transactions' in test.zipline_test_config: test.assertEqual( test.zipline_test_config['expected_transactions'], transaction_count ) else: test.assertEqual( test.zipline_test_config['order_count'], transaction_count ) # the final message is the risk report, the second to # last is the final day's results. Positions is a list of # dicts. closing_positions = output[-2]['daily_perf']['positions'] # confirm that all orders were filled. # iterate over the output updates, overwriting # orders when they are updated. Then check the status on all. orders_by_id = {} for update in output: if 'daily_perf' in update: if 'orders' in update['daily_perf']: for order in update['daily_perf']['orders']: orders_by_id[order['id']] = order for order in itervalues(orders_by_id): test.assertEqual( order['status'], ORDER_STATUS.FILLED, "") test.assertEqual( len(closing_positions), 1, "Portfolio should have one position." 
) sid = test.zipline_test_config['sid'] test.assertEqual( closing_positions[0]['sid'], sid, "Portfolio should have one position in " + str(sid) ) return output, transaction_count @contextmanager def security_list_copy(): old_dir = security_list.SECURITY_LISTS_DIR new_dir = tempfile.mkdtemp() try: for subdir in os.listdir(old_dir): shutil.copytree(os.path.join(old_dir, subdir), os.path.join(new_dir, subdir)) with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \ patch.object(security_list, 'using_copy', True, create=True): yield finally: shutil.rmtree(new_dir, True) def add_security_data(adds, deletes): if not hasattr(security_list, 'using_copy'): raise Exception('add_security_data must be used within ' 'security_list_copy context') directory = os.path.join( security_list.SECURITY_LISTS_DIR, "leveraged_etf_list/20150127/20150125" ) if not os.path.exists(directory): os.makedirs(directory) del_path = os.path.join(directory, "delete") with open(del_path, 'w') as f: for sym in deletes: f.write(sym) f.write('\n') add_path = os.path.join(directory, "add") with open(add_path, 'w') as f: for sym in adds: f.write(sym) f.write('\n') def all_pairs_matching_predicate(values, pred): """ Return an iterator of all pairs, (v0, v1) from values such that `pred(v0, v1) == True` Parameters ---------- values : iterable pred : function Returns ------- pairs_iterator : generator Generator yielding pairs matching `pred`. Examples -------- >>> from zipline.testing import all_pairs_matching_predicate >>> from operator import eq, lt >>> list(all_pairs_matching_predicate(range(5), eq)) [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] >>> list(all_pairs_matching_predicate("abcd", lt)) [('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')] """ return filter(lambda pair: pred(*pair), product(values, repeat=2)) def product_upper_triangle(values, include_diagonal=False): """ Return an iterator over pairs, (v0, v1), drawn from values. If `include_diagonal` is True, returns all pairs such that v0 <= v1. If `include_diagonal` is False, returns all pairs such that v0 < v1. """ return all_pairs_matching_predicate( values, operator.le if include_diagonal else operator.lt, ) def all_subindices(index): """ Return all valid sub-indices of a pandas Index. """ return ( index[start:stop] for start, stop in product_upper_triangle(range(len(index) + 1)) ) def chrange(start, stop): """ Construct an iterable of length-1 strings beginning with `start` and ending with `stop`. Parameters ---------- start : str The first character. stop : str The last character. Returns ------- chars: iterable[str] Iterable of strings beginning with start and ending with stop. Examples -------- >>> chrange('A', 'C') ['A', 'B', 'C'] """ return list(map(chr, range(ord(start), ord(stop) + 1))) def make_trade_data_for_asset_info(dates, asset_info, price_start, price_step_by_date, price_step_by_sid, volume_start, volume_step_by_date, volume_step_by_sid, frequency, writer=None): """ Convert the asset info dataframe into a dataframe of trade data for each sid, and write to the writer if provided. Write NaNs for locations where assets did not exist. Return a dict of the dataframes, keyed by sid. 
""" trade_data = {} sids = asset_info.index price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid price_date_deltas = (np.arange(len(dates), dtype=float64) * price_step_by_date) prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid volume_date_deltas = np.arange(len(dates)) * volume_step_by_date volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start for j, sid in enumerate(sids): start_date, end_date = asset_info.loc[sid, ['start_date', 'end_date']] # Normalize here so the we still generate non-NaN values on the minutes # for an asset's last trading day. for i, date in enumerate(dates.normalize()): if not (start_date <= date <= end_date): prices[i, j] = 0 volumes[i, j] = 0 df = pd.DataFrame( { "open": prices[:, j], "high": prices[:, j], "low": prices[:, j], "close": prices[:, j], "volume": volumes[:, j], }, index=dates, ) if writer: writer.write_sid(sid, df) trade_data[sid] = df return trade_data def check_allclose(actual, desired, rtol=1e-07, atol=0, err_msg='', verbose=True): """ Wrapper around np.testing.assert_allclose that also verifies that inputs are ndarrays. See Also -------- np.assert_allclose """ if type(actual) != type(desired): raise AssertionError("%s != %s" % (type(actual), type(desired))) return assert_allclose( actual, desired, atol=atol, rtol=rtol, err_msg=err_msg, verbose=verbose, ) def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True): """ Wrapper around np.testing.assert_array_equal that also verifies that inputs are ndarrays. See Also -------- np.assert_array_equal """ assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y)) assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y) if isinstance(x, LabelArray): # Check that both arrays have missing values in the same locations... assert_array_equal( x.is_missing(), y.is_missing(), err_msg=err_msg, verbose=verbose, ) # ...then check the actual values as well. x = x.as_string_array() y = y.as_string_array() elif x.dtype.kind in 'mM': x_isnat = isnat(x) y_isnat = isnat(y) assert_array_equal( x_isnat, y_isnat, err_msg="NaTs not equal", verbose=verbose, ) # Fill NaTs with zero for comparison. x = np.where(x_isnat, np.zeros_like(x), x) y = np.where(y_isnat, np.zeros_like(y), y) return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose) class UnexpectedAttributeAccess(Exception): pass class ExplodingObject(object): """ Object that will raise an exception on any attribute access. Useful for verifying that an object is never touched during a function/method call. 
""" def __getattribute__(self, name): raise UnexpectedAttributeAccess(name) def write_minute_data(trading_calendar, tempdir, minutes, sids): first_session = trading_calendar.minute_to_session_label( minutes[0], direction="none" ) last_session = trading_calendar.minute_to_session_label( minutes[-1], direction="none" ) sessions = trading_calendar.sessions_in_range(first_session, last_session) write_bcolz_minute_data( trading_calendar, sessions, tempdir.path, create_minute_bar_data(minutes, sids), ) return tempdir.path def create_minute_bar_data(minutes, sids): length = len(minutes) for sid_idx, sid in enumerate(sids): yield sid, pd.DataFrame( { 'open': np.arange(length) + 10 + sid_idx, 'high': np.arange(length) + 15 + sid_idx, 'low': np.arange(length) + 8 + sid_idx, 'close': np.arange(length) + 10 + sid_idx, 'volume': 100 + sid_idx, }, index=minutes, ) def create_daily_bar_data(sessions, sids): length = len(sessions) for sid_idx, sid in enumerate(sids): yield sid, pd.DataFrame( { "open": (np.array(range(10, 10 + length)) + sid_idx), "high": (np.array(range(15, 15 + length)) + sid_idx), "low": (np.array(range(8, 8 + length)) + sid_idx), "close": (np.array(range(10, 10 + length)) + sid_idx), "volume": np.array(range(100, 100 + length)) + sid_idx, "day": [session.value for session in sessions] }, index=sessions, ) def write_daily_data(tempdir, sim_params, sids, trading_calendar): path = os.path.join(tempdir.path, "testdaily.bcolz") BcolzDailyBarWriter(path, trading_calendar, sim_params.start_session, sim_params.end_session).write( create_daily_bar_data(sim_params.sessions, sids), ) return path def create_data_portal(asset_finder, tempdir, sim_params, sids, trading_calendar, adjustment_reader=None): if sim_params.data_frequency == "daily": daily_path = write_daily_data(tempdir, sim_params, sids, trading_calendar) equity_daily_reader = BcolzDailyBarReader(daily_path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_daily_reader.first_trading_day, equity_daily_reader=equity_daily_reader, adjustment_reader=adjustment_reader ) else: minutes = trading_calendar.minutes_in_range( sim_params.first_open, sim_params.last_close ) minute_path = write_minute_data(trading_calendar, tempdir, minutes, sids) equity_minute_reader = BcolzMinuteBarReader(minute_path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_minute_reader.first_trading_day, equity_minute_reader=equity_minute_reader, adjustment_reader=adjustment_reader ) def write_bcolz_minute_data(trading_calendar, days, path, data): BcolzMinuteBarWriter( path, trading_calendar, days[0], days[-1], US_EQUITIES_MINUTES_PER_DAY ).write(data) def create_minute_df_for_asset(trading_calendar, start_dt, end_dt, interval=1, start_val=1, minute_blacklist=None): asset_minutes = trading_calendar.minutes_for_sessions_in_range( start_dt, end_dt ) minutes_count = len(asset_minutes) minutes_arr = np.array(range(start_val, start_val + minutes_count)) df = pd.DataFrame( { "open": minutes_arr + 1, "high": minutes_arr + 2, "low": minutes_arr - 1, "close": minutes_arr, "volume": 100 * minutes_arr, }, index=asset_minutes, ) if interval > 1: counter = 0 while counter < len(minutes_arr): df[counter:(counter + interval - 1)] = 0 counter += interval if minute_blacklist is not None: for minute in minute_blacklist: df.loc[minute] = 0 return df def create_daily_df_for_asset(trading_calendar, start_day, end_day, interval=1): days = trading_calendar.sessions_in_range(start_day, end_day) days_count = len(days) days_arr = 
np.arange(days_count) + 2 df = pd.DataFrame( { "open": days_arr + 1, "high": days_arr + 2, "low": days_arr - 1, "close": days_arr, "volume": days_arr * 100, }, index=days, ) if interval > 1: # only keep every 'interval' rows for idx, _ in enumerate(days_arr): if (idx + 1) % interval != 0: df["open"].iloc[idx] = 0 df["high"].iloc[idx] = 0 df["low"].iloc[idx] = 0 df["close"].iloc[idx] = 0 df["volume"].iloc[idx] = 0 return df def trades_by_sid_to_dfs(trades_by_sid, index): for sidint, trades in iteritems(trades_by_sid): opens = [] highs = [] lows = [] closes = [] volumes = [] for trade in trades: opens.append(trade.open_price) highs.append(trade.high) lows.append(trade.low) closes.append(trade.close_price) volumes.append(trade.volume) yield sidint, pd.DataFrame( { "open": opens, "high": highs, "low": lows, "close": closes, "volume": volumes, }, index=index, ) def create_data_portal_from_trade_history(asset_finder, trading_calendar, tempdir, sim_params, trades_by_sid): if sim_params.data_frequency == "daily": path = os.path.join(tempdir.path, "testdaily.bcolz") writer = BcolzDailyBarWriter( path, trading_calendar, sim_params.start_session, sim_params.end_session ) writer.write( trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions), ) equity_daily_reader = BcolzDailyBarReader(path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_daily_reader.first_trading_day, equity_daily_reader=equity_daily_reader, ) else: minutes = trading_calendar.minutes_in_range( sim_params.first_open, sim_params.last_close ) length = len(minutes) assets = {} for sidint, trades in iteritems(trades_by_sid): opens = np.zeros(length) highs = np.zeros(length) lows = np.zeros(length) closes = np.zeros(length) volumes = np.zeros(length) for trade in trades: # put them in the right place idx = minutes.searchsorted(trade.dt) opens[idx] = trade.open_price * 1000 highs[idx] = trade.high * 1000 lows[idx] = trade.low * 1000 closes[idx] = trade.close_price * 1000 volumes[idx] = trade.volume assets[sidint] = pd.DataFrame({ "open": opens, "high": highs, "low": lows, "close": closes, "volume": volumes, "dt": minutes }).set_index("dt") write_bcolz_minute_data( trading_calendar, sim_params.sessions, tempdir.path, assets ) equity_minute_reader = BcolzMinuteBarReader(tempdir.path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_minute_reader.first_trading_day, equity_minute_reader=equity_minute_reader, ) class FakeDataPortal(DataPortal): def __init__(self, env, trading_calendar=None, first_trading_day=None): if trading_calendar is None: trading_calendar = get_calendar("NYSE") super(FakeDataPortal, self).__init__(env.asset_finder, trading_calendar, first_trading_day) def get_spot_value(self, asset, field, dt, data_frequency): if field == "volume": return 100 else: return 1.0 def get_history_window(self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True): if frequency == "1d": end_idx = \ self.trading_calendar.all_sessions.searchsorted(end_dt) days = self.trading_calendar.all_sessions[ (end_idx - bar_count + 1):(end_idx + 1) ] df = pd.DataFrame( np.full((bar_count, len(assets)), 100.0), index=days, columns=assets ) return df class FetcherDataPortal(DataPortal): """ Mock dataportal that returns fake data for history and non-fetcher spot value. 
""" def __init__(self, asset_finder, trading_calendar, first_trading_day=None): super(FetcherDataPortal, self).__init__(asset_finder, trading_calendar, first_trading_day) def get_spot_value(self, asset, field, dt, data_frequency): # if this is a fetcher field, exercise the regular code path if self._is_extra_source(asset, field, self._augmented_sources_map): return super(FetcherDataPortal, self).get_spot_value( asset, field, dt, data_frequency) # otherwise just return a fixed value return int(asset) # XXX: These aren't actually the methods that are used by the superclasses, # so these don't do anything, and this class will likely produce unexpected # results for history(). def _get_daily_window_for_sid(self, asset, field, days_in_window, extra_slot=True): return np.arange(days_in_window, dtype=np.float64) def _get_minute_window_for_asset(self, asset, field, minutes_for_window): return np.arange(minutes_for_window, dtype=np.float64) class tmp_assets_db(object): """Create a temporary assets sqlite database. This is meant to be used as a context manager. Parameters ---------- url : string The URL for the database connection. **frames The frames to pass to the AssetDBWriter. By default this maps equities: ('A', 'B', 'C') -> map(ord, 'ABC') See Also -------- empty_assets_db tmp_asset_finder """ _default_equities = sentinel('_default_equities') def __init__(self, url='sqlite:///:memory:', equities=_default_equities, **frames): self._url = url self._eng = None if equities is self._default_equities: equities = make_simple_equity_info( list(map(ord, 'ABC')), pd.Timestamp(0), pd.Timestamp('2015'), ) frames['equities'] = equities self._frames = frames self._eng = None # set in enter and exit def __enter__(self): self._eng = eng = create_engine(self._url) AssetDBWriter(eng).write(**self._frames) return eng def __exit__(self, *excinfo): assert self._eng is not None, '_eng was not set in __enter__' self._eng.dispose() self._eng = None def empty_assets_db(): """Context manager for creating an empty assets db. See Also -------- tmp_assets_db """ return tmp_assets_db(equities=None) class tmp_asset_finder(tmp_assets_db): """Create a temporary asset finder using an in memory sqlite db. Parameters ---------- url : string The URL for the database connection. finder_cls : type, optional The type of asset finder to create from the assets db. **frames Forwarded to ``tmp_assets_db``. See Also -------- tmp_assets_db """ def __init__(self, url='sqlite:///:memory:', finder_cls=AssetFinder, **frames): self._finder_cls = finder_cls super(tmp_asset_finder, self).__init__(url=url, **frames) def __enter__(self): return self._finder_cls(super(tmp_asset_finder, self).__enter__()) def empty_asset_finder(): """Context manager for creating an empty asset finder. See Also -------- empty_assets_db tmp_assets_db tmp_asset_finder """ return tmp_asset_finder(equities=None) class tmp_trading_env(tmp_asset_finder): """Create a temporary trading environment. Parameters ---------- load : callable, optional Function that returns benchmark returns and treasury curves. finder_cls : type, optional The type of asset finder to create from the assets db. **frames Forwarded to ``tmp_assets_db``. 
See Also -------- empty_trading_env tmp_asset_finder """ def __init__(self, load=None, *args, **kwargs): super(tmp_trading_env, self).__init__(*args, **kwargs) self._load = load def __enter__(self): return TradingEnvironment( load=self._load, asset_db_path=super(tmp_trading_env, self).__enter__().engine, ) def empty_trading_env(): return tmp_trading_env(equities=None) class SubTestFailures(AssertionError): def __init__(self, *failures): self.failures = failures def __str__(self): return 'failures:\n %s' % '\n '.join( '\n '.join(( ', '.join('%s=%r' % item for item in scope.items()), '%s: %s' % (type(exc).__name__, exc), )) for scope, exc in self.failures, ) @nottest def subtest(iterator, *_names): """ Construct a subtest in a unittest. Consider using ``zipline.testing.parameter_space`` when subtests are constructed over a single input or over the cross-product of multiple inputs. ``subtest`` works by decorating a function as a subtest. The decorated function will be run by iterating over the ``iterator`` and *unpacking the values into the function. If any of the runs fail, the result will be put into a set and the rest of the tests will be run. Finally, if any failed, all of the results will be dumped as one failure. Parameters ---------- iterator : iterable[iterable] The iterator of arguments to pass to the function. *name : iterator[str] The names to use for each element of ``iterator``. These will be used to print the scope when a test fails. If not provided, it will use the integer index of the value as the name. Examples -------- :: class MyTest(TestCase): def test_thing(self): # Example usage inside another test. @subtest(([n] for n in range(100000)), 'n') def subtest(n): self.assertEqual(n % 2, 0, 'n was not even') subtest() @subtest(([n] for n in range(100000)), 'n') def test_decorated_function(self, n): # Example usage to parameterize an entire function. self.assertEqual(n % 2, 1, 'n was not odd') Notes ----- We use this when we: * Will never want to run each parameter individually. * Have a large parameter space we are testing (see tests/utils/test_events.py). ``nose_parameterized.expand`` will create a test for each parameter combination which bloats the test output and makes the travis pages slow. We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and nose2 do not support ``addSubTest``. 
See Also -------- zipline.testing.parameter_space """ def dec(f): @wraps(f) def wrapped(*args, **kwargs): names = _names failures = [] for scope in iterator: scope = tuple(scope) try: f(*args + scope, **kwargs) except Exception as e: if not names: names = count() failures.append((dict(zip(names, scope)), e)) if failures: raise SubTestFailures(*failures) return wrapped return dec class MockDailyBarReader(object): def get_value(self, col, sid, dt): return 100 def create_mock_adjustment_data(splits=None, dividends=None, mergers=None): if splits is None: splits = create_empty_splits_mergers_frame() elif not isinstance(splits, pd.DataFrame): splits = pd.DataFrame(splits) if mergers is None: mergers = create_empty_splits_mergers_frame() elif not isinstance(mergers, pd.DataFrame): mergers = pd.DataFrame(mergers) if dividends is None: dividends = create_empty_dividends_frame() elif not isinstance(dividends, pd.DataFrame): dividends = pd.DataFrame(dividends) return splits, mergers, dividends def create_mock_adjustments(tempdir, days, splits=None, dividends=None, mergers=None): path = tempdir.getpath("test_adjustments.db") SQLiteAdjustmentWriter(path, MockDailyBarReader(), days).write( *create_mock_adjustment_data(splits, dividends, mergers) ) return path def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""): """ Assert that two pandas Timestamp objects are the same. Parameters ---------- left, right : pd.Timestamp The values to compare. compare_nat_equal : bool, optional Whether to consider `NaT` values equal. Defaults to True. msg : str, optional A message to forward to `pd.util.testing.assert_equal`. """ if compare_nat_equal and left is pd.NaT and right is pd.NaT: return return pd.util.testing.assert_equal(left, right, msg=msg) def powerset(values): """ Return the power set (i.e., the set of all subsets) of entries in `values`. """ return concat(combinations(values, i) for i in range(len(values) + 1)) def to_series(knowledge_dates, earning_dates): """ Helper for converting a dict of strings to a Series of datetimes. This is just for making the test cases more readable. """ return pd.Series( index=pd.to_datetime(knowledge_dates), data=pd.to_datetime(earning_dates), ) def gen_calendars(start, stop, critical_dates): """ Generate calendars to use as inputs. """ all_dates = pd.date_range(start, stop, tz='utc') for to_drop in map(list, powerset(critical_dates)): # Have to yield tuples. yield (all_dates.drop(to_drop),) # Also test with the trading calendar. trading_days = get_calendar("NYSE").all_days yield (trading_days[trading_days.slice_indexer(start, stop)],) @contextmanager def temp_pipeline_engine(calendar, sids, random_seed, symbols=None): """ A contextManager that yields a SimplePipelineEngine holding a reference to an AssetFinder generated via tmp_asset_finder. Parameters ---------- calendar : pd.DatetimeIndex Calendar to pass to the constructed PipelineEngine. sids : iterable[int] Sids to use for the temp asset finder. random_seed : int Integer used to seed instances of SeededRandomLoader. symbols : iterable[str], optional Symbols for constructed assets. Forwarded to make_simple_equity_info. 
""" equity_info = make_simple_equity_info( sids=sids, start_date=calendar[0], end_date=calendar[-1], symbols=symbols, ) loader = make_seeded_random_loader(random_seed, calendar, sids) def get_loader(column): return loader with tmp_asset_finder(equities=equity_info) as finder: yield SimplePipelineEngine(get_loader, calendar, finder) def parameter_space(__fail_fast=False, **params): """ Wrapper around subtest that allows passing keywords mapping names to iterables of values. The decorated test function will be called with the cross-product of all possible inputs Examples -------- >>> from unittest import TestCase >>> class SomeTestCase(TestCase): ... @parameter_space(x=[1, 2], y=[2, 3]) ... def test_some_func(self, x, y): ... # Will be called with every possible combination of x and y. ... self.assertEqual(somefunc(x, y), expected_result(x, y)) See Also -------- zipline.testing.subtest """ def decorator(f): argspec = getargspec(f) if argspec.varargs: raise AssertionError("parameter_space() doesn't support *args") if argspec.keywords: raise AssertionError("parameter_space() doesn't support **kwargs") if argspec.defaults: raise AssertionError("parameter_space() doesn't support defaults.") # Skip over implicit self. argnames = argspec.args if argnames[0] == 'self': argnames = argnames[1:] extra = set(params) - set(argnames) if extra: raise AssertionError( "Keywords %s supplied to parameter_space() are " "not in function signature." % extra ) unspecified = set(argnames) - set(params) if unspecified: raise AssertionError( "Function arguments %s were not " "supplied to parameter_space()." % extra ) def make_param_sets(): return product(*(params[name] for name in argnames)) if __fail_fast: @wraps(f) def wrapped(self): for args in make_param_sets(): f(self, *args) return wrapped else: @wraps(f) def wrapped(*args, **kwargs): subtest(make_param_sets(), *argnames)(f)(*args, **kwargs) return wrapped return decorator def create_empty_dividends_frame(): return pd.DataFrame( np.array( [], dtype=[ ('ex_date', 'datetime64[ns]'), ('pay_date', 'datetime64[ns]'), ('record_date', 'datetime64[ns]'), ('declared_date', 'datetime64[ns]'), ('amount', 'float64'), ('sid', 'int32'), ], ), index=pd.DatetimeIndex([], tz='UTC'), ) def create_empty_splits_mergers_frame(): return pd.DataFrame( np.array( [], dtype=[ ('effective_date', 'int64'), ('ratio', 'float64'), ('sid', 'int64'), ], ), index=pd.DatetimeIndex([]), ) def make_alternating_boolean_array(shape, first_value=True): """ Create a 2D numpy array with the given shape containing alternating values of False, True, False, True,... along each row and each column. Examples -------- >>> make_alternating_boolean_array((4,4)) array([[ True, False, True, False], [False, True, False, True], [ True, False, True, False], [False, True, False, True]], dtype=bool) >>> make_alternating_boolean_array((4,3), first_value=False) array([[False, True, False], [ True, False, True], [False, True, False], [ True, False, True]], dtype=bool) """ if len(shape) != 2: raise ValueError( 'Shape must be 2-dimensional. Given shape was {}'.format(shape) ) alternating = np.empty(shape, dtype=np.bool) for row in alternating: row[::2] = first_value row[1::2] = not(first_value) first_value = not(first_value) return alternating def make_cascading_boolean_array(shape, first_value=True): """ Create a numpy array with the given shape containing cascading boolean values, with `first_value` being the top-left value. 
Examples -------- >>> make_cascading_boolean_array((4,4)) array([[ True, True, True, False], [ True, True, False, False], [ True, False, False, False], [False, False, False, False]], dtype=bool) >>> make_cascading_boolean_array((4,2)) array([[ True, False], [False, False], [False, False], [False, False]], dtype=bool) >>> make_cascading_boolean_array((2,4)) array([[ True, True, True, False], [ True, True, False, False]], dtype=bool) """ if len(shape) != 2: raise ValueError( 'Shape must be 2-dimensional. Given shape was {}'.format(shape) ) cascading = np.full(shape, not(first_value), dtype=np.bool) ending_col = shape[1] - 1 for row in cascading: if ending_col > 0: row[:ending_col] = first_value ending_col -= 1 else: break return cascading @expect_dimensions(array=2) def permute_rows(seed, array): """ Shuffle each row in ``array`` based on permutations generated by ``seed``. Parameters ---------- seed : int Seed for numpy.RandomState array : np.ndarray[ndim=2] Array over which to apply permutations. """ rand = np.random.RandomState(seed) return np.apply_along_axis(rand.permutation, 1, array) @nottest def make_test_handler(testcase, *args, **kwargs): """ Returns a TestHandler which will be used by the given testcase. This handler can be used to test log messages. Parameters ---------- testcase: unittest.TestCase The test class in which the log handler will be used. *args, **kwargs Forwarded to the new TestHandler object. Returns ------- handler: logbook.TestHandler The handler to use for the test case. """ handler = TestHandler(*args, **kwargs) testcase.addCleanup(handler.close) return handler def write_compressed(path, content): """ Write a compressed (gzipped) file to `path`. """ with gzip.open(path, 'wb') as f: f.write(content) def read_compressed(path): """ Write a compressed (gzipped) file from `path`. """ with gzip.open(path, 'rb') as f: return f.read() zipline_git_root = abspath( join(realpath(dirname(__file__)), '..', '..'), ) @nottest def test_resource_path(*path_parts): return os.path.join(zipline_git_root, 'tests', 'resources', *path_parts) @contextmanager def patch_os_environment(remove=None, **values): """ Context manager for patching the operating system environment. """ old_values = {} remove = remove or [] for key in remove: old_values[key] = os.environ.pop(key) for key, value in values.iteritems(): old_values[key] = os.getenv(key) os.environ[key] = value try: yield finally: for old_key, old_value in old_values.iteritems(): if old_value is None: # Value was not present when we entered, so del it out if it's # still present. try: del os.environ[key] except KeyError: pass else: # Restore the old value. os.environ[old_key] = old_value class tmp_dir(TempDirectory, object): """New style class that wrapper for TempDirectory in python 2. """ pass class _TmpBarReader(with_metaclass(ABCMeta, tmp_dir)): """A helper for tmp_bcolz_equity_minute_bar_reader and tmp_bcolz_equity_daily_bar_reader. Parameters ---------- env : TradingEnvironment The trading env. days : pd.DatetimeIndex The days to write for. data : dict[int -> pd.DataFrame] The data to write. path : str, optional The path to the directory to write the data into. If not given, this will be a unique name. 
""" @abstractproperty def _reader_cls(self): raise NotImplementedError('_reader') @abstractmethod def _write(self, env, days, path, data): raise NotImplementedError('_write') def __init__(self, env, days, data, path=None): super(_TmpBarReader, self).__init__(path=path) self._env = env self._days = days self._data = data def __enter__(self): tmpdir = super(_TmpBarReader, self).__enter__() env = self._env try: self._write( env, self._days, tmpdir.path, self._data, ) return self._reader_cls(tmpdir.path) except: self.__exit__(None, None, None) raise class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader): """A temporary BcolzMinuteBarReader object. Parameters ---------- env : TradingEnvironment The trading env. days : pd.DatetimeIndex The days to write for. data : iterable[(int, pd.DataFrame)] The data to write. path : str, optional The path to the directory to write the data into. If not given, this will be a unique name. See Also -------- tmp_bcolz_equity_daily_bar_reader """ _reader_cls = BcolzMinuteBarReader _write = staticmethod(write_bcolz_minute_data) class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader): """A temporary BcolzDailyBarReader object. Parameters ---------- env : TradingEnvironment The trading env. days : pd.DatetimeIndex The days to write for. data : dict[int -> pd.DataFrame] The data to write. path : str, optional The path to the directory to write the data into. If not given, this will be a unique name. See Also -------- tmp_bcolz_equity_daily_bar_reader """ _reader_cls = BcolzDailyBarReader @staticmethod def _write(env, days, path, data): BcolzDailyBarWriter(path, days).write(data) @contextmanager def patch_read_csv(url_map, module=pd, strict=False): """Patch pandas.read_csv to map lookups from url to another. Parameters ---------- url_map : mapping[str or file-like object -> str or file-like object] The mapping to use to redirect read_csv calls. module : module, optional The module to patch ``read_csv`` on. By default this is ``pandas``. This should be set to another module if ``read_csv`` is early-bound like ``from pandas import read_csv`` instead of late-bound like: ``import pandas as pd; pd.read_csv``. strict : bool, optional If true, then this will assert that ``read_csv`` is only called with elements in the ``url_map``. """ read_csv = pd.read_csv def patched_read_csv(filepath_or_buffer, *args, **kwargs): if filepath_or_buffer in url_map: return read_csv(url_map[filepath_or_buffer], *args, **kwargs) elif not strict: return read_csv(filepath_or_buffer, *args, **kwargs) else: raise AssertionError( 'attempted to call read_csv on %r which not in the url map' % filepath_or_buffer, ) with patch.object(module, 'read_csv', patched_read_csv): yield def copy_market_data(src_market_data_dir, dest_root_dir): symbol = 'SPY' filenames = (get_benchmark_filename(symbol), INDEX_MAPPING[symbol][1]) ensure_directory(os.path.join(dest_root_dir, 'data')) for filename in filenames: shutil.copyfile( os.path.join(src_market_data_dir, filename), os.path.join(dest_root_dir, 'data', filename) ) @curry def ensure_doctest(f, name=None): """Ensure that an object gets doctested. This is useful for instances of objects like curry or partial which are not discovered by default. Parameters ---------- f : any The thing to doctest. name : str, optional The name to use in the doctest function mapping. If this is None, Then ``f.__name__`` will be used. Returns ------- f : any ``f`` unchanged. 
""" _getframe(2).f_globals.setdefault('__test__', {})[ f.__name__ if name is None else name ] = f return f class RecordBatchBlotter(Blotter): """Blotter that tracks how its batch_order method was called. """ def __init__(self, data_frequency): super(RecordBatchBlotter, self).__init__(data_frequency) self.order_batch_called = [] def batch_order(self, *args, **kwargs): self.order_batch_called.append((args, kwargs)) return super(RecordBatchBlotter, self).batch_order(*args, **kwargs) #################################### # Shared factors for pipeline tests. #################################### class AssetID(CustomFactor): """ CustomFactor that returns the AssetID of each asset. Useful for providing a Factor that produces a different value for each asset. """ window_length = 1 inputs = () def compute(self, today, assets, out): out[:] = assets class AssetIDPlusDay(CustomFactor): window_length = 1 inputs = () def compute(self, today, assets, out): out[:] = assets + today.day class OpenPrice(CustomFactor): window_length = 1 inputs = [USEquityPricing.open] def compute(self, today, assets, out, open): out[:] = open
bartosh/zipline
zipline/testing/core.py
Python
apache-2.0
47,174
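# ---------------------------------------------------------------------------
# Editor's sketch for the zipline/testing/core.py record above: a minimal,
# hedged example of how ``parameter_space`` and ``check_arrays`` might be used
# together.  The import path follows the record's own module path; the test
# class and the arrays it builds are hypothetical and purely illustrative, and
# running it assumes a working zipline installation.
# ---------------------------------------------------------------------------
import unittest

import numpy as np

from zipline.testing.core import check_arrays, parameter_space


class ExampleParameterSpaceCase(unittest.TestCase):
    """Runs one test body over the cross-product of its keyword inputs."""

    @parameter_space(dtype=['int64', 'float64'], length=[1, 5])
    def test_arange_roundtrip(self, dtype, length):
        # Each (dtype, length) combination runs as a subtest; failures are
        # collected and re-raised together as a single SubTestFailures.
        values = np.arange(length, dtype=dtype)
        check_arrays(values, values.copy())


if __name__ == '__main__':
    unittest.main()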
# python3 # Copyright 2018 DeepMind Technologies Limited. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for discrete BCQ learner.""" from absl.testing import absltest from acme import specs from acme.agents.tf import bcq from acme.testing import fakes from acme.tf import utils as tf2_utils from acme.tf.networks import discrete as discrete_networks from acme.utils import counting import numpy as np import sonnet as snt def _make_network(action_spec: specs.DiscreteArray) -> snt.Module: return snt.Sequential([ snt.Flatten(), snt.nets.MLP([50, 50, action_spec.num_values]), ]) class DiscreteBCQLearnerTest(absltest.TestCase): def test_full_learner(self): # Create dataset. environment = fakes.DiscreteEnvironment( num_actions=5, num_observations=10, obs_dtype=np.float32, episode_length=10) spec = specs.make_environment_spec(environment) dataset = fakes.transition_dataset(environment).batch(2) # Build network. g_network = _make_network(spec.actions) q_network = _make_network(spec.actions) network = discrete_networks.DiscreteFilteredQNetwork(g_network=g_network, q_network=q_network, threshold=0.5) tf2_utils.create_variables(network, [spec.observations]) # Build learner. counter = counting.Counter() learner = bcq.DiscreteBCQLearner( network=network, dataset=dataset, learning_rate=1e-4, discount=0.99, importance_sampling_exponent=0.2, target_update_period=100, counter=counter) # Run a learner step. learner.step() # Check counts from BC and BCQ learners. counts = counter.get_counts() self.assertEqual(1, counts['bc_steps']) self.assertEqual(1, counts['bcq_steps']) # Check learner state. self.assertEqual(1, learner.state['bc_num_steps'].numpy()) self.assertEqual(1, learner.state['bcq_num_steps'].numpy()) if __name__ == '__main__': absltest.main()
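# Editor's sketch (not part of the original test module): the same fixtures
# could be reused to check that the counters keep tracking repeated learner
# steps.  Every API referenced here already appears above; the three-step
# count assumes each call to ``learner.step()`` advances both the BC and BCQ
# sub-learners once, as the single-step assertions above suggest.  In a real
# file this class would live above the ``__main__`` guard so that it is
# collected when the module is run as a script.
class DiscreteBCQLearnerStepCountTest(absltest.TestCase):

  def test_counts_track_multiple_steps(self):
    environment = fakes.DiscreteEnvironment(
        num_actions=5,
        num_observations=10,
        obs_dtype=np.float32,
        episode_length=10)
    spec = specs.make_environment_spec(environment)
    dataset = fakes.transition_dataset(environment).batch(2)

    network = discrete_networks.DiscreteFilteredQNetwork(
        g_network=_make_network(spec.actions),
        q_network=_make_network(spec.actions),
        threshold=0.5)
    tf2_utils.create_variables(network, [spec.observations])

    counter = counting.Counter()
    learner = bcq.DiscreteBCQLearner(
        network=network,
        dataset=dataset,
        learning_rate=1e-4,
        discount=0.99,
        importance_sampling_exponent=0.2,
        target_update_period=100,
        counter=counter)

    # Take a few steps and confirm both sub-learner counters advanced.
    for _ in range(3):
      learner.step()

    counts = counter.get_counts()
    self.assertEqual(3, counts['bc_steps'])
    self.assertEqual(3, counts['bcq_steps'])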
deepmind/acme
acme/agents/tf/bcq/discrete_learning_test.py
Python
apache-2.0
2,614
# -*- coding: utf-8 -*- ############################################################################## # For copyright and license notices, see __openerp__.py file in root directory ############################################################################## from openerp import models, fields, api from openerp.addons import decimal_precision as dp from openerp.tools.float_utils import float_compare class StockPickingPackageWeightLot(models.Model): _name = 'stock.picking.package.weight.lot' _description = 'Stock Picking Package Weight Total' _order = 'sequence' picking = fields.Many2one( comodel_name='stock.picking', string='Picking') sequence = fields.Integer(string='Sequence') package = fields.Many2one( comodel_name='stock.quant.package', string='Package') lots = fields.Many2many( comodel_name='stock.production.lot', string='Lots/Serial Numbers', relation='rel_package_weight_lot') net_weight = fields.Float( string='Net Weight', digits=dp.get_precision('Stock Weight')) gross_weight = fields.Float( string='Gross Weight', digits=dp.get_precision('Stock Weight')) class StockPickingPackageTotal(models.Model): _name = 'stock.picking.package.total' _description = 'Stock Picking Package Total' picking = fields.Many2one( comodel_name='stock.picking', string='Picking') ul = fields.Many2one( comodel_name='product.ul', string='Logistic Unit') quantity = fields.Integer(string='# Packages') class StockPicking(models.Model): _inherit = 'stock.picking' @api.multi @api.depends('pack_operation_ids', 'pack_operation_ids.result_package_id') def _calc_picking_packages(self): for record in self: record.packages = record.pack_operation_ids.mapped( 'result_package_id') @api.multi @api.depends('packages', 'packages.ul_id') def _calc_picking_packages_info(self): pack_weight = self.env['stock.picking.package.weight.lot'] pack_weight_obj = self.env['stock.picking.package.weight.lot'] pack_total = self.env['stock.picking.package.total'] pack_total_obj = self.env['stock.picking.package.total'] for record in self: sequence = 0 for package in record.packages: sequence += 1 package_operations = record.pack_operation_ids.filtered( lambda r: r.result_package_id == package) total_weight = 0.0 for pack_operation in package_operations: total_weight += (pack_operation.product_qty * pack_operation.product_id.weight) vals = { 'picking': record.id, 'sequence': sequence, 'package': package.id, 'lots': [(6, 0, (package.quant_ids.mapped('lot_id').ids or package_operations.mapped('lot_id').ids))], 'net_weight': total_weight, 'gross_weight': total_weight + package.empty_weight, } pack_weight += pack_weight_obj.create(vals) if record.packages: for product_ul in self.env['product.ul'].search([]): cont = len(record.packages.filtered( lambda x: x.ul_id.id == product_ul.id)) if cont: vals = { 'picking': record.id, 'ul': product_ul.id, 'quantity': cont, } pack_total += pack_total_obj.create(vals) record.packages_info = pack_weight record.package_totals = pack_total record.num_packages = sum(x.quantity for x in record.package_totals) packages = fields.Many2many( comodel_name='stock.quant.package', string='Packages', compute='_calc_picking_packages') packages_info = fields.One2many( comodel_name='stock.picking.package.weight.lot', inverse_name='picking', string='Packages Info', compute='_calc_picking_packages_info') package_totals = fields.One2many( comodel_name='stock.picking.package.total', inverse_name='picking', string='Total UL Packages Info', compute='_calc_picking_packages_info') num_packages = fields.Integer( string='# Packages', 
compute='_calc_picking_packages_info') @api.multi def create_all_move_packages(self): pack_op_obj = self.env['stock.pack.operation'] ops = self.env['stock.pack.operation'] for picking in self: forced_qties = {} picking_quants = [] for move in picking.move_lines: if move.state not in ('assigned', 'confirmed', 'waiting'): continue move_quants = move.reserved_quant_ids picking_quants += move_quants forced_qty = (move.state == 'assigned') and \ move.product_qty - sum([x.qty for x in move_quants]) or 0 rounding = move.product_id.uom_id.rounding if float_compare(forced_qty, 0, precision_rounding=rounding) > 0: if forced_qties.get(move.product_id): forced_qties[move.product_id] += forced_qty else: forced_qties[move.product_id] = forced_qty for vals in picking._prepare_pack_ops( picking, picking_quants, forced_qties): domain = [('picking_id', '=', picking.id)] if vals.get('lot_id', False): domain += [('lot_id', '=', vals.get('lot_id'))] if vals.get('product_id', False): domain += [('product_id', '=', vals.get('product_id'))] packs = pack_op_obj.search(domain) if packs: qty = sum([x.product_qty for x in packs]) new_qty = vals.get('product_qty', 0) - qty if new_qty: vals.update({'product_qty': new_qty}) else: continue ops |= pack_op_obj.create(vals) return ops
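# Editor's sketch: the per-package weight computation in
# ``_calc_picking_packages_info`` reduces to the small helper below.  The
# helper is not part of the Odoo model and is not wired into it; the
# ``(product_qty, product_weight)`` pairs and the ``empty_weight`` argument
# are hypothetical stand-ins for the pack operations and the package's own
# weight, and exist only to illustrate the net/gross arithmetic.
def _package_weights(pack_operations, empty_weight):
    """Return ``(net_weight, gross_weight)`` for one package.

    >>> _package_weights([(5, 1.5), (2, 0.25)], 0.8)
    (8.0, 8.8)
    """
    net = sum(qty * weight for qty, weight in pack_operations)
    return net, net + empty_weight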
ddico/odoomrp-wip
stock_picking_package_info/models/stock_picking.py
Python
agpl-3.0
6,517
#!/usr/bin/env python __author__ = 'Rolf Jagerman' from PySide import QtGui import os from authentication import AuthenticationListener, AuthenticationClient from loadui import loadUi from config import UI_DIRECTORY from drawers import Drawers class RetrieveDelivery(QtGui.QFrame, AuthenticationListener): """ The retrieve delivery form that enables a user to receive a delivery from the robot """ def __init__(self, content): super(RetrieveDelivery, self).__init__() self.content = content loadUi(os.path.join(UI_DIRECTORY, 'receive_delivery.ui'), self) self.drawer1_button.clicked.connect(self.drawer1_open) self.drawer2_button.clicked.connect(self.drawer2_open) self.drawer3_button.clicked.connect(self.drawer3_open) self.cancel_button.clicked.connect(self.cancel) self.recipient_id = None self.drawer_id = '1' AuthenticationClient.add_listener(self) def show(self, *args, **kwargs): super(RetrieveDelivery, self).show() self.disable_drawers() self.prepare_open_drawer() for drawer in Drawers.receive_drawers(self.recipient_id): if drawer == '1': self.drawer1_button.setEnabled(True) elif drawer == '2': self.drawer2_button.setEnabled(True) elif drawer == '3': self.drawer3_button.setEnabled(True) def prepare_open_drawer(self): self.content.components['open_drawer'].save = lambda : self.save() self.content.components['open_drawer'].back = lambda : self.back() self.content.components['open_drawer'].success_message = 'Have you succesfully received your delivery?' self.content.components['open_drawer'].instruction_message = 'retrieve' def on_login(self, user): self.recipient_id = user.id def on_login_failure(self, user): self.recipient_id = None self.disable_drawers() def on_logout(self, user): self.disable_drawers() def back(self): self.content.activate(self.content.components['retrieve_delivery']) def save(self): Drawers.remove_delivery(self.drawer_id) self.content.activate(self.content.components['welcome']) def disable_drawers(self): self.drawer1_button.setEnabled(False) self.drawer2_button.setEnabled(False) self.drawer3_button.setEnabled(False) def cancel(self): self.content.activate(self.content.components['welcome']) def drawer1_open(self): self.drawer_id = '1' self.content.activate(self.content.components['open_drawer']) def drawer2_open(self): self.drawer_id = '2' self.content.activate(self.content.components['open_drawer']) def drawer3_open(self): self.drawer_id = '3' self.content.activate(self.content.components['open_drawer'])
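# Editor's sketch (not wired into the class above): the per-drawer if/elif
# chains in ``show`` and the ``drawerN_open`` slots could be driven by a
# lookup table instead.  ``form`` stands in for a RetrieveDelivery instance,
# and only ``Drawers.receive_drawers`` from the imports above is assumed.
def _enable_receive_drawers(form, recipient_id):
    """Enable only the buttons whose drawer holds a delivery for this user."""
    buttons = {
        '1': form.drawer1_button,
        '2': form.drawer2_button,
        '3': form.drawer3_button,
    }
    for drawer_id in Drawers.receive_drawers(recipient_id):
        button = buttons.get(drawer_id)
        if button is not None:
            button.setEnabled(True)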
MartienLagerweij/aidu
aidu_gui/src/aidu_gui/retrieve_delivery.py
Python
mit
2,907
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import sys import warnings from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from openstack_dashboard import exceptions from openstack_dashboard.static_settings import find_static_files # noqa from openstack_dashboard.static_settings import get_staticfiles_dirs # noqa from openstack_dashboard import theme_settings warnings.formatwarning = lambda message, category, *args, **kwargs: \ '%s: %s' % (category.__name__, message) ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) BIN_DIR = os.path.abspath(os.path.join(ROOT_PATH, '..', 'bin')) if ROOT_PATH not in sys.path: sys.path.append(ROOT_PATH) DEBUG = False TEMPLATE_DEBUG = DEBUG SITE_BRANDING = 'OpenStack Dashboard' WEBROOT = '/' LOGIN_URL = None LOGOUT_URL = None LOGIN_REDIRECT_URL = None STATIC_ROOT = None STATIC_URL = None ROOT_URLCONF = 'openstack_dashboard.urls' HORIZON_CONFIG = { 'user_home': 'openstack_dashboard.views.get_user_home', 'ajax_queue_limit': 10, 'auto_fade_alerts': { 'delay': 3000, 'fade_duration': 1500, 'types': ['alert-success', 'alert-info'] }, 'bug_url': None, 'help_url': "http://docs.openstack.org", 'exceptions': {'recoverable': exceptions.RECOVERABLE, 'not_found': exceptions.NOT_FOUND, 'unauthorized': exceptions.UNAUTHORIZED}, 'modal_backdrop': 'static', 'angular_modules': [], 'js_files': [], 'js_spec_files': [], 'external_templates': [], 'plugins': [] } # Set to True to allow users to upload images to glance via Horizon server. # When enabled, a file form field will appear on the create image form. # See documentation for deployment considerations. HORIZON_IMAGES_ALLOW_UPLOAD = True # The OPENSTACK_IMAGE_BACKEND settings can be used to customize features # in the OpenStack Dashboard related to the Image service, such as the list # of supported image formats. 
OPENSTACK_IMAGE_BACKEND = { 'image_formats': [ ('', _('Select format')), ('aki', _('AKI - Amazon Kernel Image')), ('ami', _('AMI - Amazon Machine Image')), ('ari', _('ARI - Amazon Ramdisk Image')), ('docker', _('Docker')), ('iso', _('ISO - Optical Disk Image')), ('ova', _('OVA - Open Virtual Appliance')), ('qcow2', _('QCOW2 - QEMU Emulator')), ('raw', _('Raw')), ('vdi', _('VDI - Virtual Disk Image')), ('vhd', _('VHD - Virtual Hard Disk')), ('vmdk', _('VMDK - Virtual Machine Disk')), ] } MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'horizon.middleware.HorizonMiddleware', 'horizon.themes.ThemeMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.request', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.contrib.messages.context_processors.messages', 'horizon.context_processors.horizon', 'openstack_dashboard.context_processors.openstack', ) TEMPLATE_LOADERS = ( 'horizon.themes.ThemeTemplateLoader', ('django.template.loaders.cached.Loader', ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', 'horizon.loaders.TemplateLoader', )), ) TEMPLATE_DIRS = ( os.path.join(ROOT_PATH, 'templates'), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) COMPRESS_PRECOMPILERS = ( ('text/scss', 'horizon.utils.scss_filter.HorizonScssFilter'), ) COMPRESS_CSS_FILTERS = ( 'compressor.filters.css_default.CssAbsoluteFilter', ) COMPRESS_ENABLED = True COMPRESS_OUTPUT_DIR = 'dashboard' COMPRESS_CSS_HASHING_METHOD = 'hash' COMPRESS_PARSER = 'compressor.parser.HtmlParser' INSTALLED_APPS = [ 'openstack_dashboard', 'django.contrib.contenttypes', 'django.contrib.auth', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.humanize', 'django_pyscss', 'openstack_dashboard.django_pyscss_fix', 'compressor', 'horizon', 'openstack_auth', ] TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',) AUTHENTICATION_URLS = ['openstack_auth.urls'] AUTH_USER_MODEL = 'openstack_auth.User' MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage' SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies' SESSION_COOKIE_HTTPONLY = True SESSION_EXPIRE_AT_BROWSER_CLOSE = True SESSION_COOKIE_SECURE = False # SESSION_TIMEOUT is a method to supersede the token timeout with a shorter # horizon session timeout (in seconds). So if your token expires in 60 # minutes, a value of 1800 will log users out after 30 minutes SESSION_TIMEOUT = 3600 # When using cookie-based sessions, log error when the session cookie exceeds # the following size (common browsers drop cookies above a certain size): SESSION_COOKIE_MAX_SIZE = 4093 # when doing upgrades, it may be wise to stick to PickleSerializer # NOTE(berendt): Check during the K-cycle if this variable can be removed. 
# https://bugs.launchpad.net/horizon/+bug/1349463 SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer' LANGUAGES = ( ('cs', 'Czech'), ('de', 'German'), ('en', 'English'), ('en-au', 'Australian English'), ('en-gb', 'British English'), ('es', 'Spanish'), ('fr', 'French'), ('it', 'Italian'), ('ja', 'Japanese'), ('ko', 'Korean (Korea)'), ('pl', 'Polish'), ('pt-br', 'Portuguese (Brazil)'), ('ru', 'Russian'), ('tr', 'Turkish'), ('zh-cn', 'Simplified Chinese'), ('zh-tw', 'Chinese (Taiwan)'), ) LANGUAGE_CODE = 'en' LANGUAGE_COOKIE_NAME = 'horizon_language' USE_I18N = True USE_L10N = True USE_TZ = True OPENSTACK_KEYSTONE_DEFAULT_ROLE = '_member_' DEFAULT_EXCEPTION_REPORTER_FILTER = 'horizon.exceptions.HorizonReporterFilter' POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") # Map of local copy of service policy files POLICY_FILES = { 'identity': 'keystone_policy.json', 'compute': 'nova_policy.json', 'volume': 'cinder_policy.json', 'image': 'glance_policy.json', 'orchestration': 'heat_policy.json', 'network': 'neutron_policy.json', 'telemetry': 'ceilometer_policy.json', } SECRET_KEY = None LOCAL_PATH = None SECURITY_GROUP_RULES = { 'all_tcp': { 'name': _('All TCP'), 'ip_protocol': 'tcp', 'from_port': '1', 'to_port': '65535', }, 'all_udp': { 'name': _('All UDP'), 'ip_protocol': 'udp', 'from_port': '1', 'to_port': '65535', }, 'all_icmp': { 'name': _('All ICMP'), 'ip_protocol': 'icmp', 'from_port': '-1', 'to_port': '-1', }, } ADD_INSTALLED_APPS = [] # Deprecated Theme Settings CUSTOM_THEME_PATH = None DEFAULT_THEME_PATH = None # 'key', 'label', 'path' AVAILABLE_THEMES = [ ( 'default', pgettext_lazy('Default style theme', 'Default'), 'themes/default' ), ( 'material', pgettext_lazy("Google's Material Design style theme", "Material"), 'themes/material' ), ] # The default theme if no cookie is present DEFAULT_THEME = 'default' # Theme Static Directory THEME_COLLECTION_DIR = 'themes' # Theme Cookie Name THEME_COOKIE_NAME = 'theme' try: from local.local_settings import * # noqa except ImportError: logging.warning("No local_settings file found.") # allow to drop settings snippets into a local_settings_dir LOCAL_SETTINGS_DIR_PATH = os.path.join(ROOT_PATH, "local", "local_settings.d") if os.path.exists(LOCAL_SETTINGS_DIR_PATH): for (dirpath, dirnames, filenames) in os.walk(LOCAL_SETTINGS_DIR_PATH): for filename in sorted(filenames): if filename.endswith(".py"): try: execfile(os.path.join(dirpath, filename)) except Exception as e: logging.exception( "Can not exec settings snippet %s" % (filename)) if not WEBROOT.endswith('/'): WEBROOT += '/' if LOGIN_URL is None: LOGIN_URL = WEBROOT + 'auth/login/' if LOGOUT_URL is None: LOGOUT_URL = WEBROOT + 'auth/logout/' if LOGIN_REDIRECT_URL is None: LOGIN_REDIRECT_URL = WEBROOT MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media')) MEDIA_URL = WEBROOT + 'media/' if STATIC_ROOT is None: STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static')) if STATIC_URL is None: STATIC_URL = WEBROOT + 'static/' AVAILABLE_THEMES, DEFAULT_THEME = theme_settings.get_available_themes( AVAILABLE_THEMES, CUSTOM_THEME_PATH, DEFAULT_THEME_PATH, DEFAULT_THEME ) STATICFILES_DIRS = get_staticfiles_dirs(STATIC_URL) + \ theme_settings.get_theme_static_dirs( AVAILABLE_THEMES, THEME_COLLECTION_DIR, ROOT_PATH) if CUSTOM_THEME_PATH is not None: logging.warning("CUSTOM_THEME_PATH has been deprecated. 
Please convert " "your settings to make use of AVAILABLE_THEMES.") if DEFAULT_THEME_PATH is not None: logging.warning("DEFAULT_THEME_PATH has been deprecated. Please convert " "your settings to make use of AVAILABLE_THEMES.") # populate HORIZON_CONFIG with auto-discovered JavaScript sources, mock files, # specs files and external templates. find_static_files(HORIZON_CONFIG) # Ensure that we always have a SECRET_KEY set, even when no local_settings.py # file is present. See local_settings.py.example for full documentation on the # horizon.utils.secret_key module and its use. if not SECRET_KEY: if not LOCAL_PATH: LOCAL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'local') from horizon.utils import secret_key SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) # Load the pluggable dashboard settings import openstack_dashboard.enabled import openstack_dashboard.local.enabled from openstack_dashboard.utils import settings INSTALLED_APPS = list(INSTALLED_APPS) # Make sure it's mutable settings.update_dashboards( [ openstack_dashboard.enabled, openstack_dashboard.local.enabled, ], HORIZON_CONFIG, INSTALLED_APPS, ) INSTALLED_APPS[0:0] = ADD_INSTALLED_APPS from openstack_auth import policy POLICY_CHECK_FUNCTION = policy.check # This base context objects gets added to the offline context generator # for each theme configured. HORIZON_COMPRESS_OFFLINE_CONTEXT_BASE = { 'WEBROOT': WEBROOT, 'STATIC_URL': STATIC_URL, 'HORIZON_CONFIG': HORIZON_CONFIG } COMPRESS_OFFLINE_CONTEXT = 'horizon.themes.offline_context' if DEBUG: logging.basicConfig(level=logging.DEBUG) # during django reloads and an active user is logged in, the monkey # patch below will not otherwise be applied in time - resulting in developers # appearing to be logged out. In typical production deployments this section # below may be omitted, though it should not be harmful from openstack_auth import utils as auth_utils auth_utils.patch_middleware_get_user() CSRF_COOKIE_AGE = None
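# Editor's note: an example of the kind of snippet the ``local_settings.d``
# loader above will pick up.  The file name and the values shown are
# hypothetical; any ``*.py`` file dropped into
# openstack_dashboard/local/local_settings.d is exec'd in sorted filename
# order and may override settings defined earlier in this module.
#
#     # openstack_dashboard/local/local_settings.d/_50_sessions.py
#     SESSION_TIMEOUT = 1800            # log users out after 30 minutes
#     SESSION_COOKIE_SECURE = True      # only send the session cookie on HTTPS
#     HORIZON_CONFIG['help_url'] = 'https://docs.example.org'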
ankur-gupta91/horizon-net-ip
openstack_dashboard/settings.py
Python
apache-2.0
12,704
# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib._i18n import _ from neutron_lib import exceptions class VlanTransparencyDriverError(exceptions.NeutronException): """Vlan Transparency not supported by all mechanism drivers.""" message = _("Backend does not support VLAN Transparency.")
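# Editor's sketch: one way a plugin might surface this error.  The
# ``supports_vlan_transparency`` attribute and the helper below are
# hypothetical; only ``VlanTransparencyDriverError`` comes from this module.
def check_vlan_transparency_support(mechanism_drivers):
    """Raise if any loaded mechanism driver cannot honour transparent VLANs."""
    if not all(getattr(driver, 'supports_vlan_transparency', False)
               for driver in mechanism_drivers):
        raise VlanTransparencyDriverError()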
openstack/neutron-lib
neutron_lib/exceptions/vlantransparent.py
Python
apache-2.0
902
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import struct import socket import logging import netaddr from ryu.ofproto import ether from ryu.ofproto import inet from ryu.ofproto import ofproto_v1_2 from ryu.ofproto import ofproto_v1_2_parser from ryu.lib import hub from ryu.lib import mac LOG = logging.getLogger('ryu.lib.ofctl_v1_2') DEFAULT_TIMEOUT = 1.0 def str_to_int(src): if isinstance(src, str): if src.startswith("0x") or src.startswith("0X"): dst = int(src, 16) else: dst = int(src) else: dst = src return dst def to_action(dp, dic): ofp = dp.ofproto parser = dp.ofproto_parser result = None action_type = dic.get('type') if action_type == 'OUTPUT': out_port = int(dic.get('port', ofp.OFPP_ANY)) max_len = int(dic.get('max_len', ofp.OFPCML_MAX)) result = parser.OFPActionOutput(out_port, max_len) elif action_type == 'COPY_TTL_OUT': result = parser.OFPActionCopyTtlOut() elif action_type == 'COPY_TTL_IN': result = parser.OFPActionCopyTtlIn() elif action_type == 'SET_MPLS_TTL': mpls_ttl = int(dic.get('mpls_ttl')) result = parser.OFPActionSetMplsTtl(mpls_ttl) elif action_type == 'DEC_MPLS_TTL': result = parser.OFPActionDecMplsTtl() elif action_type == 'PUSH_VLAN': ethertype = int(dic.get('ethertype')) result = parser.OFPActionPushVlan(ethertype) elif action_type == 'POP_VLAN': result = parser.OFPActionPopVlan() elif action_type == 'PUSH_MPLS': ethertype = int(dic.get('ethertype')) result = parser.OFPActionPushMpls(ethertype) elif action_type == 'POP_MPLS': ethertype = int(dic.get('ethertype')) result = parser.OFPActionPopMpls(ethertype) elif action_type == 'SET_QUEUE': queue_id = int(dic.get('queue_id')) result = parser.OFPActionSetQueue(queue_id) elif action_type == 'GROUP': group_id = int(dic.get('group_id')) result = parser.OFPActionGroup(group_id) elif action_type == 'SET_NW_TTL': nw_ttl = int(dic.get('nw_ttl')) result = parser.OFPActionSetNwTtl(nw_ttl) elif action_type == 'DEC_NW_TTL': result = parser.OFPActionDecNwTtl() elif action_type == 'SET_FIELD': field = dic.get('field') value = dic.get('value') result = parser.OFPActionSetField(**{field: value}) else: LOG.debug('Unknown action type: %s' % action_type) return result def to_actions(dp, acts): inst = [] actions = [] ofp = dp.ofproto parser = dp.ofproto_parser for a in acts: action = to_action(dp, a) if action is not None: actions.append(action) else: action_type = a.get('type') if action_type == 'GOTO_TABLE': table_id = int(a.get('table_id')) inst.append(parser.OFPInstructionGotoTable(table_id)) elif action_type == 'WRITE_METADATA': metadata = str_to_int(a.get('metadata')) metadata_mask = (str_to_int(a['metadata_mask']) if 'metadata_mask' in a else parser.UINT64_MAX) inst.append( parser.OFPInstructionWriteMetadata( metadata, metadata_mask)) else: LOG.debug('Unknown action type: %s' % action_type) inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)) return inst def action_to_str(act): action_type = act.cls_action_type if action_type == 
ofproto_v1_2.OFPAT_OUTPUT: buf = 'OUTPUT:' + str(act.port) elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_OUT: buf = 'COPY_TTL_OUT' elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_IN: buf = 'COPY_TTL_IN' elif action_type == ofproto_v1_2.OFPAT_SET_MPLS_TTL: buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl) elif action_type == ofproto_v1_2.OFPAT_DEC_MPLS_TTL: buf = 'DEC_MPLS_TTL' elif action_type == ofproto_v1_2.OFPAT_PUSH_VLAN: buf = 'PUSH_VLAN:' + str(act.ethertype) elif action_type == ofproto_v1_2.OFPAT_POP_VLAN: buf = 'POP_VLAN' elif action_type == ofproto_v1_2.OFPAT_PUSH_MPLS: buf = 'PUSH_MPLS:' + str(act.ethertype) elif action_type == ofproto_v1_2.OFPAT_POP_MPLS: buf = 'POP_MPLS' elif action_type == ofproto_v1_2.OFPAT_SET_QUEUE: buf = 'SET_QUEUE:' + str(act.queue_id) elif action_type == ofproto_v1_2.OFPAT_GROUP: buf = 'GROUP:' + str(act.group_id) elif action_type == ofproto_v1_2.OFPAT_SET_NW_TTL: buf = 'SET_NW_TTL:' + str(act.nw_ttl) elif action_type == ofproto_v1_2.OFPAT_DEC_NW_TTL: buf = 'DEC_NW_TTL' elif action_type == ofproto_v1_2.OFPAT_SET_FIELD: buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value) else: buf = 'UNKNOWN' return buf def actions_to_str(instructions): actions = [] for instruction in instructions: if isinstance(instruction, ofproto_v1_2_parser.OFPInstructionActions): for a in instruction.actions: actions.append(action_to_str(a)) elif isinstance(instruction, ofproto_v1_2_parser.OFPInstructionGotoTable): buf = 'GOTO_TABLE:' + str(instruction.table_id) actions.append(buf) elif isinstance(instruction, ofproto_v1_2_parser.OFPInstructionWriteMetadata): buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata, instruction.metadata_mask) if instruction.metadata_mask else 'WRITE_METADATA:0x%x' % instruction.metadata) actions.append(buf) else: continue return actions def to_match(dp, attrs): match = dp.ofproto_parser.OFPMatch() convert = {'in_port': int, 'in_phy_port': int, 'dl_src': mac.haddr_to_bin, 'dl_dst': mac.haddr_to_bin, 'dl_type': int, 'dl_vlan': int, 'vlan_pcp': int, 'ip_dscp': int, 'ip_ecn': int, 'nw_src': to_match_ip, 'nw_dst': to_match_ip, 'nw_proto': int, 'tp_src': int, 'tp_dst': int, 'eth_src': mac.haddr_to_bin, 'eth_dst': mac.haddr_to_bin, 'eth_type': int, 'vlan_vid': int, 'ipv4_src': to_match_ip, 'ipv4_dst': to_match_ip, 'ip_proto': int, 'tcp_src': int, 'tcp_dst': int, 'udp_src': int, 'udp_dst': int, 'sctp_src': int, 'sctp_dst': int, 'icmpv4_type': int, 'icmpv4_code': int, 'arp_op': int, 'arp_spa': to_match_ip, 'arp_tpa': to_match_ip, 'arp_sha': mac.haddr_to_bin, 'arp_tha': mac.haddr_to_bin, 'ipv6_src': to_match_ipv6, 'ipv6_dst': to_match_ipv6, 'ipv6_flabel': int, 'icmpv6_type': int, 'icmpv6_code': int, 'ipv6_nd_target': to_match_ipv6, 'ipv6_nd_sll': mac.haddr_to_bin, 'ipv6_nd_tll': mac.haddr_to_bin, 'mpls_tc': int} match_append = {'in_port': match.set_in_port, 'in_phy_port': match.set_in_phy_port, 'dl_src': match.set_dl_src, 'dl_dst': match.set_dl_dst, 'dl_type': match.set_dl_type, 'dl_vlan': match.set_vlan_vid, 'vlan_pcp': match.set_vlan_pcp, 'nw_src': match.set_ipv4_src_masked, 'nw_dst': match.set_ipv4_dst_masked, 'nw_proto': match.set_ip_proto, 'tp_src': to_match_tpsrc, 'tp_dst': to_match_tpdst, 'eth_src': match.set_dl_src, 'eth_dst': match.set_dl_dst, 'eth_type': match.set_dl_type, 'vlan_vid': match.set_vlan_vid, 'ip_dscp': match.set_ip_dscp, 'ip_ecn': match.set_ip_ecn, 'ipv4_src': match.set_ipv4_src_masked, 'ipv4_dst': match.set_ipv4_dst_masked, 'ip_proto': match.set_ip_proto, 'tcp_src': to_match_tpsrc, 'tcp_dst': to_match_tpdst, 'udp_src': to_match_tpsrc, 
'udp_dst': to_match_tpdst, 'sctp_src': match.set_sctp_src, 'sctp_dst': match.set_sctp_dst, 'icmpv4_type': match.set_icmpv4_type, 'icmpv4_code': match.set_icmpv4_code, 'arp_op': match.set_arp_opcode, 'arp_spa': match.set_arp_spa_masked, 'arp_tpa': match.set_arp_tpa_masked, 'arp_sha': match.set_arp_sha, 'arp_tha': match.set_arp_tha, 'ipv6_src': match.set_ipv6_src_masked, 'ipv6_dst': match.set_ipv6_dst_masked, 'ipv6_flabel': match.set_ipv6_flabel, 'icmpv6_type': match.set_icmpv6_type, 'icmpv6_code': match.set_icmpv6_code, 'ipv6_nd_target': match.set_ipv6_nd_target, 'ipv6_nd_sll': match.set_ipv6_nd_sll, 'ipv6_nd_tll': match.set_ipv6_nd_tll, 'mpls_tc': match.set_mpls_tc} if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \ attrs.get('eth_type') == ether.ETH_TYPE_ARP: if 'nw_src' in attrs and not 'arp_spa' in attrs: attrs['arp_spa'] = attrs['nw_src'] del attrs['nw_src'] if 'nw_dst' in attrs and not 'arp_tpa' in attrs: attrs['arp_tpa'] = attrs['nw_dst'] del attrs['nw_dst'] for key, value in attrs.items(): if key in convert: value = convert[key](value) if key in match_append: if key == 'nw_src' or key == 'nw_dst' or \ key == 'ipv4_src' or key == 'ipv4_dst' or \ key == 'arp_spa' or key == 'arp_tpa' or \ key == 'ipv6_src' or key == 'ipv6_dst': # IP address ip = value[0] mask = value[1] match_append[key](ip, mask) elif key == 'tp_src' or key == 'tp_dst' or \ key == 'tcp_src' or key == 'tcp_dst' or \ key == 'udp_src' or key == 'udp_dst': # tp_src/dst match_append[key](value, match, attrs) else: # others match_append[key](value) return match def to_match_tpsrc(value, match, rest): match_append = {inet.IPPROTO_TCP: match.set_tcp_src, inet.IPPROTO_UDP: match.set_udp_src} nw_proto = rest.get('nw_proto', rest.get('ip_proto', 0)) if nw_proto in match_append: match_append[nw_proto](value) return match def to_match_tpdst(value, match, rest): match_append = {inet.IPPROTO_TCP: match.set_tcp_dst, inet.IPPROTO_UDP: match.set_udp_dst} nw_proto = rest.get('nw_proto', rest.get('ip_proto', 0)) if nw_proto in match_append: match_append[nw_proto](value) return match def to_match_ip(value): ip_mask = value.split('/') # IP address ipv4 = struct.unpack('!I', socket.inet_aton(ip_mask[0]))[0] # netmask netmask = ofproto_v1_2_parser.UINT32_MAX if len(ip_mask) == 2: # Check the mask is CIDR or not. if ip_mask[1].isdigit(): netmask &= ofproto_v1_2_parser.UINT32_MAX << 32 - int(ip_mask[1]) else: netmask = struct.unpack('!I', socket.inet_aton(ip_mask[1]))[0] return ipv4, netmask def to_match_ipv6(value): ip_mask = value.split('/') if len(ip_mask) == 2 and ip_mask[1].isdigit() is False: # Both address and netmask are colon-hexadecimal. ipv6 = netaddr.IPAddress(ip_mask[0]).words netmask = netaddr.IPAddress(ip_mask[1]).words else: # For other formats. 
network = netaddr.IPNetwork(value) ipv6 = network.ip.words netmask = network.netmask.words return ipv6, netmask def match_to_str(ofmatch): keys = {ofproto_v1_2.OXM_OF_IN_PORT: 'in_port', ofproto_v1_2.OXM_OF_IN_PHY_PORT: 'in_phy_port', ofproto_v1_2.OXM_OF_ETH_SRC: 'dl_src', ofproto_v1_2.OXM_OF_ETH_DST: 'dl_dst', ofproto_v1_2.OXM_OF_ETH_TYPE: 'dl_type', ofproto_v1_2.OXM_OF_VLAN_VID: 'dl_vlan', ofproto_v1_2.OXM_OF_VLAN_PCP: 'vlan_pcp', ofproto_v1_2.OXM_OF_IP_DSCP: 'ip_dscp', ofproto_v1_2.OXM_OF_IP_ECN: 'ip_ecn', ofproto_v1_2.OXM_OF_IPV4_SRC: 'nw_src', ofproto_v1_2.OXM_OF_IPV4_DST: 'nw_dst', ofproto_v1_2.OXM_OF_IPV4_SRC_W: 'nw_src', ofproto_v1_2.OXM_OF_IPV4_DST_W: 'nw_dst', ofproto_v1_2.OXM_OF_IP_PROTO: 'nw_proto', ofproto_v1_2.OXM_OF_TCP_SRC: 'tp_src', ofproto_v1_2.OXM_OF_TCP_DST: 'tp_dst', ofproto_v1_2.OXM_OF_UDP_SRC: 'tp_src', ofproto_v1_2.OXM_OF_UDP_DST: 'tp_dst', ofproto_v1_2.OXM_OF_SCTP_SRC: 'sctp_src', ofproto_v1_2.OXM_OF_SCTP_DST: 'sctp_dst', ofproto_v1_2.OXM_OF_ICMPV4_TYPE: 'icmpv4_type', ofproto_v1_2.OXM_OF_ICMPV4_CODE: 'icmpv4_code', ofproto_v1_2.OXM_OF_MPLS_LABEL: 'mpls_label', ofproto_v1_2.OXM_OF_MPLS_TC: 'mpls_tc', ofproto_v1_2.OXM_OF_ARP_OP: 'arp_op', ofproto_v1_2.OXM_OF_ARP_SPA: 'arp_spa', ofproto_v1_2.OXM_OF_ARP_TPA: 'arp_tpa', ofproto_v1_2.OXM_OF_ARP_SPA_W: 'arp_spa', ofproto_v1_2.OXM_OF_ARP_TPA_W: 'arp_tpa', ofproto_v1_2.OXM_OF_ARP_SHA: 'arp_sha', ofproto_v1_2.OXM_OF_ARP_THA: 'arp_tha', ofproto_v1_2.OXM_OF_IPV6_SRC: 'ipv6_src', ofproto_v1_2.OXM_OF_IPV6_DST: 'ipv6_dst', ofproto_v1_2.OXM_OF_IPV6_SRC_W: 'ipv6_src', ofproto_v1_2.OXM_OF_IPV6_DST_W: 'ipv6_dst', ofproto_v1_2.OXM_OF_IPV6_FLABEL: 'ipv6_flabel', ofproto_v1_2.OXM_OF_ICMPV6_TYPE: 'icmpv6_type', ofproto_v1_2.OXM_OF_ICMPV6_CODE: 'icmpv6_code', ofproto_v1_2.OXM_OF_IPV6_ND_TARGET: 'ipv6_nd_target', ofproto_v1_2.OXM_OF_IPV6_ND_SLL: 'ipv6_nd_sll', ofproto_v1_2.OXM_OF_IPV6_ND_TLL: 'ipv6_nd_tll'} match = {} for match_field in ofmatch.fields: key = keys[match_field.header] if key == 'dl_src' or key == 'dl_dst': value = mac.haddr_to_str(match_field.value) elif key == 'nw_src' or key == 'nw_dst' or \ key == 'arp_spa' or key == 'arp_tpa': value = match_ip_to_str(match_field.value, match_field.mask) elif key == 'ipv6_src' or key == 'ipv6_dst': value = match_ipv6_to_str(match_field.value, match_field.mask) else: value = match_field.value match.setdefault(key, value) return match def match_ip_to_str(value, mask): ip = socket.inet_ntoa(struct.pack('!I', value)) if mask is not None and mask != 0: binary_str = bin(mask)[2:].zfill(32).rstrip('0') if binary_str.find('0') >= 0: netmask = '/%s' % socket.inet_ntoa(struct.pack('!I', mask)) else: netmask = '/%d' % len(binary_str) else: netmask = '' return ip + netmask def match_ipv6_to_str(value, mask): ip_list = [] for word in value: ip_list.append('%04x' % word) ip = netaddr.IPNetwork(':'.join(ip_list)) netmask = 128 netmask_str = None if mask is not None: mask_list = [] for word in mask: mask_list.append('%04x' % word) mask_v = netaddr.IPNetwork(':'.join(mask_list)) binary_str = mask_v.ip.bits().replace(':', '').zfill(128).rstrip('0') if binary_str.find('0') >= 0: netmask_str = str(mask_v.ip) else: netmask = len(binary_str) if netmask_str is not None: ip_str = str(ip.ip) + '/' + netmask_str elif netmask == 128: ip_str = str(ip.ip) else: ip.prefixlen = netmask ip_str = str(ip) return ip_str def send_stats_request(dp, stats, waiters, msgs): dp.set_xid(stats) waiters_per_dp = waiters.setdefault(dp.id, {}) lock = hub.Event() waiters_per_dp[stats.xid] = (lock, msgs) dp.send_msg(stats) try: 
lock.wait(timeout=DEFAULT_TIMEOUT) except hub.Timeout: del waiters_per_dp[stats.xid] def get_desc_stats(dp, waiters): stats = dp.ofproto_parser.OFPDescStatsRequest(dp) msgs = [] send_stats_request(dp, stats, waiters, msgs) s = {} for msg in msgs: stats = msg.body s = {'mfr_desc': stats.mfr_desc, 'hw_desc': stats.hw_desc, 'sw_desc': stats.sw_desc, 'serial_num': stats.serial_num, 'dp_desc': stats.dp_desc} desc = {str(dp.id): s} return desc def get_flow_stats(dp, waiters): table_id = dp.ofproto.OFPTT_ALL out_port = dp.ofproto.OFPP_ANY out_group = dp.ofproto.OFPG_ANY cookie = 0 cookie_mask = 0 match = dp.ofproto_parser.OFPMatch() stats = dp.ofproto_parser.OFPFlowStatsRequest( dp, table_id, out_port, out_group, cookie, cookie_mask, match) msgs = [] send_stats_request(dp, stats, waiters, msgs) flows = [] for msg in msgs: for stats in msg.body: actions = actions_to_str(stats.instructions) match = match_to_str(stats.match) s = {'priority': stats.priority, 'cookie': stats.cookie, 'idle_timeout': stats.idle_timeout, 'hard_timeout': stats.hard_timeout, 'actions': actions, 'match': match, 'byte_count': stats.byte_count, 'duration_sec': stats.duration_sec, 'duration_nsec': stats.duration_nsec, 'packet_count': stats.packet_count, 'table_id': stats.table_id} flows.append(s) flows = {str(dp.id): flows} return flows def get_port_stats(dp, waiters): stats = dp.ofproto_parser.OFPPortStatsRequest( dp, dp.ofproto.OFPP_ANY, 0) msgs = [] send_stats_request(dp, stats, waiters, msgs) ports = [] for msg in msgs: for stats in msg.body: s = {'port_no': stats.port_no, 'rx_packets': stats.rx_packets, 'tx_packets': stats.tx_packets, 'rx_bytes': stats.rx_bytes, 'tx_bytes': stats.tx_bytes, 'rx_dropped': stats.rx_dropped, 'tx_dropped': stats.tx_dropped, 'rx_errors': stats.rx_errors, 'tx_errors': stats.tx_errors, 'rx_frame_err': stats.rx_frame_err, 'rx_over_err': stats.rx_over_err, 'rx_crc_err': stats.rx_crc_err, 'collisions': stats.collisions} ports.append(s) ports = {str(dp.id): ports} return ports def get_group_stats(dp, waiters): stats = dp.ofproto_parser.OFPGroupStatsRequest( dp, dp.ofproto.OFPG_ALL, 0) msgs = [] send_stats_request(dp, stats, waiters, msgs) groups = [] for msg in msgs: for stats in msg.body: bucket_counters = [] for bucket_counter in stats.bucket_counters: c = {'packet_count': bucket_counter.packet_count, 'byte_count': bucket_counter.byte_count} bucket_counters.append(c) g = {'group_id': stats.group_id, 'ref_count': stats.ref_count, 'packet_count': stats.packet_count, 'byte_count': stats.byte_count, 'bucket_stats': bucket_counters} groups.append(g) groups = {str(dp.id): groups} return groups def get_group_features(dp, waiters): ofp = dp.ofproto type_convert = {ofp.OFPGT_ALL: 'ALL', ofp.OFPGT_SELECT: 'SELECT', ofp.OFPGT_INDIRECT: 'INDIRECT', ofp.OFPGT_FF: 'FF'} cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT', ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS', ofp.OFPGFC_CHAINING: 'CHAINING', ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHCEKS'} act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT', ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT', ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN', ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL', ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL', ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN', ofp.OFPAT_POP_VLAN: 'POP_VLAN', ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS', ofp.OFPAT_POP_MPLS: 'POP_MPLS', ofp.OFPAT_SET_QUEUE: 'SET_QUEUE', ofp.OFPAT_GROUP: 'GROUP', ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL', ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL', ofp.OFPAT_SET_FIELD: 'SET_FIELD'} stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0) msgs = [] 
send_stats_request(dp, stats, waiters, msgs) features = [] for msg in msgs: feature = msg.body types = [] for k, v in type_convert.items(): if (1 << k) & feature.types: types.append(v) capabilities = [] for k, v in cap_convert.items(): if k & feature.capabilities: capabilities.append(v) max_groups = [] for k, v in type_convert.items(): max_groups.append({v: feature.max_groups[k]}) actions = [] for k1, v1 in type_convert.items(): acts = [] for k2, v2 in act_convert.items(): if (1 << k2) & feature.actions[k1]: acts.append(v2) actions.append({v1: acts}) f = {'types': types, 'capabilities': capabilities, 'max_groups': max_groups, 'actions': actions} features.append(f) features = {str(dp.id): features} return features def get_group_desc(dp, waiters): type_convert = {dp.ofproto.OFPGT_ALL: 'ALL', dp.ofproto.OFPGT_SELECT: 'SELECT', dp.ofproto.OFPGT_INDIRECT: 'INDIRECT', dp.ofproto.OFPGT_FF: 'FF'} stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0) msgs = [] send_stats_request(dp, stats, waiters, msgs) descs = [] for msg in msgs: for stats in msg.body: buckets = [] for bucket in stats.buckets: actions = [] for action in bucket.actions: actions.append(action_to_str(action)) b = {'weight': bucket.weight, 'watch_port': bucket.watch_port, 'watch_group': bucket.watch_group, 'actions': actions} buckets.append(b) d = {'type': type_convert.get(stats.type), 'group_id': stats.group_id, 'buckets': buckets} descs.append(d) descs = {str(dp.id): descs} return descs def mod_flow_entry(dp, flow, cmd): cookie = int(flow.get('cookie', 0)) cookie_mask = int(flow.get('cookie_mask', 0)) table_id = int(flow.get('table_id', 0)) idle_timeout = int(flow.get('idle_timeout', 0)) hard_timeout = int(flow.get('hard_timeout', 0)) priority = int(flow.get('priority', 0)) buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY)) flags = int(flow.get('flags', 0)) match = to_match(dp, flow.get('match', {})) inst = to_actions(dp, flow.get('actions', [])) flow_mod = dp.ofproto_parser.OFPFlowMod( dp, cookie, cookie_mask, table_id, cmd, idle_timeout, hard_timeout, priority, buffer_id, out_port, out_group, flags, match, inst) dp.send_msg(flow_mod) def mod_group_entry(dp, group, cmd): type_convert = {'ALL': dp.ofproto.OFPGT_ALL, 'SELECT': dp.ofproto.OFPGT_SELECT, 'INDIRECT': dp.ofproto.OFPGT_INDIRECT, 'FF': dp.ofproto.OFPGT_FF} type_ = type_convert.get(group.get('type')) if type_ is None: LOG.debug('Unknown type: %s', group.get('type')) group_id = int(group.get('group_id', 0)) buckets = [] for bucket in group.get('buckets', []): weight = int(bucket.get('weight', 0)) watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY)) watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY)) actions = [] for dic in bucket.get('actions', []): action = to_action(dp, dic) if action is not None: actions.append(action) buckets.append(dp.ofproto_parser.OFPBucket( weight, watch_port, watch_group, actions)) group_mod = dp.ofproto_parser.OFPGroupMod( dp, cmd, type_, group_id, buckets) dp.send_msg(group_mod) def send_experimenter(dp, exp): experimenter = exp.get('experimenter', 0) exp_type = exp.get('exp_type', 0) data_type = exp.get('data_type', 'ascii') if data_type != 'ascii' and data_type != 'base64': LOG.debug('Unknown data type: %s', data_type) data = exp.get('data', '') if data_type == 'base64': data = base64.b64decode(data) expmsg = dp.ofproto_parser.OFPExperimenter( dp, experimenter, exp_type, data) 
dp.send_msg(expmsg)
mikhaelharswanto/ryu
ryu/lib/ofctl_v1_2.py
Python
apache-2.0
27,009
### import #################################################################### import pycmds.project.classes as pc import pycmds.hardware.hardware as hw import pathlib import appdirs import toml import yaqc ### driver #################################################################### class Driver(hw.Driver): def __init__(self, *args, **kwargs): self._yaqd_port = kwargs.pop("yaqd_port") super().__init__(*args, **kwargs) self.grating_index = pc.Combo( name="Grating", allowed_values=[1, 2], section=self.name, option="grating_index", display=True, set_method="set_turret", ) self.exposed.append(self.grating_index) def get_position(self): native_position = self.ctrl.get_position() self.position.write(native_position, self.native_units) return self.position.read() def initialize(self, *args, **kwargs): # open control self.ctrl = yaqc.Client(self._yaqd_port) # import some information from control id_dict = self.ctrl.id() self.serial_number = id_dict["serial"] self.position.write(self.ctrl.get_position()) # recorded self.recorded[self.name] = [self.position, self.native_units, 1.0, "m", False] self.wait_until_still() # finish self.initialized.write(True) self.initialized_signal.emit() def is_busy(self): return self.ctrl.busy() def set_position(self, destination): self.ctrl.set_position(float(destination)) self.wait_until_still() def set_turret(self, destination_index): if type(destination_index) == list: destination_index = destination_index[0] # turret index on ActiveX call starts from zero destination_index_zero_based = int(destination_index) - 1 self.ctrl.set_turret(destination_index_zero_based) self.grating_index.write(destination_index) self.wait_until_still() self.limits.write(*self.ctrl.get_limits(), self.native_units) ### gui ####################################################################### class GUI(hw.GUI): pass ### hardware ################################################################## class Hardware(hw.Hardware): def __init__(self, *args, **kwargs): self.kind = "spectrometer" hw.Hardware.__init__(self, *args, **kwargs) ### import #################################################################### conf = pathlib.Path(appdirs.user_config_dir("pycmds", "pycmds")) / "config.toml" conf = toml.load(conf) hardwares, gui, advanced_gui = hw.import_hardwares( conf.get("hardware", {}).get("spectrometers", {}), name="Spectrometers", Driver=Driver, GUI=GUI, Hardware=Hardware, )
wright-group/PyCMDS
pycmds/hardware/spectrometers.py
Python
mit
2,840
# # Copyright 2018 Analytics Zoo Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pytest from test.zoo.pipeline.utils.test_utils import ZooTestCase from zoo.chronos.model.prophet import ProphetModel import numpy as np import os from numpy.testing import assert_array_almost_equal import pandas as pd class TestProphetModel(ZooTestCase): def setup_method(self, method): self.seq_len = 480 self.config = { "changepoint_prior_scale": np.exp(np.random.uniform(np.log(0.001), np.log(0.5))), "seasonality_prior_scale": np.exp(np.random.uniform(np.log(0.01), np.log(10))), "holidays_prior_scale": np.exp(np.random.uniform(np.log(0.01), np.log(10))), "seasonality_mode": np.random.choice(['additive', 'multiplicative']), "changepoint_range": np.random.uniform(0.8, 0.95) } self.model = ProphetModel() self.data = pd.DataFrame(pd.date_range('20130101', periods=self.seq_len), columns=['ds']) self.data.insert(1, 'y', np.random.rand(self.seq_len)) self.horizon = np.random.randint(2, 50) self.validation_data = pd.DataFrame(pd.date_range('20140426', periods=self.horizon), columns=['ds'] ) self.validation_data.insert(1, 'y', np.random.rand(self.horizon)) def teardown_method(self, method): del self.model del self.data del self.validation_data def test_prophet(self): # test fit_eval evaluate_result = self.model.fit_eval(data=self.data, validation_data=self.validation_data, **self.config) # test predict result = self.model.predict(horizon=self.horizon) assert result.shape[0] == self.horizon # test evaluate evaluate_result = self.model.evaluate(target=self.validation_data, metrics=['mae', 'smape']) assert len(evaluate_result) == 2 def test_error(self): with pytest.raises(ValueError, match="We don't support input data currently"): self.model.evaluate(target=self.validation_data, data=1) with pytest.raises(ValueError, match="Input invalid target of None"): self.model.evaluate(target=None) with pytest.raises(Exception, match="Needs to call fit_eval or restore first before calling predict"): self.model.predict() with pytest.raises(Exception, match="Needs to call fit_eval or restore first before calling evaluate"): self.model.evaluate(target=self.validation_data) with pytest.raises(Exception, match="Needs to call fit_eval or restore first before calling save"): model_file = "tmp.json" self.model.save(model_file) def test_save_restore(self): self.model.fit_eval(data=self.data, validation_data=self.validation_data, **self.config) result_save = self.model.predict(horizon=self.horizon) model_file = "tmp.json" self.model.save(model_file) assert os.path.isfile(model_file) new_model = ProphetModel() new_model.restore(model_file) assert new_model.model result_restore = new_model.predict(horizon=self.horizon) assert_array_almost_equal(result_save['yhat'], result_restore['yhat'], decimal=2), \ "Prediction values are not the same after restore: " \ "predict before is {}, and predict after is {}".format(result_save, result_restore) os.remove(model_file) if __name__ == '__main__': pytest.main([__file__])
intel-analytics/analytics-zoo
pyzoo/test/zoo/chronos/model/test_prophet.py
Python
apache-2.0
4,374
from peewee import MySQLDatabase DB = MySQLDatabase('db_name', user='DB_user', password='db_Password', host='DB_URL')
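# Illustrative sketch (not part of the original config file): peewee models
# typically bind to a shared database handle like DB above through their Meta
# class. The model and field names below are hypothetical.
from peewee import Model, CharField


class ExampleProperty(Model):
    parcel_id = CharField()

    class Meta:
        database = DB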
vincentdavis/Colorado-Property-Data
dbconfig_pro.py
Python
mit
174
#!/usr/bin/python from execrunner import o, selo, selmo, cmdList, f import subprocess, argparse # Dataset prefix dp = '/u/nlp/data/smart-autocomplete/datasets/' constants = { 'python': '/u/nlp/packages/python-2.7.4/bin/python2.7', 'statePath': '/u/nlp/data/smart-autocomplete/state' } def main(): parser = \ argparse.ArgumentParser(description="iterate runs on nlp cluster") parser.add_argument("-s", "--nlpServer", default='jacob.stanford.edu', help="url of nlp cluster") parser.add_argument("-d", "--srcDir", default='~/src/smartAutocomplete', help="location of smartAutocomplete src on nlp cluster") parser.add_argument("-v", "--addToView", default='') args = parser.parse_args() constants['srcDir'] = args.srcDir constants['nlpServer'] = args.nlpServer constants['addToView'] = args.addToView cmds = \ cmdList('{python} server/runBenchmarks.py'.format(**constants), selo('startAt', 2, 0, .5, .9), selmo(('trainTokens', 'weightTraining', 'maxSamples'), 0, (100000, None, 500), (1000000, 10000, 1000)), selmo(('dataset', 'extension'), 1, (dp + 'node', 'js'), (dp + 'django', 'py'), (dp + 'english-large-jokes', None), (dp + 'english-small-sanity-check', None), (dp + 'javascript-large-d3subset', 'js'), (dp + 'javascript-medium-emile', 'js'), (dp + 'python-large-web-py-subset', 'py'), (dp + 'python-medium-singularity-chess', 'py')), selo('features', -2, # KN ['scope', 'ngram'], # Ngrams ['simple', 'prev', 'prevTwo', 'prevThree'], # Basic features ['simple', 'path', 'filetype', 'prev', 'prevTwo', 'prevThree', 'prevPrev'], # Experiment ['simple', 'path', 'prev'], # Individual features ['simple'], ['path'], ['filetype'], ['prev'], ['prevTwo'], ['prevThree'], ['prevPrev'], ['prevForm'], ['lineStart'], ['indentLevel'], ['dirH'], ['linePrefix'], ['scope'], ['ngram'], # All features None), f("onlyIdentifiers"), o("samplePeriod", 50), o("statePath", constants["statePath"]), selo("learningAlgorithm", 0, "counter", "weights", "perceptron", "perceptronMixer", "bucketPerceptronMixer", "naiveBayes", "bma", "grid")) proc = subprocess.Popen('ssh {nlpServer} {python} ' '{srcDir}/server/populateTasks.py {srcDir} {addToView}' .format(**constants), shell=True, stdin=subprocess.PIPE) proc.communicate('\n'.join(cmds)+'\n') if __name__ == "__main__": main()
pokey/smartAutocomplete
pythonServer/iterateRuns.py
Python
mit
3,283
# pylint: disable = C0301 from bs4 import BeautifulSoup from urllib2 import urlopen import pandas as pd pos_idx_map = { 'qb': 2, 'rb': 3, 'wr': 4, 'te': 5, } def make_url(pos, wk): ii = pos_idx_map[pos] fstr = "http://fantasydata.com/nfl-stats/nfl-fantasy-football-stats.aspx?fs=1&stype=0&sn=1&w=%s&s=&t=0&p=%s&st=FantasyPointsPPR&d=1&ls=&live=false" \ % (wk, ii) return fstr def html2df(soup): table = soup.find('table') headers = [header.text.lower() for header in table.find_all('th')] rows = [] for row in table.find_all('tr'): rows.append([val.text.encode('utf8') for val in row.find_all('td')]) rows = [rr for rr in rows if len(rr) > 0] df = pd.DataFrame.from_records(rows) df.columns = headers return df def position_html_local(posn): dflist = [] for ii in range(1, 17): fname = '%s%s.html' % (posn, ii) with open(fname) as f: df = html2df(BeautifulSoup(f)) df['wk'] = ii df.columns = header_clean(df.columns, posn) dflist.append(df) return pd.concat(dflist) def position_html(posn): dflist = [] for ii in range(1, 17): fname = make_url(posn, ii) df = html2df(BeautifulSoup(urlopen(fname))) df['wk'] = ii df.columns = header_clean(df.columns, posn) dflist.append(df) return pd.concat(dflist) pos_header_suffixes = { 'qb': ['_pass', '_rush'], 'rb': ['_rush', '_recv'], 'wr': ['_recv'], 'te': ['_recv'], } exclude_cols = ['rk', 'player', 'team', 'pos', 'fantasy points', 'wk', 'fum', 'lost', 'qb rating'] def header_clean(header, posn): res = [] if posn in pos_header_suffixes: suffixes = pos_header_suffixes[posn] seen_dict = {hh: 0 for hh in header} for hh in header: if not hh in exclude_cols: hres = hh + suffixes[seen_dict[hh]] seen_dict[hh] += 1 res.append(hres) else: res.append(hh) else: res = header return res if __name__ == '__main__': data_all = {} for pp in ['qb', 'wr', 'rb', 'te']: data_all[pp] = position_html_local(pp) data_all[pp].to_pickle('%s.pkl' % pp)
yikelu/nfl_fantasy_data
htmls2csvs.py
Python
gpl-2.0
2,265
import _plotly_utils.basevalidators class LenmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__( self, plotly_name="lenmode", parent_name="barpolar.marker.colorbar", **kwargs ): super(LenmodeValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), role=kwargs.pop("role", "info"), values=kwargs.pop("values", ["fraction", "pixels"]), **kwargs )
plotly/python-api
packages/python/plotly/plotly/validators/barpolar/marker/colorbar/_lenmode.py
Python
mit
546
"""WSGI Middleware components NERC Data Grid Project""" __author__ = "P J Kershaw" __date__ = "27/05/08" __copyright__ = "(C) 2009 Science and Technology Facilities Council" __license__ = "BSD - see LICENSE file in top-level directory" __contact__ = "Philip.Kershaw@stfc.ac.uk" __revision__ = '$Id$' import logging log = logging.getLogger(__name__) import httplib import re # for NDGSecurityPathFilter class NDGSecurityMiddlewareError(Exception): '''Base exception class for NDG Security middleware''' class NDGSecurityMiddlewareConfigError(NDGSecurityMiddlewareError): '''NDG Security Middleware Configuration error''' class NDGSecurityMiddlewareBase(object): """Base class for NDG Security Middleware classes @cvar USERNAME_ENVIRON_KEYNAME: environ key name for user id as used by AuthKit @type USERNAME_ENVIRON_KEYNAME: string """ USERNAME_ENVIRON_KEYNAME = 'REMOTE_USER' USERDATA_ENVIRON_KEYNAME = 'REMOTE_USER_DATA' USERNAME_SESSION_KEYNAME = 'username' propertyDefaults = { 'mountPath': '/', } __slots__ = ('_app', '_environ', '_start_response', '_pathInfo', '_path', '_mountPath') def __init__(self, app, app_conf, prefix='', **local_conf): ''' @type app: callable following WSGI interface @param app: next middleware application in the chain @type app_conf: dict @param app_conf: PasteDeploy global configuration dictionary @type prefix: basestring @param prefix: prefix for app_conf parameters e.g. 'ndgsecurity.' - enables other global configuration parameters to be filtered out @type local_conf: dict @param local_conf: PasteDeploy application specific configuration dictionary ''' self._app = app self._environ = {} self._start_response = None self._pathInfo = None self._path = None self._mountPath = '/' # Convenient utility for tracing start_response call self.debug_start_response = False opt = self.__class__.propertyDefaults.copy() # If no prefix is set, there is no way to distinguish options set for # this app and those applying to other applications if app_conf is not None and prefix: # Update from application config dictionary - filter using prefix self.__class__._filterOpts(opt, app_conf, prefix=prefix) # Similarly, filter keyword input self.__class__._filterOpts(opt, local_conf, prefix=prefix) # Set options as object attributes for name, val in opt.items(): if not name.startswith('_'): setattr(self, name, val) def _initCall(self, environ, start_response): """Call from derived class' __call__() to set environ and path attributes @type environ: dict @param environ: WSGI environment variables dictionary @type start_response: function @param start_response: standard WSGI start response function """ self.environ = environ if self.debug_start_response: def start_response_(status, response_headers, exc_info=None): log.debug('Calling start response with ' 'environ["PATH_INFO"]=%r, ' 'status=%r, headers=%r, exc_info=%r', environ['PATH_INFO'], status, response_headers, exc_info) return start_response(status, response_headers, exc_info=exc_info) self.start_response = start_response_ else: self.start_response = start_response self.setPathInfo() self.setPath() @staticmethod def initCall(__call__): '''Decorator to __call__ to enable convenient attribute initialisation ''' def __call__wrapper(self, environ, start_response): self._initCall(environ, start_response) return __call__(self, environ, start_response) return __call__wrapper def __call__(self, environ, start_response): """ @type environ: dict @param environ: WSGI environment variables dictionary @type start_response: function @param start_response: 
standard WSGI start response function @rtype: iterable @return: response """ self._initCall(environ, start_response) return self._setResponse(environ, start_response) def _setResponse(self, environ=None, start_response=None, notFoundMsg=None, notFoundMsgContentType=None): """Convenience method to wrap call to next WSGI app in stack or set an error if none is set @type environ: dict @param environ: WSGI environment variables dictionary defaults to environ object attribute. For the latter to be available, the initCall decorator method must have been invoked. @type start_response: function @param start_response: standard WSGI start response function defaults to start_response object attribute. For the latter to be available, the initCall decorator method must have been invoked. """ if environ is None: environ = self.environ if start_response is None: start_response = self.start_response if self._app: return self._app(environ, start_response) else: return self._setErrorResponse(start_response=start_response, msg=notFoundMsg, code=404, contentType=notFoundMsgContentType) def _setErrorResponse(self, start_response=None, msg=None, code=500, contentType=None): '''Convenience method to set a simple error response @type start_response: function @param start_response: standard WSGI callable to set the HTTP header defaults to start_response object attribute. For the latter to be available, the initCall decorator method must have been invoked. @type msg: basestring @param msg: optional error message @type code: int @param code: standard HTTP error response code @type contentType: basestring @param contentType: set 'Content-type' HTTP header field - defaults to 'text/plain' ''' if start_response is None: start_response = self.start_response status = '%d %s' % (code, httplib.responses[code]) if msg is None: response = status else: response = msg if contentType is None: contentType = 'text/plain' start_response(status, [('Content-type', contentType), ('Content-Length', str(len(response)))]) return [response] @staticmethod def getStatusMessage(statusCode): '''Make a standard status message for use with start_response @type statusCode: int @param statusCode: HTTP status code @rtype: str @return: status code with standard message @raise KeyError: for invalid status code ''' return '%d %s' % (statusCode, httplib.responses[statusCode]) # Utility functions to support Paste Deploy application and filter function # signatures @classmethod def filter_app_factory(cls, app, app_conf, **local_conf): '''Function signature for Paste Deploy filter @type app: callable following WSGI interface @param app: next middleware application in the chain @type app_conf: dict @param app_conf: PasteDeploy global configuration dictionary @type prefix: basestring @param prefix: prefix for app_conf parameters e.g. 'ndgsecurity.' - enables other global configuration parameters to be filtered out @type local_conf: dict @param local_conf: PasteDeploy application specific configuration dictionary ''' return cls(app, app_conf, **local_conf) @classmethod def app_factory(cls, app_conf, **local_conf): '''Function Signature for Paste Deploy app''' return cls(None, app_conf, **local_conf) @classmethod def _filterOpts(cls, opt, newOpt, prefix='', propertyDefaults=None): '''Convenience utility to filter input options set in __init__ via app_conf or keywords @type opt: dict @param opt: existing options set. 
These will be updated by this method based on the content of newOpt @type newOpt: dict @param newOpt: new options to update opt with @type prefix: basestring @param prefix: if set, remove the given prefix from the input options @type propertyDefaults: iterable/None @param propertyDefaults: property names restricted to this dictionary of names. If None, default to propertyDefaults class variable setting @raise KeyError: if an option is set that is not in the classes defOpt class variable ''' if propertyDefaults is None: propertyDefaults = cls.propertyDefaults badOpt = [] for k,v in newOpt.items(): if prefix and k.startswith(prefix): subK = k.replace(prefix, '') filtK = '_'.join(subK.split('.')) else: # Ignore items that are not prefixed continue if propertyDefaults is not None and filtK not in propertyDefaults: badOpt += [k] else: opt[filtK] = v if len(badOpt) > 0: raise TypeError("Invalid input option(s) set: %s" % (", ".join(badOpt))) def setMountPath(self, mountPath=None, environ=None): if mountPath: self._mountPath = mountPath else: if environ is None: environ = self._environ self._mountPath = environ.get('SCRIPT_URL') if self._mountPath is None: raise AttributeError("SCRIPT_URL key not set in environ: " "'mountPath' is set to None") # Comment out as it breaks standard for URL trailing slash # if self._mountPath != '/': # self._mountPath = self._mountPath.rstrip('/') def _getMountPath(self): return self._mountPath mountPath = property(fget=_getMountPath, fset=setMountPath, doc="URL path as assigned to SCRIPT_URL environ key") def setPathInfo(self, pathInfo=None, environ=None): if pathInfo: self._pathInfo = pathInfo else: if environ is None: environ = self._environ self._pathInfo = environ['PATH_INFO'] if self._pathInfo != '/': self._pathInfo = self._pathInfo.rstrip('/') def _getPathInfo(self): return self._pathInfo pathInfo = property(fget=_getPathInfo, fset=setPathInfo, doc="URL path as assigned to PATH_INFO environ key") def setPath(self, path=None): if path: self._path = path else: self._path = self.mountPath.rstrip('/') + self._pathInfo if self._path != '/': self._path = self._path.rstrip('/') def _getPath(self): return self._path path = property(fget=_getPath, fset=setPath, doc="Full URL path minus domain name - equivalent to " "self.mountPath PATH_INFO environ setting") def _setEnviron(self, environ): self._environ = environ def _getEnviron(self): return self._environ environ = property(fget=_getEnviron, fset=_setEnviron, doc="Reference to WSGI environ dict") def _setStart_response(self, start_response): self._start_response = start_response def _getStart_response(self): return self._start_response start_response = property(fget=_getStart_response, fset=_setStart_response, doc="Reference to WSGI start_response function") def redirect(self, url, start_response=None): """Do a HTTP 302 redirect @type start_response: callable following WSGI start_response convention @param start_response: WSGI start response callable @type url: basestring @param url: URL to redirect to @rtype: list @return: empty HTML body """ if start_response is None: # self.start_response will be None if initCall decorator wasn't # applied to __call__ if self.start_response is None: raise NDGSecurityMiddlewareConfigError("No start_response " "function set.") start_response = self.start_response start_response(NDGSecurityMiddlewareBase.getStatusMessage(302), [('Content-type', 'text/html'), ('Content-length', '0'), ('Location', url)]) return [] @staticmethod def parseListItem(item): """Utility method for parsing a 
space separate list of items in a string. Items may be quoted. This method is useful for parsing items assigned to a parameter in a config file e.g. fileList: "a.txt" b.csv 'My File' @type item: basestring @param item: list of space separated items in a string. These may be quoted """ return [i.strip("\"'") for i in item.split()] class NDGSecurityPathFilter(NDGSecurityMiddlewareBase): """Specialisation of NDG Security Middleware to enable filtering based on PATH_INFO """ propertyDefaults = { 'errorResponseCode': 401, 'serverName': None, 'pathMatchList': ['/'] } propertyDefaults.update(NDGSecurityMiddlewareBase.propertyDefaults) CSV_PAT = re.compile(',\s*') # TODO: refactor to: # * enable reading of path list from a database or some other # configuration source. # * enable some kind of pattern matching for paths _pathMatch = lambda self: self._pathInfo in self.pathMatchList pathMatch = property(fget=_pathMatch, doc="Check for input path match to list of paths" "to which this middleware is to be applied") def __init__(self, *arg, **kw): '''See NDGSecurityMiddlewareBase for explanation of args @type arg: tuple @param arg: single element contains next middleware application in the chain and app_conf dict @type kw: dict @param kw: prefix for app_conf parameters and local_conf dict ''' super(NDGSecurityPathFilter, self).__init__(*arg, **kw) def _getPathMatchList(self): return self.__pathMatchList def _setPathMatchList(self, pathList): ''' @type pathList: list or tuple @param pathList: list of URL paths to apply this middleware to. Paths are relative to the point at which this middleware is mounted as set in environ['PATH_INFO'] ''' if isinstance(pathList, basestring): # Try parsing a space separated list of file paths self.__pathMatchList=NDGSecurityPathFilter.CSV_PAT.split(pathList) elif not isinstance(pathList, (list, tuple)): raise TypeError('Expecting a list or tuple for "pathMatchList"') else: self.__pathMatchList = list(pathList) pathMatchList = property(fget=_getPathMatchList, fset=_setPathMatchList, doc='List of URL paths to which to apply SSL ' 'client authentication') def _getErrorResponseCode(self): """Error response code getter @rtype: int @return: HTTP error code set by this middleware on client cert. verification error """ return self._errorResponseCode def _setErrorResponseCode(self, code): """Error response code setter @type code: int or basestring @param code: error response code set if client cert. verification fails""" if isinstance(code, int): self._errorResponseCode = code elif isinstance(code, basestring): self._errorResponseCode = int(code) else: raise TypeError('Expecting int or string type for ' '"errorResponseCode" attribute') if self._errorResponseCode not in httplib.responses: raise ValueError("Error response code [%d] is not recognised " "standard HTTP response code" % self._errorResponseCode) errorResponseCode = property(fget=_getErrorResponseCode, fset=_setErrorResponseCode, doc="Response code raised if client " "certificate verification fails")
philipkershaw/ndg_security_server
ndg/security/server/wsgi/__init__.py
Python
bsd-3-clause
18,251
# =============================================================================== # Copyright 2012 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from __future__ import absolute_import import math import numpy as np from traits.api import HasTraits, Str, Float, on_trait_change, Instance, Enum, String from traitsui.api import View, Item, HGroup, VGroup from pychron.core.ui.enum_editor import myEnumEditor from pychron.graph.graph import Graph from pychron.processing.arar_constants import ArArConstants # from chaco.axis import PlotAxis # from chaco.plot_factory import create_line_plot, add_default_axes # from pychron.graph.guide_overlay import GuideOverlay powerlaw = lambda pp, x: pp[0] * x ** pp[1] class Index(HasTraits): name = Str start = Float(0) end = Float(0.25) r4039 = Float(8) def calculate(self, age, sensitivity, k2o): c = ArArConstants() xs = np.linspace(self.start, self.end) ys = np.array([self._calculate(wi, age, sensitivity, k2o, c) for wi in xs]) # nxs = np.linspace(max(1e-2, 0), self.end) # n40 = np.linspace(max(1, ys[0]), ys[-1]) n40 = ys[:] if ys[0] == 0: n40 = n40[1:] # r4039 = 7.78 n39 = n40 / self.r4039 # nys = ys[:] # print nys # nys = np.hstack(([1], nys[1:])) # print nys p = (0.0021435788651550671, -0.48505328994128016) e40_scalar = 3 e39 = powerlaw(p, n39) e40 = powerlaw(p, n40) * e40_scalar es = (e39 ** 2 + e40 ** 2) ** 0.5 # es = age * 1e3 * es # es = 0.2 * nys ** (-0.5) return xs, ys, n40, es * 100 def _calculate(self, w, age, sensitivity, k2o, c): moles_40k = ( w / 1000.0 * k2o / 100.0 * 1 / c.mK * (2 * c.mK) / (2 * c.mK + c.mO) * c.abundance_40K ) moles_40Ar = ( moles_40k * (math.exp(c.lambda_k.nominal_value * age * 1e6) - 1) * (c.lambda_e_v / c.lambda_k.nominal_value) ) return moles_40Ar / sensitivity class WeightIndex(Index): name = "Weight" def traits_view(self): v = View( Item("start", label="Weight Start (mg)"), # spring, Item("end", label="Weight End (mg)"), ) return v class VolumeIndex(Index): name = "Volume" depth = Float(0.1) # mm rho = 2580 # kg/m^3 shape = Enum("circle", "square") def traits_view(self): v = View( Item("start", label="Dimension Start (mm)"), Item("end", label="Dimension End (mm)"), HGroup(Item("shape"), Item("depth", label="Depth (mm)")), ) return v def calculate(self, age, sensitivity, k2o): c = ArArConstants() xs = np.linspace(self.start, self.end) def to_weight(d, depth, rho): """ d== mm depth==mm rho==kg/m^3 """ # convert dimension to meters d = d / 1000.0 depth = depth / 1000.0 if self.shape == "circle": v = math.pi * (d / 2.0) ** 2 * depth else: v = d ** 2 * depth m = rho * v # convert mass to mg 1e6 mg in 1 kg return m * 1e6 # convert dim to weight ws = [to_weight(di, self.depth, self.rho) for di in xs] ys = [self._calculate(wi, age, sensitivity, k2o, c) for wi in ws] return xs, ys, xs, ws def _shape_default(self): return "circle" class IndexSelector(HasTraits): name = String("Weight") 
names = ["Volume", "Weight"] def traits_view(self): v = View(Item("name", editor=myEnumEditor(name="names"))) return v class SignalCalculator(HasTraits): age = Float(28.2) k2o = Float # percent sensitivity = Float(5e-17) # moles/fA r4039 = Float(8) graph = Instance(Graph) # weight_index = Instance(WeightIndex, ()) # volume_index = Instance(VolumeIndex, ()) # kind = Enum('weight', 'volume') x = Instance(IndexSelector, ()) y = Instance(IndexSelector) # def _kind_changed(self): # if self.kind == 'weight': # self.secondary_plot.visible = False # else: # self.secondary_plot.visible = True # # self._calculate() index = Instance(Index) def _r4039_changed(self): self.index.r4039 = self.r4039 @on_trait_change("x:name") def _update_index_kind(self): if self.x.name == "Weight": self.index = WeightIndex() else: self.index = VolumeIndex() @on_trait_change("index:+, index:+, sensitivity, k2o, age") def _calculate(self): """ calculate signal size for n mg of sample with m k2o of age p """ if self.x.name == "weight": # attr = self.weight_index self.graph.set_x_title("weight (mg)") else: self.graph.set_x_title("dimension (mm)") # attr = self.volume_index xs, ys, xx, yy = self.index.calculate(self.age, self.sensitivity, self.k2o) self.graph.set_data(xs) self.graph.set_data(ys, axis=1) self.graph.set_data(xx, plotid=1) self.graph.set_data(yy, plotid=1, axis=1) # self.secondary_plot.index.set_data(xx) # self.secondary_plot.value.set_data(yy) self.graph.redraw() def traits_view(self): cntrl_grp = VGroup( Item("age", label="Age (Ma)"), HGroup( Item("k2o", label="K2O %"), Item("r4039", label="(Ar40*/Ar39K)std"), # spring, Item("sensitivity", label="Sensitivity (mol/fA)"), ), Item("x", style="custom", show_label=False), Item("index", style="custom", show_label=False), # Item('kind'), # Item('volume_index', show_label=False, style='custom', # visible_when='kind=="volume"'), # Item('weight_index', show_label=False, style='custom', # visible_when='kind=="weight"'), ) graph_grp = VGroup( Item("graph", width=800, height=500, show_label=False, style="custom"), ) v = View( VGroup(cntrl_grp, graph_grp), resizable=True, title="Signal Calculator" ) return v def _graph_default(self): g = Graph(container_dict=dict(padding=5, kind="h")) g.new_plot( xtitle="weight (mg)", ytitle="40Ar* (fA)", padding=[60, 20, 60, 60] # padding=60 ) g.new_series() g.new_plot( xtitle="40Ar* (fA)", ytitle="%Error in Age", padding=[30, 30, 60, 60] ) g.new_series() # fp = create_line_plot(([], []), color='red') # left, bottom = add_default_axes(fp) # bottom.visible = False # left.orientation = 'right' # left.axis_line_visible = False # bottom.axis_line_visible = False # left.visible = False # if self.kind == 'weight': # bottom.visible = True # bottom.orientation = 'top' # bottom.title = 'Error (ka)' # bottom.tick_color = 'red' # bottom.tick_label_color = 'red' # bottom.line_color = 'red' # bottom.title_color = 'red' # else: # left.title = 'Weight (mg)' # fp.visible = False # gd = GuideOverlay(fp, value=0.01, orientation='v') # fp.overlays.append(gd) # g.plots[0].add(fp) # self.secondary_plot = fp return g def _index_default(self): return WeightIndex(r4039=self.r4039) # def _kind_default(self): # return 'weight' if __name__ == "__main__": sc = SignalCalculator() sc.configure_traits() # ============= EOF =============================================
USGSDenverPychron/pychron
pychron/experiment/signal_calculator.py
Python
apache-2.0
9,205
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from copy import deepcopy from typing import Any, Awaitable, Optional, TYPE_CHECKING from azure.core.rest import AsyncHttpResponse, HttpRequest from azure.mgmt.core import AsyncARMPipelineClient from msrest import Deserializer, Serializer from .. import models from ._configuration import WebSiteManagementClientConfiguration from .operations import AppServiceCertificateOrdersOperations, AppServiceEnvironmentsOperations, AppServicePlansOperations, CertificateOrdersDiagnosticsOperations, CertificateRegistrationProviderOperations, CertificatesOperations, DeletedWebAppsOperations, DiagnosticsOperations, DomainRegistrationProviderOperations, DomainsOperations, GlobalOperations, KubeEnvironmentsOperations, ProviderOperations, RecommendationsOperations, ResourceHealthMetadataOperations, StaticSitesOperations, TopLevelDomainsOperations, WebAppsOperations, WebSiteManagementClientOperationsMixin if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential class WebSiteManagementClient(WebSiteManagementClientOperationsMixin): """WebSite Management Client. :ivar app_service_certificate_orders: AppServiceCertificateOrdersOperations operations :vartype app_service_certificate_orders: azure.mgmt.web.v2021_01_01.aio.operations.AppServiceCertificateOrdersOperations :ivar certificate_orders_diagnostics: CertificateOrdersDiagnosticsOperations operations :vartype certificate_orders_diagnostics: azure.mgmt.web.v2021_01_01.aio.operations.CertificateOrdersDiagnosticsOperations :ivar certificate_registration_provider: CertificateRegistrationProviderOperations operations :vartype certificate_registration_provider: azure.mgmt.web.v2021_01_01.aio.operations.CertificateRegistrationProviderOperations :ivar domains: DomainsOperations operations :vartype domains: azure.mgmt.web.v2021_01_01.aio.operations.DomainsOperations :ivar top_level_domains: TopLevelDomainsOperations operations :vartype top_level_domains: azure.mgmt.web.v2021_01_01.aio.operations.TopLevelDomainsOperations :ivar domain_registration_provider: DomainRegistrationProviderOperations operations :vartype domain_registration_provider: azure.mgmt.web.v2021_01_01.aio.operations.DomainRegistrationProviderOperations :ivar app_service_environments: AppServiceEnvironmentsOperations operations :vartype app_service_environments: azure.mgmt.web.v2021_01_01.aio.operations.AppServiceEnvironmentsOperations :ivar app_service_plans: AppServicePlansOperations operations :vartype app_service_plans: azure.mgmt.web.v2021_01_01.aio.operations.AppServicePlansOperations :ivar certificates: CertificatesOperations operations :vartype certificates: azure.mgmt.web.v2021_01_01.aio.operations.CertificatesOperations :ivar deleted_web_apps: DeletedWebAppsOperations operations :vartype deleted_web_apps: azure.mgmt.web.v2021_01_01.aio.operations.DeletedWebAppsOperations :ivar diagnostics: DiagnosticsOperations operations :vartype diagnostics: azure.mgmt.web.v2021_01_01.aio.operations.DiagnosticsOperations :ivar global_operations: GlobalOperations operations 
:vartype global_operations: azure.mgmt.web.v2021_01_01.aio.operations.GlobalOperations :ivar provider: ProviderOperations operations :vartype provider: azure.mgmt.web.v2021_01_01.aio.operations.ProviderOperations :ivar recommendations: RecommendationsOperations operations :vartype recommendations: azure.mgmt.web.v2021_01_01.aio.operations.RecommendationsOperations :ivar resource_health_metadata: ResourceHealthMetadataOperations operations :vartype resource_health_metadata: azure.mgmt.web.v2021_01_01.aio.operations.ResourceHealthMetadataOperations :ivar static_sites: StaticSitesOperations operations :vartype static_sites: azure.mgmt.web.v2021_01_01.aio.operations.StaticSitesOperations :ivar web_apps: WebAppsOperations operations :vartype web_apps: azure.mgmt.web.v2021_01_01.aio.operations.WebAppsOperations :ivar kube_environments: KubeEnvironmentsOperations operations :vartype kube_environments: azure.mgmt.web.v2021_01_01.aio.operations.KubeEnvironmentsOperations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000). :type subscription_id: str :param base_url: Service URL. Default value is 'https://management.azure.com'. :type base_url: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. """ def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, base_url: str = "https://management.azure.com", **kwargs: Any ) -> None: self._config = WebSiteManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self._serialize.client_side_validation = False self.app_service_certificate_orders = AppServiceCertificateOrdersOperations(self._client, self._config, self._serialize, self._deserialize) self.certificate_orders_diagnostics = CertificateOrdersDiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize) self.certificate_registration_provider = CertificateRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize) self.domains = DomainsOperations(self._client, self._config, self._serialize, self._deserialize) self.top_level_domains = TopLevelDomainsOperations(self._client, self._config, self._serialize, self._deserialize) self.domain_registration_provider = DomainRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize) self.app_service_environments = AppServiceEnvironmentsOperations(self._client, self._config, self._serialize, self._deserialize) self.app_service_plans = AppServicePlansOperations(self._client, self._config, self._serialize, self._deserialize) self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize) self.deleted_web_apps = DeletedWebAppsOperations(self._client, self._config, self._serialize, self._deserialize) self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize) self.global_operations = GlobalOperations(self._client, self._config, self._serialize, self._deserialize) self.provider = 
ProviderOperations(self._client, self._config, self._serialize, self._deserialize) self.recommendations = RecommendationsOperations(self._client, self._config, self._serialize, self._deserialize) self.resource_health_metadata = ResourceHealthMetadataOperations(self._client, self._config, self._serialize, self._deserialize) self.static_sites = StaticSitesOperations(self._client, self._config, self._serialize, self._deserialize) self.web_apps = WebAppsOperations(self._client, self._config, self._serialize, self._deserialize) self.kube_environments = KubeEnvironmentsOperations(self._client, self._config, self._serialize, self._deserialize) def _send_request( self, request: HttpRequest, **kwargs: Any ) -> Awaitable[AsyncHttpResponse]: """Runs the network request through the client's chained policies. >>> from azure.core.rest import HttpRequest >>> request = HttpRequest("GET", "https://www.example.org/") <HttpRequest [GET], url: 'https://www.example.org/'> >>> response = await client._send_request(request) <AsyncHttpResponse: 200 OK> For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart :param request: The network request you want to make. Required. :type request: ~azure.core.rest.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to False. :return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.rest.AsyncHttpResponse """ request_copy = deepcopy(request) request_copy.url = self._client.format_url(request_copy.url) return self._client.send_request(request_copy, **kwargs) async def close(self) -> None: await self._client.close() async def __aenter__(self) -> "WebSiteManagementClient": await self._client.__aenter__() return self async def __aexit__(self, *exc_details) -> None: await self._client.__aexit__(*exc_details)
Azure/azure-sdk-for-python
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_01/aio/_web_site_management_client.py
Python
mit
9,664
x = {"hi": "there", "yo": "I'm a dawg"} print x.items() print x.keys() print x.values()
ArcherSys/ArcherSys
skulpt/test/run/t263.py
Python
mit
88
#!/usr/bin/env python
# encoding:utf8
"""
Created on Jan 9, 2016

@author: zengchunyun
"""


class HAproxy(object):
    def __init__(self, fb, ):
        import re
        self.re_match = re.compile('\w+.*$')
        self.re_match_line = re.compile('^\w+.*$')  # match lines that start with a non-whitespace word character
        self.file = fb  # the file passed in
        self.file_bak = '%s.bak' % self.file  # name of the backup file
        self.file_tmp = None  # temporary holder for a file name
        self.args = None
        self.file_list = []  # list holding the file contents once read
        self.main_list = []  # list of the non-blank, column-one entries of the file
        self.add_list = []
        self.ref_list = []  # reference list used to keep the ordering
        self.ref_list_bak = []  # temporary copy of the list passed in
        self.new_dict = {}  # dict holding the updated file data
        self.new_dict_bak = {}  # dict holding the data temporarily written to file
        self.flag = False  # flag used as a helper in the conditional logic

    def backup_conf(self):  # back up the configuration file
        self.flag = True
        self.ref_list = self.main_conf()
        self.new_dict = self.format_dict()
        self.file_tmp = self.file
        self.file = self.file_bak
        self.format_write(self.file_bak, self.ref_list, **self.new_dict)
        self.ref_list = list(self.ref_list_bak)
        self.new_dict = self.new_dict_bak
        self.file = self.file_tmp
        self.format_write(*self.ref_list, **self.new_dict)
        return True

    def format_write(self, *ref_list, **new_dict):
        if not self.flag:
            self.ref_list_bak = ref_list
            self.new_dict_bak = new_dict
            self.backup_conf()
        if len(ref_list) > 1:
            if type(ref_list[1]) == list:
                self.ref_list = ref_list[1]
        # self.ref_list = list(ref_list)
        # self.new_dict = new_dict
        temp_keys = list(self.new_dict.keys())
        deff_keys = set(self.ref_list).symmetric_difference(temp_keys)
        with open(self.file, 'w') as wr:
            wr.seek(0)
            if deff_keys:  # check whether new keys were added; if so, insert them in sorted position
                for value in self.ref_list:
                    first_half_key = str(list(deff_keys)[0]).split()[0]
                    if first_half_key in value:
                        self.ref_list.insert(self.ref_list.index(value), list(deff_keys)[0])
                        break
            for key in self.ref_list:  # read the dict contents in the order of the reference list
                if self.new_dict.get(key):  # only act if the dict actually has this key
                    wr.write('%s\n' % key)  # write the column-one entry first, then format the content under it
                    if type(self.new_dict[key]) == list:  # the value must be a list before we proceed
                        if self.new_dict[key].count(''):  # reorder entries for files that contain blank lines
                            for index in range(self.new_dict[key].count('')):  # move the blank lines to the end
                                self.new_dict[key].remove('')
                                self.new_dict[key].append('')
                        for value in self.new_dict[key]:  # write the dict values to the file
                            wr.write(str('\t%s\n' % ''.join(value)).expandtabs(8))  # write with a tab width of 8 spaces

    def main_conf(self):  # return the column-one content of the config as a list
        self.main_list = []  # start with an empty list
        with open(self.file, 'r') as get_main_conf:
            get_list = get_main_conf.readlines()
            get_length = len(get_list)
            for index in range(get_length):
                result = self.re_match.match(get_list[index])  # only match lines that do not start with whitespace or a blank
                if result:
                    self.main_list.append(result.group())  # append the matched content to the list
        return self.main_list  # return a list

    def format_dict(self, *args):  # optionally pass in the fields to extract; with no arguments the whole file is returned as a dict
        if args:
            if type(args[0]) == list:  # if the first argument is a list, self.args becomes args[0], i.e. the first argument
                self.args = args[0]
            elif len(args) > 0:
                self.args = list(args)  # otherwise convert the arguments into a list
            self.main_list = self.args
        elif self.args:
            if type(self.args) != list:
                self.args = list(self.args)  # if self.args is set, make sure it ends up as a list
            self.main_list = self.args
        else:
            self.main_list = self.main_conf()  # with no arguments and no self.args, fall back to the default list
        from collections import defaultdict
        get_dict = defaultdict(list)  # new dict whose default values are lists
        with open(self.file, 'r+') as format_content:
            get_list = format_content.readlines()  # read the file into a list of lines
            get_length = len(get_list)  # number of lines in the file
            for key in self.main_list:
                for index in range(get_length):
                    result = self.re_match_line.match(get_list[index])  # only match lines that are not blank and do not start with whitespace
                    if result:
                        if result.group() == key:
                            self.flag = True
                            get_list[index] = ''
                    if self.flag and get_list[index]:
                        if result and result.group() != key:  # a header different from the current key ends this section
                            self.flag = False
                            break
                        else:
                            get_dict[key].append(get_list[index].strip())  # append the indented/blank-led content to this key's list
        return get_dict  # return a dict
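# Illustrative usage sketch (not part of the original module). It writes a tiny
# haproxy-style config to a throwaway file and parses it back; the file name and
# section contents are made up for demonstration.
if __name__ == '__main__':
    demo_path = 'haproxy_demo.cfg'
    with open(demo_path, 'w') as demo:
        demo.write('global\n        daemon\n\ndefaults\n        mode http\n')

    parser = HAproxy(demo_path)
    print(parser.main_conf())    # e.g. ['global', 'defaults']
    print(parser.format_dict())  # e.g. {'global': ['daemon', ''], 'defaults': ['mode http']}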
zengchunyun/s12
day3/haproxy/HAproxy/HAproxy.py
Python
gpl-2.0
6,032
from chapman.task import Barrier from chapman import model as M from .test_base import TaskTest class TestBarrier(TaskTest): def test_ignore_sub_results(self): t = Barrier.n( self.doubler.n(), self.doubler.n()) t.start(2) self._handle_messages() t.refresh() self.assertEqual(M.Message.m.find().count(), 0) self.assertEqual(M.TaskState.m.find().count(), 1) self.assertEqual(t.result.get(), None)
synappio/chapman
chapman/tests/test_barrier.py
Python
mit
483
#!/usr/bin/env python # -*- coding: utf-8 -*- from math import sqrt, exp import random import scipy.stats class Estimator(object): def __init__(self): self._n = 0 self._M = 0 self._S = 0 def __str__(self): return '\t{:.4f}\t{:.4f}'.format(self._mean, self.confidence()) def _observation(self, x): delta = x - self._M self._M += delta / float(self.n + 1) self._S += delta * (x - self._M) self._n += 1 @property def n(self): return self._n @property def mean(self): return self._M @property def variance(self): return self._S / float(self._n - 1) def confidence(self, level=0.95): t = scipy.stats.t.ppf((1 + level) / 2.0, self._n - 1) return t * sqrt(self.variance / self._n) class MonteCarlo(Estimator): def observation(self, graph, budget): """Sample sample_budget vertices uniformly at random and return maximum degree""" max_degree = max(graph.degree()) estimate = max(random.choice(graph.vs).degree() for _ in range(budget)) relative_error = float(max_degree - estimate) / float(max_degree) self._observation(relative_error) class Greedy(Estimator): def observation(self, graph, budget): """Greedy walk maximum degree estimator. Select a random vertex, look at the degrees of all adjacent vertices, select (if possible) a new neighbor that has maximum degree among all neighbors, move to that vertex, and repeat at most n times. """ max_degree = max(graph.degree()) estimate = 0 v = random.choice(graph.vs) for i in range(budget): estimate = max(estimate, v.degree()) try: v_prime = max(v.neighbors(), key=lambda v: v.degree()) except ValueError: break if v_prime.degree() < v.degree(): break v = v_prime relative_error = float(max_degree - estimate) / float(max_degree) self._observation(relative_error) class GreedyRestart(Estimator): def observation(self, graph, budget): """Greedy walk maximum degree estimator. Select a random vertex, look at the degrees of all adjacent vertices, select (if possible) a new neighbor that has maximum degree among all neighbors, move to that vertex, and repeat at most n times. """ max_degree = max(graph.degree()) estimate = 0 v = random.choice(graph.vs) for i in range(budget): estimate = max(estimate, v.degree()) try: v_prime = max(v.neighbors(), key=lambda v: v.degree()) except ValueError: v_prime = random.choice(graph.vs) if v_prime.degree() < v.degree(): v_prime = random.choice(graph.vs) v = v_prime relative_error = float(max_degree - estimate) / float(max_degree) self._observation(relative_error) class Annealing(Estimator): def observation(self, graph, budget): max_degree = max(graph.degree()) estimate = 0 cool = 1e-5 ** (1. / budget) temperature = 10000 v = random.choice(graph.vs) while temperature > 0.1: temperature *= cool d = v.degree() estimate = max(estimate, d) # select next vertex try: v_prime = random.choice(v.neighbors()) except IndexError: v_prime = random.choice(graph.vs) d_prime = v_prime.degree() p = exp(min(0, d_prime - d) / temperature) uniform_rv = random.random() if d_prime >= d or uniform_rv <= p: v = v_prime relative_error = float(max_degree - estimate) / float(max_degree) self._observation(relative_error)
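# Illustrative driver sketch (not part of the original module): exercises the
# estimators on a synthetic igraph graph. The graph model, budget and number of
# repetitions are arbitrary choices made for demonstration, and python-igraph is
# assumed to be installed.
if __name__ == '__main__':
    import igraph

    graph = igraph.Graph.Barabasi(n=1000, m=3)  # synthetic scale-free graph
    budget = 50
    for estimator in (MonteCarlo(), Greedy(), GreedyRestart(), Annealing()):
        for _ in range(30):  # repeated independent observations
            estimator.observation(graph, budget)
        print('{}: mean rel. error {:.4f} +/- {:.4f}'.format(
            estimator.__class__.__name__, estimator.mean, estimator.confidence()))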
bradfordboyle/python-projects
graph-sampling/Estimator.py
Python
mit
3,929
class ListResource(object): def __init__(self, version): """ :param Version version: """ self._version = version """ :type: Version """
tysonholub/twilio-python
twilio/base/list_resource.py
Python
mit
180
#!/usr/bin/env python

# finds prime numbers in the interval [2, 9]
for n in range(2, 10):
    for x in range(2, n):
        if n % x == 0:
            print n, 'equals', x, '*', n/x
            break
    # the else clause belongs to the for loop, not to the if statement
    # it runs only when the loop exhausts its range without hitting a break
    else:
        # loop fell through without finding a factor
        print n, 'is a prime number'
mileiio/pywebpr
breakcontinue.py
Python
apache-2.0
475
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in array_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.compiler.tf2xla.ops import gen_xla_ops from tensorflow.python import pywrap_tfe from tensorflow.python.client import pywrap_tf_session from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops @ops.RegisterGradient("Pack") def _PackGrad(op, grad): """Gradient for pack op.""" return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis")) @ops.RegisterGradient("Unpack") def _UnpackGrad(op, *grads): """Gradient for unpack op.""" return array_ops.stack(grads, axis=op.get_attr("axis")) def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index): """Gradient for concat op. Args: op: An operation. grad: `Tensor` or `IndexedSlices` representing the gradients with respect to each output of the op. start_value_index: An integer index of the first value in the op.inputs. end_value_index: An integer index of the last value in the op.inputs. dim_index: An integer index of concat_dim or axis parameter in op.inputs. Returns: Tensors representing the partial gradients with respect to each input of the op. Raises: ValueError: if concat_dim/axis is not statically known. """ def _CreateDenseMaskAndBegin(sizes, concat_dim): """Create variables for iteratively slicing a dense gradients tensor.""" # Since shape is 1-D, shape_of_shape = [rank-of-inputs] shape_of_shape = array_ops.shape(sizes[0]) # Make a vector of length equal to the input's dimensions, # with 0's everywhere and 1 in the concat dim position. 
# Note: Can't use sparse_to_dense since it isn't GPU-capable (for now) mask = array_ops.concat([ array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1], array_ops.fill(shape_of_shape - concat_dim - 1, 0) ], 0) begin = array_ops.fill(shape_of_shape, 0) return mask, begin def _ExtractInputShapes(inputs): """Extract the shapes of a set of input tensors.""" if context.executing_eagerly(): return array_ops.shape_n(inputs) sizes = [] fully_known = True for x in inputs: input_shape = array_ops.shape(x) if not isinstance(input_shape, ops.Tensor) or input_shape.op.type != "Const": fully_known = False break sizes.append(input_shape) if fully_known: return sizes else: return array_ops.shape_n(inputs) # Degenerate concatenation, just return grad. if len(op.inputs) == 2: return grad + [None] if end_value_index <= dim_index else [None] + grad concat_dim = op.inputs[dim_index] input_values = op.inputs[start_value_index:end_value_index] out_grads = [] if isinstance(grad, ops.Tensor): if context.executing_eagerly() or isinstance(concat_dim, ops.EagerTensor): # Using mod here for convenience since concat_dim is already verified # in concat implementation to be within the allowed [-rank, rank) range. non_neg_concat_dim = ( concat_dim._numpy().item(0) % input_values[0]._rank()) # pylint: disable=protected-access # All inputs are guaranteed to be EagerTensors in eager mode sizes = pywrap_tfe.TFE_Py_TensorShapeSlice(input_values, non_neg_concat_dim) out_grads = array_ops.split(grad, sizes, non_neg_concat_dim) else: if constant_op.is_constant(concat_dim): # If concat_dim is a constant defined in a different context, # then we duplicate it in the current context to avoid passing it # through an Enter node. # This is a small optimization in general, but it is required when # compiling with XLA, as XLA needs the concat input to be folded into a # constant. grad_context = control_flow_util.GetOutputContext(grad.op) dim_context = control_flow_util.GetOutputContext(concat_dim.op) if dim_context != grad_context: value = tensor_util.constant_value(concat_dim) concat_dim = constant_op.constant(value=value, dtype=concat_dim.dtype) # Using mod here for convenience since concat_dim is already verified # in concat implementation to be within the allowed [-rank, rank) range. non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0]) # Get the inputs' tensor shapes sizes = _ExtractInputShapes(input_values) # The magic number of 16 was found through benchmarking a range of sizes # on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of # cases when switching implementations at N=16, but it is possible that # there will be a small number of performance regressions. if len(sizes) > 16: # extract the size of each input along the concat dimension sizes = array_ops.squeeze( array_ops.slice( array_ops.stack(sizes, axis=1), [non_neg_concat_dim, 0], [1, -1])) out_grads = array_ops.split(grad, sizes, non_neg_concat_dim) else: offset = gen_array_ops.concat_offset(non_neg_concat_dim, sizes) for (begin, size) in zip(offset, sizes): out_grads.append(array_ops.slice(grad, begin, size)) elif isinstance(grad, ops.IndexedSlices): # Using mod here for convenience since concat_dim is already verified # in concat implementation to be within the allowed [-rank, rank) range. 
non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0]) concat_dim_static = tensor_util.constant_value(concat_dim) if concat_dim_static is None: raise ValueError("Can only compute IndexedSlices gradient with " "statically-known concat_dim") if concat_dim_static < 0: rank = tensor_util.constant_value(array_ops.rank(input_values[0])) if rank is None: raise ValueError("Can only compute IndexedSlices gradient with " "negative concat_dim when first value rank is " "statically-known.") concat_dim_static %= rank # Get the inputs' tensor shapes sizes = [array_ops.shape(x) for x in input_values] if concat_dim_static > 0: # IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices # gradients with all the indices, but with grad.values sliced accordingly. # This is like the Tensor case, except shape(grad.values)[0] is not equal # to shape(sizes[i])[0], since only a subset of the dim-0 values are # stored. mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim) for size in sizes: new_values = array_ops.slice( grad.values, begin, array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0)) out_grads.append(ops.IndexedSlices(new_values, grad.indices, size)) # Lint complains begin = begin + ... begin = math_ops.add(begin, size * mask) else: # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients # only for the relevant indices. start = constant_op.constant(0, dtype=grad.indices.dtype) for size in sizes: size_concat_dim = array_ops.gather(size, non_neg_concat_dim) if size_concat_dim.dtype != grad.indices.dtype: size_concat_dim = math_ops.cast( size_concat_dim, dtype=grad.indices.dtype) end = start + size_concat_dim # Compute the 1-D Tensor of indices relevant for this input. indices_to_select = array_ops.squeeze( array_ops.where( math_ops.logical_and(grad.indices >= start, grad.indices < end)), axis=[1]) new_indices = array_ops.gather(grad.indices, indices_to_select) - start new_values = array_ops.gather(grad.values, indices_to_select) out_grads.append(ops.IndexedSlices(new_values, new_indices, size)) start = end else: raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad)) return (out_grads + [None] if end_value_index <= dim_index else [None] + out_grads) @ops.RegisterGradient("Concat") def _ConcatGrad(op, grad): return _ConcatGradHelper( op, grad, start_value_index=1, end_value_index=len(op.inputs), dim_index=0) @ops.RegisterGradient("ConcatV2") def _ConcatGradV2(op, grad): return _ConcatGradHelper( op, grad, start_value_index=0, end_value_index=-1, dim_index=-1) ops.NotDifferentiable("ConcatOffset") @ops.RegisterGradient("Slice") def _SliceGrad(op, grad): """Gradient for Slice op.""" # Create an Nx2 padding where the first column represents how many # zeros are to be prepended for each dimension, and the second # column indicates how many zeros are appended. # # The number of zeros to append is the shape of the input # elementwise-subtracted by both the begin vector and sizes vector. # # Some more reshaping is needed to assemble this tensor with the # right dimensions. 
input_vec = op.inputs[0] begin_vec = op.inputs[1] input_rank = array_ops.rank(input_vec) slice_size = array_ops.shape(op.outputs[0]) if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()): return gen_xla_ops.xla_dynamic_update_slice(array_ops.zeros_like(input_vec), grad, begin_vec), None, None shape = array_ops.stack([input_rank, 1]) before_pad = array_ops.reshape(begin_vec, shape) after_pad = array_ops.reshape( array_ops.shape(input_vec) - slice_size - begin_vec, shape) paddings = array_ops.concat([before_pad, after_pad], 1) return array_ops.pad(grad, paddings), None, None @ops.RegisterGradient("StridedSlice") def _StridedSliceGrad(op, grad): """Gradient for StridedSlice op.""" begin = op.inputs[1] end = op.inputs[2] strides = op.inputs[3] # StridedSliceGrad requires `x`, `begin`, `end` and `strides` to be of the # same dtype so we build a shape of the same type as other args. # Note that the choice of `begin` for specifying `out_type` is arbitrary. # We could choose any of {begin|end|strides}.dtype since they are required to # be the same. x = array_ops.shape(op.inputs[0], out_type=begin.dtype) x_static = tensor_util.constant_value(x) x = x_static if x_static is not None else x begin_static = tensor_util.constant_value(begin) begin = begin_static if begin_static is not None else begin end_static = tensor_util.constant_value(end) end = end_static if end_static is not None else end strides_static = tensor_util.constant_value(strides) strides = strides_static if strides_static is not None else strides return array_ops.strided_slice_grad( x, begin, end, strides, grad, begin_mask=op.get_attr("begin_mask"), end_mask=op.get_attr("end_mask"), ellipsis_mask=op.get_attr("ellipsis_mask"), new_axis_mask=op.get_attr("new_axis_mask"), shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None @ops.RegisterGradient("StridedSliceGrad") def _StridedSliceGradGrad(op, grad): """Gradient for StridedSliceGrad op.""" begin = op.inputs[1] end = op.inputs[2] strides = op.inputs[3] return None, None, None, None, array_ops.strided_slice( grad, begin, end, strides, begin_mask=op.get_attr("begin_mask"), end_mask=op.get_attr("end_mask"), ellipsis_mask=op.get_attr("ellipsis_mask"), new_axis_mask=op.get_attr("new_axis_mask"), shrink_axis_mask=op.get_attr("shrink_axis_mask")) @ops.RegisterGradient("TensorStridedSliceUpdate") def _TensorStridedSliceUpdateGrad(op, grad): # pylint:disable=missing-function-docstring begin = op.inputs[1] end = op.inputs[2] strides = op.inputs[3] begin_mask = op.get_attr("begin_mask") end_mask = op.get_attr("end_mask") ellipsis_mask = op.get_attr("ellipsis_mask") new_axis_mask = op.get_attr("new_axis_mask") shrink_axis_mask = op.get_attr("shrink_axis_mask") def Apply(f, *args): return f(*args, begin_mask=begin_mask, end_mask=end_mask, shrink_axis_mask=shrink_axis_mask, new_axis_mask=new_axis_mask, ellipsis_mask=ellipsis_mask) dy = Apply(array_ops.strided_slice, grad, begin, end, strides) dx = Apply(array_ops.tensor_strided_slice_update, grad, begin, end, strides, array_ops.zeros_like(dy)) return dx, None, None, None, dy @ops.RegisterGradient("Split") def _SplitGrad(op, *grads): return None, array_ops.concat(list(grads), op.inputs[0]) @ops.RegisterGradient("SplitV") def _SplitVGrad(op, *grads): returnval = array_ops.concat(list(grads), op.inputs[2]) returnval = [returnval] + [ None, ] * ( len(op.inputs) - 1) return returnval ops.NotDifferentiable("Const") @ops.RegisterGradient("Diag") def _DiagGrad(_, grad): return array_ops.diag_part(grad) 
@ops.RegisterGradient("DiagPart") def _DiagPartGrad(_, grad): return array_ops.diag(grad) @ops.RegisterGradient("MatrixDiag") def _MatrixDiagGrad(_, grad): return array_ops.matrix_diag_part(grad) @ops.RegisterGradient("MatrixDiagV2") def _MatrixDiagV2Grad(op, grad): return array_ops.matrix_diag_part( grad, k=op.inputs[1]), None, None, None, None @ops.RegisterGradient("MatrixDiagV3") def _MatrixDiagV3Grad(op, grad): return array_ops.matrix_diag_part( grad, k=op.inputs[1], align=op.get_attr("align")), None, None, None, None @ops.RegisterGradient("MatrixDiagPart") def _MatrixDiagPartGrad(op, grad): matrix_shape = op.inputs[0].get_shape()[-2:] if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]: return array_ops.matrix_diag(grad) else: return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad) @ops.RegisterGradient("MatrixDiagPartV2") def _MatrixDiagPartV2Grad(op, grad): """Gradient for MatrixDiagPartV2.""" matrix_shape = op.inputs[0].get_shape()[-2:] if matrix_shape.is_fully_defined(): return array_ops.matrix_diag( grad, k=op.inputs[1], num_rows=matrix_shape[0], num_cols=matrix_shape[1]), None, None else: return array_ops.matrix_set_diag( array_ops.zeros_like(op.inputs[0]), grad, k=op.inputs[1]), None, None @ops.RegisterGradient("MatrixDiagPartV3") def _MatrixDiagPartV3Grad(op, grad): """Gradient for MatrixDiagPartV3.""" matrix_shape = op.inputs[0].get_shape()[-2:] align = op.get_attr("align") if matrix_shape.is_fully_defined(): return array_ops.matrix_diag( grad, k=op.inputs[1], num_rows=matrix_shape[0], num_cols=matrix_shape[1], align=align), None, None else: return array_ops.matrix_set_diag( array_ops.zeros_like(op.inputs[0]), grad, k=op.inputs[1], align=align), None, None @ops.RegisterGradient("MatrixSetDiag") def _MatrixSetDiagGrad(op, grad): """Gradient for MatrixSetDiag.""" input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape()) diag_shape = op.inputs[1].get_shape() batch_shape = input_shape[:-2].merge_with(diag_shape[:-1]) matrix_shape = input_shape[-2:] if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined(): diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())] else: with ops.colocate_with(grad): grad_shape = array_ops.shape(grad) grad_rank = array_ops.rank(grad) batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2]) matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2]) min_dim = math_ops.reduce_min(matrix_shape) diag_shape = array_ops.concat([batch_shape, [min_dim]], 0) grad_input = array_ops.matrix_set_diag( grad, array_ops.zeros(diag_shape, dtype=grad.dtype)) grad_diag = array_ops.matrix_diag_part(grad) return (grad_input, grad_diag) @ops.RegisterGradient("MatrixSetDiagV2") def _MatrixSetDiagGradV2(op, grad): """Gradient for MatrixSetDiagV2.""" diag_shape = op.inputs[1].get_shape() if not diag_shape.is_fully_defined(): # Need to know the values of `d_lower` and `d_upper` to infer diag_shape. grad_shape = array_ops.shape(grad) batch_shape = grad_shape[:-2] matrix_shape = grad_shape[-2:] diag_index = array_ops.reshape(op.inputs[2], [-1]) # Converts to vector. d_lower = diag_index[0] d_upper = diag_index[-1] # Works both when len(diag_index) is 1 and 2. 
y_offset = control_flow_ops.cond( math_ops.less(d_upper, 0), lambda: d_upper, lambda: 0) x_offset = control_flow_ops.cond( math_ops.greater(d_lower, 0), lambda: -d_lower, lambda: 0) max_diag_len = math_ops.minimum(matrix_shape[0] + y_offset, matrix_shape[1] + x_offset) # pylint: disable=g-long-lambda # pyformat: disable postfix = control_flow_ops.cond( math_ops.equal(d_lower, d_upper), lambda: ops.convert_to_tensor([max_diag_len]), lambda: ops.convert_to_tensor([d_upper - d_lower + 1, max_diag_len])) # pyformat: enable # pylint: enable=g-long-lambda diag_shape = array_ops.concat([batch_shape, postfix], 0) grad_input = array_ops.matrix_set_diag( grad, array_ops.zeros(diag_shape, dtype=grad.dtype), k=op.inputs[2]) grad_diag = array_ops.matrix_diag_part(grad, k=op.inputs[2]) return (grad_input, grad_diag, None) @ops.RegisterGradient("MatrixSetDiagV3") def _MatrixSetDiagGradV3(op, grad): """Gradient for MatrixSetDiagV3.""" diag_shape = op.inputs[1].get_shape() align = op.get_attr("align") if not diag_shape.is_fully_defined(): # Need to know the values of `d_lower` and `d_upper` to infer diag_shape. grad_shape = array_ops.shape(grad) batch_shape = grad_shape[:-2] matrix_shape = grad_shape[-2:] diag_index = array_ops.reshape(op.inputs[2], [-1]) # Converts to vector. d_lower = diag_index[0] d_upper = diag_index[-1] # Works both when len(diag_index) is 1 and 2. y_offset = control_flow_ops.cond( math_ops.less(d_upper, 0), lambda: d_upper, lambda: 0) x_offset = control_flow_ops.cond( math_ops.greater(d_lower, 0), lambda: -d_lower, lambda: 0) max_diag_len = math_ops.minimum(matrix_shape[0] + y_offset, matrix_shape[1] + x_offset) # pylint: disable=g-long-lambda # pyformat: disable postfix = control_flow_ops.cond( math_ops.equal(d_lower, d_upper), lambda: ops.convert_to_tensor([max_diag_len]), lambda: ops.convert_to_tensor([d_upper - d_lower + 1, max_diag_len])) # pyformat: enable # pylint: enable=g-long-lambda diag_shape = array_ops.concat([batch_shape, postfix], 0) grad_input = array_ops.matrix_set_diag( grad, array_ops.zeros(diag_shape, dtype=grad.dtype), k=op.inputs[2], align=align) grad_diag = array_ops.matrix_diag_part(grad, k=op.inputs[2], align=align) return (grad_input, grad_diag, None) @ops.RegisterGradient("MatrixBandPart") def _MatrixBandPartGrad(op, grad): num_lower = op.inputs[1] num_upper = op.inputs[2] return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None) # Edit Distance has no gradient (but can be used to eval seq2seq or CTC). ops.NotDifferentiable("EditDistance") @ops.RegisterGradient("Fill") def _FillGrad(_, grad): return None, math_ops.reduce_sum(grad) ops.NotDifferentiable("ZerosLike") ops.NotDifferentiable("OnesLike") @ops.RegisterGradient("PreventGradient") def _PreventGradientGrad(op, _): raise LookupError("Gradient explicitly disabled. Reason: %s" % op.get_attr("message")) def _IndexedSlicesToTensorNoWarning(indexed_slices): """Converts an IndexedSlices to a Tensor without sparse->dense warnings.""" if not isinstance(indexed_slices, ops.IndexedSlices): # If it is not IndexedSlices, it's better be a tensor. return indexed_slices if indexed_slices.dense_shape is None: raise ValueError( "Tensor conversion requested for IndexedSlices without dense_shape: %s" % str(indexed_slices)) return math_ops.unsorted_segment_sum(indexed_slices.values, indexed_slices.indices, indexed_slices.dense_shape[0]) @ops.RegisterGradient("Gather") def _GatherGrad(op, grad): """Gradient for Gather op.""" # params can be large, so colocate the shape calculation with it. 
# # params can be very large for sparse model, array_ops.shape raises # exception on the Windows platform when any dimension is larger than # int32. params_shape is not used in optimizer apply_sparse gradients, # so it's fine to convert it back to int32 regardless of truncation. params = op.inputs[0] with ops.colocate_with(params): params_shape = array_ops.shape(params, out_type=ops.dtypes.int64) params_shape = math_ops.cast(params_shape, dtypes.int32) # Build appropriately shaped IndexedSlices indices = op.inputs[1] size = array_ops.expand_dims(array_ops.size(indices), 0) values_shape = array_ops.concat([size, params_shape[1:]], 0) values = array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), values_shape) indices = array_ops.reshape(indices, size) return [ops.IndexedSlices(values, indices, params_shape), None] def _GetBatchIndices(params_shape, indices, batch_dims): """Addds the batch offsets to the given indices and returns the results.""" batch_indices = indices indices_ndims = indices.shape.ndims indices_dtype = indices.dtype.base_dtype casted_params_shape = math_ops.cast(params_shape, indices_dtype) accum_dim_value = array_ops.ones((), dtype=indices_dtype) for dim in range(batch_dims, 0, -1): dim_value = casted_params_shape[dim - 1] accum_dim_value *= casted_params_shape[dim] start = array_ops.zeros((), dtype=indices_dtype) step = array_ops.ones((), dtype=indices_dtype) dim_indices = math_ops.range(start, dim_value, step) dim_indices *= accum_dim_value dim_shape = array_ops.stack( [1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0) batch_indices += array_ops.reshape(dim_indices, dim_shape) return batch_indices def _BatchGatherGrad(params_shape, values, indices, batch_dims, gather_dim_size): """Returns the gradient of GatherV2 with batch dimensions.""" # Axis is the first non-batch dimension. indices_size = array_ops.expand_dims(array_ops.size(indices), 0) if batch_dims: values_shape = array_ops.shape(values) # Add the batch offsets to indices and flatten the batch dimensions. outer_shape = values_shape[:batch_dims] inner_shape = values_shape[batch_dims:][1:] batch_size = gen_math_ops.prod(outer_shape, [0], False) flat_values_shape = array_ops.concat([[-1], inner_shape], 0) gather_dim_size *= batch_size indices = _GetBatchIndices(params_shape, indices, batch_dims) values = array_ops.reshape( _IndexedSlicesToTensorNoWarning(values), flat_values_shape) indices = array_ops.reshape(indices, indices_size) params_grad = math_ops.unsorted_segment_sum(values, indices, gather_dim_size) if batch_dims: # Put back the batch dimensions. params_grad = array_ops.reshape( params_grad, array_ops.concat([outer_shape, flat_values_shape], 0)) return params_grad @ops.RegisterGradient("GatherV2") def _GatherV2Grad(op, grad): """Gradient for GatherV2 op.""" # params can be large, so colocate the shape calculation with it. # # params can be very large for sparse model, array_ops.shape raises # exception on the Windows platform when any dimension is larger than # int32. params_shape is not used in optimizer apply_sparse gradients, # so it's fine to convert it back to int32 regardless of truncation. 
params = op.inputs[0] with ops.colocate_with(params): params_shape = array_ops.shape(params, out_type=ops.dtypes.int64) params_shape = math_ops.cast(params_shape, dtypes.int32) indices = op.inputs[1] indices_size = array_ops.expand_dims(array_ops.size(indices), 0) axis = op.inputs[2] axis_static = tensor_util.constant_value(axis) batch_dims = int(op.get_attr("batch_dims")) if batch_dims < 0: batch_dims += indices.shape.ndims # For axis 0 gathers, build an appropriately shaped IndexedSlices. if axis_static == 0: if context.executing_eagerly(): with ops.device(indices_size.device): params_tail_shape = array_ops.identity(params_shape)[1:] else: params_tail_shape = params_shape[1:] values_shape = array_ops.concat([indices_size, params_tail_shape], 0) values = array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), values_shape) indices = array_ops.reshape(indices, indices_size) params_grad = ops.IndexedSlices(values, indices, params_shape) else: # Handle axis by transposing the axis dimension to be the first non-batch # dimension, compute the gradient and transpose the result back. outer_shape = params_shape[:axis] inner_shape = params_shape[axis:][1:] values_shape = array_ops.concat([outer_shape, [-1], inner_shape], 0) values_dims = array_ops.size(values_shape) axis_dims = array_ops.size(outer_shape) outer_batches_indices = math_ops.range(batch_dims) batch_axis_indices = math_ops.range(batch_dims, axis_dims) inner_axes_indices = math_ops.range(axis_dims + 1, values_dims) values = array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), values_shape) # Move values[axis] up to values[batch_dims] transpose_dims = array_ops.concat([ outer_batches_indices, [axis_dims], batch_axis_indices, inner_axes_indices ], 0) values_transpose = array_ops.transpose(values, transpose_dims) params_grad = _BatchGatherGrad(params_shape, values_transpose, indices, batch_dims, params_shape[axis]) # Inverts the above transpose by moving dimension batch_dims back to its # original position. invert_transpose_dims = array_ops.concat([ outer_batches_indices, batch_axis_indices + 1, [batch_dims], inner_axes_indices ], 0) params_grad = array_ops.transpose(params_grad, invert_transpose_dims) return [params_grad, None, None] @ops.RegisterGradient("GatherNd") def _GatherNdGrad(op, grad): ref = op.inputs[0] indices = op.inputs[1] ref_shape = array_ops.shape(ref, out_type=indices.dtype) if indices.shape.ndims == 2 and indices.shape.dims[-1].value == 1: ref_grad = ops.IndexedSlices(grad, array_ops.squeeze(indices, axis=-1), ref_shape) else: ref_grad = array_ops.scatter_nd(indices, grad, ref_shape) return [ref_grad, None] @ops.RegisterGradient("ResourceGatherNd") def _ResourceGatherNdGrad(op, grad): # pylint: disable=missing-docstring ref = op.inputs[0] indices = op.inputs[1] ref_shape = gen_resource_variable_ops.variable_shape(ref, indices.dtype) if indices.shape.ndims == 2 and indices.shape.dims[-1].value == 1: ref_grad = ops.IndexedSlices(grad, array_ops.squeeze(indices, axis=-1), ref_shape) else: ref_grad = array_ops.scatter_nd(indices, grad, ref_shape) return [ref_grad, None] @ops.RegisterGradient("CheckNumerics") def _CheckNumericsGrad(op, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient. 
%s" % op.get_attr("message")) @ops.RegisterGradient("CheckNumericsV2") def _CheckNumericsV2Grad(op, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics_v2( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient. %s" % op.get_attr("message")) @ops.RegisterGradient("PlaceholderWithDefault") @ops.RegisterGradient("Identity") def _IdGrad(_, grad): return grad @ops.RegisterGradient("RefIdentity") def _RefIdGrad(_, grad): return grad @ops.RegisterGradient("IdentityN") def _IdNGrad(_, *grad): return grad ops.NotDifferentiable("StopGradient") @ops.RegisterGradient("Reshape") def _ReshapeGrad(op, grad): return [ array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0])), None ] ops.NotDifferentiable("InvertPermutation") def _ReshapeToInput(op, grad): """Reshapes the gradient to the shape of the original input.""" return array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0])) @ops.RegisterGradient("ExpandDims") def _ExpandDimsGrad(op, grad): return [_ReshapeToInput(op, grad), None] @ops.RegisterGradient("Squeeze") def _SqueezeGrad(op, grad): return _ReshapeToInput(op, grad) @ops.RegisterGradient("Transpose") def _TransposeGrad(op, grad): """Returns unshuffle(grad).""" p = op.inputs[1] return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None] @ops.RegisterGradient("ConjugateTranspose") def _ConjugateTransposeGrad(op, grad): """Returns conj(unshuffle(grad)).""" p = op.inputs[1] return [ array_ops.transpose( grad, array_ops.invert_permutation(p), conjugate=True), None ] ops.NotDifferentiable("Shape") ops.NotDifferentiable("ShapeN") ops.NotDifferentiable("Rank") ops.NotDifferentiable("Size") @ops.RegisterGradient("Tile") def _TileGrad(op, grad): """Sum reduces grad along the tiled dimensions.""" input_shape = array_ops.shape(op.inputs[0], out_type=op.inputs[1].dtype) # We interleave multiples and input_shape to get split_shape, # reshape grad to split_shape, and reduce along all even # dimensions (the tiled dimensions) to get the result # with shape input_shape. For example # input_shape = [20, 30, 40] # multiples = [2, 3, 4] # split_shape = [2, 20, 3, 30, 4, 40] # axes = [0, 2, 4] split_shape = array_ops.reshape( array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1]) axes = math_ops.range(0, array_ops.size(split_shape), 2) # Sum reduces grad along the first dimension for IndexedSlices if isinstance(grad, ops.IndexedSlices): input_shape_0 = math_ops.cast(input_shape[0], grad.indices.dtype) grad = math_ops.unsorted_segment_sum( grad.values, math_ops.mod(grad.indices, input_shape_0), input_shape_0) split_shape = array_ops.concat([[1], split_shape[1:]], axis=0) input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes) # Fix shape inference if not context.executing_eagerly(): input_grad.set_shape(op.inputs[0].get_shape()) return [input_grad, None] ops.NotDifferentiable("BroadcastGradientArgs") def _PadGrad(op, grad): """Gradient for Pad.""" # Pad introduces values around the original tensor, so the gradient function # slices the original shape out of the gradient.""" x = op.inputs[0] a = op.inputs[1] # [Rank(x), 2] # Takes a slice of a. The 1st column. [Rank(x), 1]. pad_before = array_ops.slice(a, [0, 0], array_ops.stack([array_ops.rank(x), 1])) # Make it a 1-D tensor. 
begin = array_ops.reshape(pad_before, [-1]) sizes = array_ops.shape(x, out_type=begin.dtype) x_grad = array_ops.slice(grad, begin, sizes) if len(op.inputs) == 3: return x_grad, None, None else: return x_grad, None ops.RegisterGradient("Pad")(_PadGrad) ops.RegisterGradient("PadV2")(_PadGrad) # ReverseSequence is just a permutation. The gradient permutes back. @ops.RegisterGradient("ReverseSequence") def _ReverseSequenceGrad(op, grad): seq_lengths = op.inputs[1] return [ array_ops.reverse_sequence( grad, batch_axis=op.get_attr("batch_dim"), seq_axis=op.get_attr("seq_dim"), seq_lengths=seq_lengths), None ] @ops.RegisterGradient("Reverse") def _ReverseGrad(op, grad): reverse_dims = op.inputs[1] return gen_array_ops.reverse(grad, reverse_dims), None @ops.RegisterGradient("ReverseV2") def _ReverseV2Grad(op, grad): axis = op.inputs[1] return array_ops.reverse_v2(grad, axis), None @ops.RegisterGradient("SpaceToBatch") def _SpaceToBatchGrad(op, grad): # Its gradient is the opposite op: BatchToSpace. block_size = op.get_attr("block_size") return [ array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size), None ] @ops.RegisterGradient("SpaceToBatchND") def _SpaceToBatchNDGrad(op, grad): # Its gradient is the opposite op: BatchToSpaceND. return [ array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]), None, None ] @ops.RegisterGradient("BatchToSpace") def _BatchToSpaceGrad(op, grad): # Its gradient is the opposite op: SpaceToBatch. block_size = op.get_attr("block_size") return [ array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size), None ] @ops.RegisterGradient("BatchToSpaceND") def _BatchToSpaceNDGrad(op, grad): # Its gradient is the opposite op: SpaceToBatchND. return [ array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]), None, None ] @ops.RegisterGradient("SpaceToDepth") def _SpaceToDepthGrad(op, grad): # Its gradient is the opposite op: DepthToSpace. block_size = op.get_attr("block_size") data_format = op.get_attr("data_format") if data_format == "NCHW_VECT_C": raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. " "NCHW_VECT_C requires qint8 data type.") return array_ops.depth_to_space(grad, block_size, data_format=data_format) @ops.RegisterGradient("DepthToSpace") def _DepthToSpaceGrad(op, grad): # Its gradient is the opposite op: SpaceToDepth. block_size = op.get_attr("block_size") data_format = op.get_attr("data_format") if data_format == "NCHW_VECT_C": raise ValueError("Cannot compute DepthToSpace gradient with NCHW_VECT_C. " "NCHW_VECT_C requires qint8 data type.") return array_ops.space_to_depth(grad, block_size, data_format=data_format) ops.NotDifferentiable("OneHot") @ops.RegisterGradient("MirrorPad") def _MirrorPadGrad(op, grad): mode = op.get_attr("mode") return [gen_array_ops.mirror_pad_grad(grad, op.inputs[1], mode=mode), None] @ops.RegisterGradient("MirrorPadGrad") def _MirrorPadGradGrad(op, grad): mode = op.get_attr("mode") return [gen_array_ops.mirror_pad(grad, op.inputs[1], mode=mode), None] @ops.RegisterGradient("QuantizeAndDequantize") def _QuantizeAndDequantizeGrad(_, grad): return grad @ops.RegisterGradient("QuantizeAndDequantizeV2") def _QuantizeAndDequantizeV2Grad(_, grad): return [grad, None, None] @ops.RegisterGradient("QuantizeAndDequantizeV3") def _QuantizeAndDequantizeV3Grad(_, grad): # Only propagate the gradient for the unquantized input. 
return [grad, None, None, None] @ops.RegisterGradient("ExtractImagePatches") def _ExtractImagePatchesGrad(op, grad): input_bhwc = array_ops.shape(op.inputs[0], out_type=dtypes.int64) batch_size, rows_in, cols_in, channels = input_bhwc[0], input_bhwc[1], \ input_bhwc[2], input_bhwc[3] # Create indices matrix for input tensor. # Note that 0 is preserved for padding location, # so indices for input start from 1 to 1 + rows_in * cols_in. input_indices_num = 1 + rows_in * cols_in input_idx = array_ops.reshape( math_ops.range(1, input_indices_num, dtype=ops.dtypes.int64), (1, rows_in, cols_in, 1)) input_idx_patched = gen_array_ops.extract_image_patches( input_idx, op.get_attr("ksizes"), op.get_attr("strides"), op.get_attr("rates"), op.get_attr("padding")) # Create indices matrix for output tensor. output_bhwc = array_ops.shape(op.outputs[0], out_type=dtypes.int64) rows_out, cols_out = output_bhwc[1], output_bhwc[2] _, ksize_r, ksize_c, _ = op.get_attr("ksizes") # Indices for output start from 0. output_indices_num = rows_out * cols_out * ksize_r * ksize_c output_idx = array_ops.reshape( math_ops.range(output_indices_num, dtype=ops.dtypes.int64), (1, rows_out, cols_out, ksize_r * ksize_c)) # Construct mapping table for indices: (input -> output). idx_matrix = array_ops.concat([ array_ops.expand_dims(input_idx_patched, axis=-1), array_ops.expand_dims(output_idx, axis=-1) ], axis=-1) idx_map = array_ops.reshape(idx_matrix, (-1, 2)) sp_shape = (input_indices_num, output_indices_num) sp_mat_full = sparse_tensor.SparseTensor( idx_map, array_ops.ones([output_indices_num], dtype=grad.dtype), sp_shape) # Remove all padding locations [0, :]. sp_mat = sparse_ops.sparse_slice(sp_mat_full, (1, 0), (input_indices_num - 1, output_indices_num)) grad_expanded = array_ops.transpose( array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), (batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)), (1, 2, 3, 4, 0, 5)) grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels)) jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat) grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels)) grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3)) return [grad_out] @ops.RegisterGradient("ExtractVolumePatches") def _ExtractVolumePatchesGrad(op, grad): batch_size, planes_in, rows_in, cols_in, channels = [ dim.value for dim in op.inputs[0].shape.dims ] input_bphwc = array_ops.shape(op.inputs[0]) batch_size = input_bphwc[0] channels = input_bphwc[4] # Create indices matrix for input tensor. # Note that 0 is preserved for padding location, # so indices for input start from 1 to 1 + rows_in * cols_in. input_indices_num = 1 + planes_in * rows_in * cols_in input_idx = array_ops.reshape( math_ops.range(1, input_indices_num, dtype=ops.dtypes.int64), (1, planes_in, rows_in, cols_in, 1)) input_idx_patched = gen_array_ops.extract_volume_patches( input_idx, op.get_attr("ksizes"), op.get_attr("strides"), op.get_attr("padding")) # Create indices matrix for output tensor. _, planes_out, rows_out, cols_out, _ = [ dim.value for dim in op.outputs[0].shape.dims ] _, ksize_p, ksize_r, ksize_c, _ = op.get_attr("ksizes") # Indices for output start from 0. prc_indices_num = planes_out * rows_out * cols_out output_indices_num = prc_indices_num * ksize_p * ksize_r * ksize_c output_idx = array_ops.reshape( math_ops.range(output_indices_num, dtype=ops.dtypes.int64), (1, planes_out, rows_out, cols_out, ksize_p * ksize_r * ksize_c)) # Construct mapping table for indices: (input -> output). 
idx_matrix = array_ops.concat([ array_ops.expand_dims(input_idx_patched, axis=-1), array_ops.expand_dims(output_idx, axis=-1) ], axis=-1) idx_map = array_ops.reshape(idx_matrix, (-1, 2)) sp_shape = (input_indices_num, output_indices_num) sp_mat_full = sparse_tensor.SparseTensor( idx_map, array_ops.ones([output_indices_num], dtype=grad.dtype), sp_shape) # Remove all padding locations [0, :]. sp_mat = sparse_ops.sparse_slice(sp_mat_full, (1, 0), (input_indices_num - 1, output_indices_num)) grad_expanded = array_ops.transpose( array_ops.reshape( _IndexedSlicesToTensorNoWarning(grad), (batch_size, planes_out, rows_out, cols_out, ksize_p, ksize_r, ksize_c, channels)), (1, 2, 3, 4, 5, 6, 0, 7)) grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels)) jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat) grad_out = array_ops.reshape( jac, (planes_in, rows_in, cols_in, batch_size, channels)) grad_out = array_ops.transpose(grad_out, (3, 0, 1, 2, 4)) return [grad_out] @ops.RegisterGradient("ScatterNd") def _ScatterNdGrad(op, grad): indices = op.inputs[0] updates_grad = array_ops.gather_nd(grad, indices) return [None, updates_grad, None] @ops.RegisterGradient("TensorScatterUpdate") def _TensorScatterUpdateGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) tensor_grad = array_ops.tensor_scatter_update( array_ops.identity(grad), indices, array_ops.zeros_like(op.inputs[2], dtype=grad.dtype)) return [tensor_grad, None, updates_grad] @ops.RegisterGradient("TensorScatterAdd") def _TensorScatterAddGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) tensor_grad = array_ops.identity(grad) return [tensor_grad, None, updates_grad] def _TensorScatterMinOrMaxGrad(op, grad): """Gradient for TensorScatterMin and TensorScatterMax.""" indices = op.inputs[1] x = op.inputs[0] y = op.inputs[2] output = op.outputs[0] x_indicators = math_ops.cast(math_ops.equal(x, output), grad.dtype) y_output = array_ops.gather_nd(output, indices) y_indicators = math_ops.cast(math_ops.equal(y, y_output), grad.dtype) ys_indicators = array_ops.scatter_nd(indices, y_indicators, array_ops.shape(x)) indicators = x_indicators + ys_indicators # All elements are >= 1. # If there are multiple minimum or maximum elements then the gradient will be # divided between them. 
x_grad = grad * x_indicators / indicators y_grad = array_ops.gather_nd(grad / indicators, indices) * y_indicators return [x_grad, None, y_grad] @ops.RegisterGradient("TensorScatterMax") def _TensorScatterMaxGrad(op, grad): """Gradient for TensorScatterMax op.""" return _TensorScatterMinOrMaxGrad(op, grad) @ops.RegisterGradient("TensorScatterMin") def _TensorScatterMinGrad(op, grad): """Gradient for TensorScatterMin op.""" return _TensorScatterMinOrMaxGrad(op, grad) @ops.RegisterGradient("TensorScatterSub") def _TensorScatterSubGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) tensor_grad = array_ops.identity(grad) return [tensor_grad, None, -updates_grad] @ops.RegisterGradient("ScatterNdNonAliasingAdd") def _ScatterNdNonAliasingAddGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) return [grad, None, updates_grad] @ops.RegisterGradient("BroadcastTo") def _BroadcastToGrad(op, grad): input_value = op.inputs[0] broadcast_shape = op.inputs[1] input_value_shape = array_ops.shape(input_value) if not isinstance(broadcast_shape, ops.EagerTensor): broadcast_shape_static = tensor_shape.TensorShape( pywrap_tf_session.TF_TryEvaluateConstant_wrapper( broadcast_shape.graph._c_graph, broadcast_shape._as_tf_output())) # pylint: disable=protected-access if broadcast_shape_static.is_fully_defined(): broadcast_shape = constant_op.constant( broadcast_shape_static.as_list(), dtype=dtypes.int32) _, reduction_axes = gen_array_ops.broadcast_gradient_args( broadcast_shape, input_value_shape) updates_grad_reshaped = math_ops.reduce_sum( grad, axis=reduction_axes, keepdims=True) updates_grad = array_ops.reshape(updates_grad_reshaped, input_value_shape) return [updates_grad, None]
aam-at/tensorflow
tensorflow/python/ops/array_grad.py
Python
apache-2.0
44,769
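A hedged sketch, not part of the file above, showing how these registered gradients are exercised through the public TensorFlow 2.x API: differentiating through tf.concat and tf.slice with tf.GradientTape should dispatch to the ConcatV2 and Slice gradient functions defined above.

# Illustrative sketch only: exercising the registered gradients via the
# public API rather than calling the gradient functions directly.
import tensorflow as tf

a = tf.constant([[1.0, 2.0]])
b = tf.constant([[3.0, 4.0]])

with tf.GradientTape() as tape:
    tape.watch([a, b])
    c = tf.concat([a, b], axis=0)    # ConcatV2 op; gradient from _ConcatGradV2
    s = tf.slice(c, [0, 0], [1, 2])  # Slice op; gradient from _SliceGrad
    loss = tf.reduce_sum(s)

da, db = tape.gradient(loss, [a, b])
# da should be [[1., 1.]] (the slice keeps row 0, i.e. `a`);
# db should be [[0., 0.]] (row 1 is discarded by the slice).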
__author__ = 'matti'

from common.database import db


class VideoModel(db.Model):
    __tablename__ = 'video'

    id = db.Column(db.Integer, primary_key=True)
    views = db.Column(db.Integer)
    title = db.Column(db.String(80))
    description = db.Column(db.Text)
    nicoid = db.Column(db.String(20), unique=True)
    videourl = db.Column(db.String(100), unique=True)
    originalupload = db.Column(db.Date)
    reupload = db.Column(db.Date)

    def __init__(self, title, description, nicoid, videourl, originalupload, reupload):
        self.title = title
        self.views = 0
        self.description = description
        self.nicoid = nicoid
        self.videourl = videourl
        self.originalupload = originalupload
        self.reupload = reupload

    def tojson(self):
        return {'id': self.id,
                'title': self.title,
                'description': self.description,
                'nicoid': self.nicoid,
                'videourl': self.videourl,
                'views': self.views,
                'originalupload': str(self.originalupload),
                'reupload': str(self.reupload)}
melonmanchan/gachimuch.io-api
api/models/video.py
Python
mit
1,086
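A hedged usage sketch, not taken from the repository above: constructing a VideoModel and serialising it with tojson(). The sample field values are invented, and the db.session calls assume a configured Flask-SQLAlchemy application context.

# Illustrative sketch only: assumes the same `db` object the model imports
# and an active Flask application context; all sample values are made up.
import datetime

from common.database import db

video = VideoModel(
    title='Example title',
    description='Example description',
    nicoid='sm0000000',                      # hypothetical Niconico id
    videourl='https://example.com/v/1.mp4',  # hypothetical URL
    originalupload=datetime.date(2015, 1, 1),
    reupload=datetime.date(2016, 1, 1),
)

db.session.add(video)
db.session.commit()
print(video.tojson())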
# -*- coding: UTF-8 -*-
# Copyright 2016 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""The :xfile:`models.py` module for `lino.modlib.wkhtmltopdf`.

Does not define any models. It exists only so the package can be used
as a Django app.
"""

from .choicelists import *
lino-framework/lino
lino/modlib/wkhtmltopdf/models.py
Python
bsd-2-clause
294
total = 0.0

with open('../data/portfolio.csv', 'r') as f:
    headers = next(f)  # skip the header row
    for line in f:
        line = line.strip()
        parts = line.split(',')
        parts[0] = parts[0].strip('"')
        parts[1] = parts[1].strip('"')
        parts[2] = int(parts[2])
        parts[3] = float(parts[3])
        total += parts[2] * parts[3]

print('Total cost:', total)
jcontesti/python-programming-language-livelessons-projects
03/port.py
Python
gpl-3.0
385
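An alternative sketch, not from the repository above, that computes the same total with the standard-library csv module so the quoted fields no longer need manual strip('"') calls; the column positions are taken from the script above.

# Illustrative alternative (not from the repository above): the csv module
# parses the quoted fields, so only the numeric columns need converting.
import csv

total = 0.0
with open('../data/portfolio.csv', 'r') as f:
    rows = csv.reader(f)
    next(rows)  # skip the header row
    for row in rows:
        # columns 2 and 3 hold the share count and price, as in the script above
        total += int(row[2]) * float(row[3])

print('Total cost:', total)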
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals import calendar import fnmatch import logging import os from codecs import open from collections import defaultdict from functools import partial from itertools import chain, groupby from operator import attrgetter from jinja2 import (BaseLoader, ChoiceLoader, Environment, FileSystemLoader, PrefixLoader, TemplateNotFound) import six from pelican import signals from pelican.cache import FileStampDataCacher from pelican.contents import Article, Draft, Page, Static, is_valid_content from pelican.readers import Readers from pelican.utils import (DateFormatter, copy, copy_file_metadata, mkdir_p, posixize_path, process_translations, python_2_unicode_compatible) logger = logging.getLogger(__name__) class PelicanTemplateNotFound(Exception): pass @python_2_unicode_compatible class Generator(object): """Baseclass generator""" def __init__(self, context, settings, path, theme, output_path, readers_cache_name='', **kwargs): self.context = context self.settings = settings self.path = path self.theme = theme self.output_path = output_path for arg, value in kwargs.items(): setattr(self, arg, value) self.readers = Readers(self.settings, readers_cache_name) # templates cache self._templates = {} self._templates_path = [] self._templates_path.append(os.path.expanduser( os.path.join(self.theme, 'templates'))) self._templates_path += self.settings['EXTRA_TEMPLATES_PATHS'] theme_path = os.path.dirname(os.path.abspath(__file__)) simple_loader = FileSystemLoader(os.path.join(theme_path, "themes", "simple", "templates")) self.env = Environment( trim_blocks=True, lstrip_blocks=True, loader=ChoiceLoader([ FileSystemLoader(self._templates_path), simple_loader, # implicit inheritance PrefixLoader({'!simple': simple_loader}) # explicit one ]), extensions=self.settings['JINJA_EXTENSIONS'], ) logger.debug('Template list: %s', self.env.list_templates()) # provide utils.strftime as a jinja filter self.env.filters.update({'strftime': DateFormatter()}) # get custom Jinja filters from user settings custom_filters = self.settings['JINJA_FILTERS'] self.env.filters.update(custom_filters) signals.generator_init.send(self) def get_template(self, name): """Return the template by name. Use self.theme to get the templates to use, and return a list of templates ready to use with Jinja2. 
""" if name not in self._templates: try: self._templates[name] = self.env.get_template(name + '.html') except TemplateNotFound: raise PelicanTemplateNotFound( '[templates] unable to load {}.html from {}'.format( name, self._templates_path)) return self._templates[name] def _include_path(self, path, extensions=None): """Inclusion logic for .get_files(), returns True/False :param path: the path which might be including :param extensions: the list of allowed extensions, or False if all extensions are allowed """ if extensions is None: extensions = tuple(self.readers.extensions) basename = os.path.basename(path) # check IGNORE_FILES ignores = self.settings['IGNORE_FILES'] if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores): return False ext = os.path.splitext(basename)[1][1:] if extensions is False or ext in extensions: return True return False def get_files(self, paths, exclude=[], extensions=None): """Return a list of files to use, based on rules :param paths: the list pf paths to search (relative to self.path) :param exclude: the list of path to exclude :param extensions: the list of allowed extensions (if False, all extensions are allowed) """ # backward compatibility for older generators if isinstance(paths, six.string_types): paths = [paths] # group the exclude dir names by parent path, for use with os.walk() exclusions_by_dirpath = {} for e in exclude: parent_path, subdir = os.path.split(os.path.join(self.path, e)) exclusions_by_dirpath.setdefault(parent_path, set()).add(subdir) files = [] ignores = self.settings['IGNORE_FILES'] for path in paths: # careful: os.path.join() will add a slash when path == ''. root = os.path.join(self.path, path) if path else self.path if os.path.isdir(root): for dirpath, dirs, temp_files in os.walk( root, followlinks=True): drop = [] excl = exclusions_by_dirpath.get(dirpath, ()) for d in dirs: if (d in excl or any(fnmatch.fnmatch(d, ignore) for ignore in ignores)): drop.append(d) for d in drop: dirs.remove(d) reldir = os.path.relpath(dirpath, self.path) for f in temp_files: fp = os.path.join(reldir, f) if self._include_path(fp, extensions): files.append(fp) elif os.path.exists(root) and self._include_path(path, extensions): files.append(path) # can't walk non-directories return files def add_source_path(self, content): """Record a source file path that a Generator found and processed. Store a reference to its Content object, for url lookups later. """ location = content.get_relative_source_path() self.context['filenames'][location] = content def _add_failed_source_path(self, path): """Record a source file path that a Generator failed to process. (For example, one that was missing mandatory metadata.) The path argument is expected to be relative to self.path. """ self.context['filenames'][posixize_path(os.path.normpath(path))] = None def _is_potential_source_path(self, path): """Return True if path was supposed to be used as a source file. (This includes all source files that have been found by generators before this method is called, even if they failed to process.) The path argument is expected to be relative to self.path. """ return (posixize_path(os.path.normpath(path)) in self.context['filenames']) def _update_context(self, items): """Update the context with the given items from the currrent processor. 
""" for item in items: value = getattr(self, item) if hasattr(value, 'items'): value = list(value.items()) # py3k safeguard for iterators self.context[item] = value def __str__(self): # return the name of the class for logging purposes return self.__class__.__name__ class CachingGenerator(Generator, FileStampDataCacher): '''Subclass of Generator and FileStampDataCacher classes enables content caching, either at the generator or reader level ''' def __init__(self, *args, **kwargs): '''Initialize the generator, then set up caching note the multiple inheritance structure ''' cls_name = self.__class__.__name__ Generator.__init__(self, *args, readers_cache_name=(cls_name + '-Readers'), **kwargs) cache_this_level = \ self.settings['CONTENT_CACHING_LAYER'] == 'generator' caching_policy = cache_this_level and self.settings['CACHE_CONTENT'] load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE'] FileStampDataCacher.__init__(self, self.settings, cls_name, caching_policy, load_policy ) def _get_file_stamp(self, filename): '''Get filestamp for path relative to generator.path''' filename = os.path.join(self.path, filename) return super(CachingGenerator, self)._get_file_stamp(filename) class _FileLoader(BaseLoader): def __init__(self, path, basedir): self.path = path self.fullpath = os.path.join(basedir, path) def get_source(self, environment, template): if template != self.path or not os.path.exists(self.fullpath): raise TemplateNotFound(template) mtime = os.path.getmtime(self.fullpath) with open(self.fullpath, 'r', encoding='utf-8') as f: source = f.read() return (source, self.fullpath, lambda: mtime == os.path.getmtime(self.fullpath)) class TemplatePagesGenerator(Generator): def generate_output(self, writer): for source, dest in self.settings['TEMPLATE_PAGES'].items(): self.env.loader.loaders.insert(0, _FileLoader(source, self.path)) try: template = self.env.get_template(source) rurls = self.settings['RELATIVE_URLS'] writer.write_file(dest, template, self.context, rurls, override_output=True) finally: del self.env.loader.loaders[0] class ArticlesGenerator(CachingGenerator): """Generate blog articles""" def __init__(self, *args, **kwargs): """initialize properties""" self.articles = [] # only articles in default language self.translations = [] self.dates = {} self.tags = defaultdict(list) self.categories = defaultdict(list) self.related_posts = [] self.authors = defaultdict(list) self.drafts = [] # only drafts in default language self.drafts_translations = [] super(ArticlesGenerator, self).__init__(*args, **kwargs) signals.article_generator_init.send(self) def generate_feeds(self, writer): """Generate the feeds from the current context, and output files.""" if self.settings.get('FEED_ATOM'): writer.write_feed(self.articles, self.context, self.settings['FEED_ATOM']) if self.settings.get('FEED_RSS'): writer.write_feed(self.articles, self.context, self.settings['FEED_RSS'], feed_type='rss') if (self.settings.get('FEED_ALL_ATOM') or self.settings.get('FEED_ALL_RSS')): all_articles = list(self.articles) for article in self.articles: all_articles.extend(article.translations) all_articles.sort(key=attrgetter('date'), reverse=True) if self.settings.get('FEED_ALL_ATOM'): writer.write_feed(all_articles, self.context, self.settings['FEED_ALL_ATOM']) if self.settings.get('FEED_ALL_RSS'): writer.write_feed(all_articles, self.context, self.settings['FEED_ALL_RSS'], feed_type='rss') for cat, arts in self.categories: arts.sort(key=attrgetter('date'), reverse=True) if 
self.settings.get('CATEGORY_FEED_ATOM'): writer.write_feed(arts, self.context, self.settings['CATEGORY_FEED_ATOM'] % cat.slug, feed_title=cat.name) if self.settings.get('CATEGORY_FEED_RSS'): writer.write_feed(arts, self.context, self.settings['CATEGORY_FEED_RSS'] % cat.slug, feed_title=cat.name, feed_type='rss') for auth, arts in self.authors: arts.sort(key=attrgetter('date'), reverse=True) if self.settings.get('AUTHOR_FEED_ATOM'): writer.write_feed(arts, self.context, self.settings['AUTHOR_FEED_ATOM'] % auth.slug, feed_title=auth.name) if self.settings.get('AUTHOR_FEED_RSS'): writer.write_feed(arts, self.context, self.settings['AUTHOR_FEED_RSS'] % auth.slug, feed_title=auth.name, feed_type='rss') if (self.settings.get('TAG_FEED_ATOM') or self.settings.get('TAG_FEED_RSS')): for tag, arts in self.tags.items(): arts.sort(key=attrgetter('date'), reverse=True) if self.settings.get('TAG_FEED_ATOM'): writer.write_feed(arts, self.context, self.settings['TAG_FEED_ATOM'] % tag.slug, feed_title=tag.name) if self.settings.get('TAG_FEED_RSS'): writer.write_feed(arts, self.context, self.settings['TAG_FEED_RSS'] % tag.slug, feed_title=tag.name, feed_type='rss') if (self.settings.get('TRANSLATION_FEED_ATOM') or self.settings.get('TRANSLATION_FEED_RSS')): translations_feeds = defaultdict(list) for article in chain(self.articles, self.translations): translations_feeds[article.lang].append(article) for lang, items in translations_feeds.items(): items.sort(key=attrgetter('date'), reverse=True) if self.settings.get('TRANSLATION_FEED_ATOM'): writer.write_feed( items, self.context, self.settings['TRANSLATION_FEED_ATOM'] % lang) if self.settings.get('TRANSLATION_FEED_RSS'): writer.write_feed( items, self.context, self.settings['TRANSLATION_FEED_RSS'] % lang, feed_type='rss') def generate_articles(self, write): """Generate the articles.""" for article in chain(self.translations, self.articles): signals.article_generator_write_article.send(self, content=article) write(article.save_as, self.get_template(article.template), self.context, article=article, category=article.category, override_output=hasattr(article, 'override_save_as'), blog=True) def generate_period_archives(self, write): """Generate per-year, per-month, and per-day archives.""" try: template = self.get_template('period_archives') except PelicanTemplateNotFound: template = self.get_template('archives') period_save_as = { 'year': self.settings['YEAR_ARCHIVE_SAVE_AS'], 'month': self.settings['MONTH_ARCHIVE_SAVE_AS'], 'day': self.settings['DAY_ARCHIVE_SAVE_AS'], } period_date_key = { 'year': attrgetter('date.year'), 'month': attrgetter('date.year', 'date.month'), 'day': attrgetter('date.year', 'date.month', 'date.day') } def _generate_period_archives(dates, key, save_as_fmt): """Generate period archives from `dates`, grouped by `key` and written to `save_as`. 
""" # `dates` is already sorted by date for _period, group in groupby(dates, key=key): archive = list(group) # arbitrarily grab the first date so that the usual # format string syntax can be used for specifying the # period archive dates date = archive[0].date save_as = save_as_fmt.format(date=date) context = self.context.copy() if key == period_date_key['year']: context["period"] = (_period,) else: month_name = calendar.month_name[_period[1]] if not six.PY3: month_name = month_name.decode('utf-8') if key == period_date_key['month']: context["period"] = (_period[0], month_name) else: context["period"] = (_period[0], month_name, _period[2]) write(save_as, template, context, dates=archive, blog=True) for period in 'year', 'month', 'day': save_as = period_save_as[period] if save_as: key = period_date_key[period] _generate_period_archives(self.dates, key, save_as) def generate_direct_templates(self, write): """Generate direct templates pages""" PAGINATED_TEMPLATES = self.settings['PAGINATED_DIRECT_TEMPLATES'] for template in self.settings['DIRECT_TEMPLATES']: paginated = {} if template in PAGINATED_TEMPLATES: paginated = {'articles': self.articles, 'dates': self.dates} save_as = self.settings.get("%s_SAVE_AS" % template.upper(), '%s.html' % template) if not save_as: continue write(save_as, self.get_template(template), self.context, blog=True, paginated=paginated, page_name=os.path.splitext(save_as)[0]) def generate_tags(self, write): """Generate Tags pages.""" tag_template = self.get_template('tag') for tag, articles in self.tags.items(): articles.sort(key=attrgetter('date'), reverse=True) dates = [article for article in self.dates if article in articles] write(tag.save_as, tag_template, self.context, tag=tag, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, blog=True, page_name=tag.page_name, all_articles=self.articles) def generate_categories(self, write): """Generate category pages.""" category_template = self.get_template('category') for cat, articles in self.categories: articles.sort(key=attrgetter('date'), reverse=True) dates = [article for article in self.dates if article in articles] write(cat.save_as, category_template, self.context, category=cat, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, blog=True, page_name=cat.page_name, all_articles=self.articles) def generate_authors(self, write): """Generate Author pages.""" author_template = self.get_template('author') for aut, articles in self.authors: articles.sort(key=attrgetter('date'), reverse=True) dates = [article for article in self.dates if article in articles] write(aut.save_as, author_template, self.context, author=aut, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, blog=True, page_name=aut.page_name, all_articles=self.articles) def generate_drafts(self, write): """Generate drafts pages.""" for draft in chain(self.drafts_translations, self.drafts): write(draft.save_as, self.get_template(draft.template), self.context, article=draft, category=draft.category, override_output=hasattr(draft, 'override_save_as'), blog=True, all_articles=self.articles) def generate_pages(self, writer): """Generate the pages on the disk""" write = partial(writer.write_file, relative_urls=self.settings['RELATIVE_URLS']) # to minimize the number of relative path stuff modification # in writer, articles pass first self.generate_articles(write) self.generate_period_archives(write) self.generate_direct_templates(write) # and subfolders after that 
self.generate_tags(write) self.generate_categories(write) self.generate_authors(write) self.generate_drafts(write) def generate_context(self): """Add the articles into the shared context""" all_articles = [] all_drafts = [] for f in self.get_files( self.settings['ARTICLE_PATHS'], exclude=self.settings['ARTICLE_EXCLUDES']): article_or_draft = self.get_cached_data(f, None) if article_or_draft is None: # TODO needs overhaul, maybe nomad for read_file # solution, unified behaviour try: article_or_draft = self.readers.read_file( base_path=self.path, path=f, content_class=Article, context=self.context, preread_signal=signals.article_generator_preread, preread_sender=self, context_signal=signals.article_generator_context, context_sender=self) except Exception as e: logger.error( 'Could not process %s\n%s', f, e, exc_info=self.settings.get('DEBUG', False)) self._add_failed_source_path(f) continue if not is_valid_content(article_or_draft, f): self._add_failed_source_path(f) continue if article_or_draft.status.lower() == "published": all_articles.append(article_or_draft) elif article_or_draft.status.lower() == "draft": article_or_draft = self.readers.read_file( base_path=self.path, path=f, content_class=Draft, context=self.context, preread_signal=signals.article_generator_preread, preread_sender=self, context_signal=signals.article_generator_context, context_sender=self) self.add_source_path(article_or_draft) all_drafts.append(article_or_draft) else: logger.error( "Unknown status '%s' for file %s, skipping it.", article_or_draft.status, f) self._add_failed_source_path(f) continue self.cache_data(f, article_or_draft) self.add_source_path(article_or_draft) self.articles, self.translations = process_translations( all_articles, order_by=self.settings['ARTICLE_ORDER_BY']) self.drafts, self.drafts_translations = \ process_translations(all_drafts) signals.article_generator_pretaxonomy.send(self) for article in self.articles: # only main articles are listed in categories and tags # not translations self.categories[article.category].append(article) if hasattr(article, 'tags'): for tag in article.tags: self.tags[tag].append(article) for author in getattr(article, 'authors', []): self.authors[author].append(article) self.dates = list(self.articles) self.dates.sort(key=attrgetter('date'), reverse=self.context['NEWEST_FIRST_ARCHIVES']) # and generate the output :) # order the categories per name self.categories = list(self.categories.items()) self.categories.sort( reverse=self.settings['REVERSE_CATEGORY_ORDER']) self.authors = list(self.authors.items()) self.authors.sort() self._update_context(('articles', 'dates', 'tags', 'categories', 'authors', 'related_posts', 'drafts')) self.save_cache() self.readers.save_cache() signals.article_generator_finalized.send(self) def generate_output(self, writer): self.generate_feeds(writer) self.generate_pages(writer) signals.article_writer_finalized.send(self, writer=writer) class PagesGenerator(CachingGenerator): """Generate pages""" def __init__(self, *args, **kwargs): self.pages = [] self.hidden_pages = [] self.hidden_translations = [] super(PagesGenerator, self).__init__(*args, **kwargs) signals.page_generator_init.send(self) def generate_context(self): all_pages = [] hidden_pages = [] for f in self.get_files( self.settings['PAGE_PATHS'], exclude=self.settings['PAGE_EXCLUDES']): page = self.get_cached_data(f, None) if page is None: try: page = self.readers.read_file( base_path=self.path, path=f, content_class=Page, context=self.context, 
preread_signal=signals.page_generator_preread, preread_sender=self, context_signal=signals.page_generator_context, context_sender=self) except Exception as e: logger.error( 'Could not process %s\n%s', f, e, exc_info=self.settings.get('DEBUG', False)) self._add_failed_source_path(f) continue if not is_valid_content(page, f): self._add_failed_source_path(f) continue if page.status.lower() == "published": all_pages.append(page) elif page.status.lower() == "hidden": hidden_pages.append(page) else: logger.error( "Unknown status '%s' for file %s, skipping it.", page.status, f) self._add_failed_source_path(f) continue self.cache_data(f, page) self.add_source_path(page) self.pages, self.translations = process_translations( all_pages, order_by=self.settings['PAGE_ORDER_BY']) self.hidden_pages, self.hidden_translations = \ process_translations(hidden_pages) self._update_context(('pages', 'hidden_pages')) self.save_cache() self.readers.save_cache() signals.page_generator_finalized.send(self) def generate_output(self, writer): for page in chain(self.translations, self.pages, self.hidden_translations, self.hidden_pages): writer.write_file( page.save_as, self.get_template(page.template), self.context, page=page, relative_urls=self.settings['RELATIVE_URLS'], override_output=hasattr(page, 'override_save_as')) signals.page_writer_finalized.send(self, writer=writer) class StaticGenerator(Generator): """copy static paths (what you want to copy, like images, medias etc. to output""" def __init__(self, *args, **kwargs): super(StaticGenerator, self).__init__(*args, **kwargs) signals.static_generator_init.send(self) def _copy_paths(self, paths, source, destination, output_path, final_path=None): """Copy all the paths from source to destination""" for path in paths: if final_path: copy(os.path.join(source, path), os.path.join(output_path, destination, final_path), self.settings['IGNORE_FILES']) else: copy(os.path.join(source, path), os.path.join(output_path, destination, path), self.settings['IGNORE_FILES']) def generate_context(self): self.staticfiles = [] for f in self.get_files(self.settings['STATIC_PATHS'], exclude=self.settings['STATIC_EXCLUDES'], extensions=False): # skip content source files unless the user explicitly wants them if self.settings['STATIC_EXCLUDE_SOURCES']: if self._is_potential_source_path(f): continue static = self.readers.read_file( base_path=self.path, path=f, content_class=Static, fmt='static', context=self.context, preread_signal=signals.static_generator_preread, preread_sender=self, context_signal=signals.static_generator_context, context_sender=self) self.staticfiles.append(static) self.add_source_path(static) self._update_context(('staticfiles',)) signals.static_generator_finalized.send(self) def generate_output(self, writer): self._copy_paths(self.settings['THEME_STATIC_PATHS'], self.theme, self.settings['THEME_STATIC_DIR'], self.output_path, os.curdir) # copy all Static files for sc in self.context['staticfiles']: source_path = os.path.join(self.path, sc.source_path) save_as = os.path.join(self.output_path, sc.save_as) mkdir_p(os.path.dirname(save_as)) logger.info('Copying %s to %s', sc.source_path, sc.save_as) copy_file_metadata(source_path, save_as) class SourceFileGenerator(Generator): def generate_context(self): self.output_extension = self.settings['OUTPUT_SOURCES_EXTENSION'] def _create_source(self, obj): output_path, _ = os.path.splitext(obj.save_as) dest = os.path.join(self.output_path, output_path + self.output_extension) copy(obj.source_path, dest) def 
generate_output(self, writer=None): logger.info('Generating source files...') for obj in chain(self.context['articles'], self.context['pages']): self._create_source(obj) for obj_trans in obj.translations: self._create_source(obj_trans)
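# Illustrative sketch only (not part of Pelican): shows how a period key such
# as ``lambda article: (article.date.year, article.date.month)`` splits an
# already date-sorted article list into the per-period buckets consumed by
# _generate_period_archives above. ``FakeArticle`` and the sample dates are
# hypothetical stand-ins for real Article objects.
def _period_grouping_sketch():
    from collections import namedtuple
    from datetime import datetime
    from itertools import groupby

    FakeArticle = namedtuple('FakeArticle', ['date'])
    dates = [FakeArticle(datetime(2015, 1, 5)),
             FakeArticle(datetime(2015, 1, 20)),
             FakeArticle(datetime(2015, 2, 3))]
    month_key = lambda article: (article.date.year, article.date.month)
    # groupby only merges adjacent items, which is why the list must already
    # be sorted by date before grouping.
    return [(period, list(group)) for period, group in groupby(dates, key=month_key)]
    # -> [((2015, 1), [two January articles]), ((2015, 2), [one February article])]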
Rogdham/pelican
pelican/generators.py
Python
agpl-3.0
31,246
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2005-2006 Al Riddoch (See the file COPYING for details).

import sys

from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D

from random import *

import server

class Combat(server.Task):
    """A very simple combat system example."""
    def attack_operation(self, op):
        """ The attack op is FROM the character that initiated combat,
            which we term the attacker, TO the character that is attacked,
            which we term the defender. We store the ID of the opponent. """
        # Check if the attacked character's stamina is too low for combat
        if self.character.stamina < 0.1:
            # print "Aborting defender stamina low"
            self.irrelevant()
            return
        assert(op.from_ != op.to)
        if op.to != self.character.id:
            self.oponent = op.to
            # print "Attack operation is not to this character"
            # We have initiative
        else:
            self.oponent = op.from_
            # print "Attack operation is to this character"
            self.surprise = True
            # We do not have initiative
        # Attach this task to the attacker. It's already implicitly attached
        # to the defender who owns this task.
        a=server.world.get_object(self.oponent)
        # Check if the attacking character's stamina is too low for combat
        if not a or a.stamina < 0.1:
            self.irrelevant()
            return
        # a.set_task(self.cppthing)
        self.square_range = 25
    def tick_operation(self, op):
        """ This method is called repeatedly, each time a combat turn occurs.
            In this example the interval is fixed, but it can be varied.
            self.character is the character that owns this task, and
            self.oponent is the ID of the other combatant. """
        # if self.count() < 2:
            # print "Someone has dropped out"
            # self.irrelevant()
            # return
        assert(self.character.id == op.to)
        if self.character.stamina <= 0:
            # print "I am exhausted"
            self.irrelevant()
            return
        attacker = self.character
        if not attacker:
            sys.stderr.write("Attacker owning combat task destroyed, but task still running")
            self.irrelevant()
            return
        if attacker.stamina <= 0:
            # print "Attacker exhausted"
            self.irrelevant()
            return
        defender = server.world.get_object(self.oponent)
        if not defender:
            # print "No defender"
            self.irrelevant()
            return
        if hasattr(self, 'surprise') and self.surprise:
            # print 'Surprised!'
            self.surprise = False
            return self.next_tick(0.75 + uniform(0,0.25))
        if square_distance(self.character.location, defender.location) > self.square_range:
            return self.next_tick(1.75 + uniform(0,0.25))
        a=self.character.id
        d=self.oponent
        # A very simple formula is used to determine the damage done
        damage = (attacker.statistics.attack / defender.statistics.defence) / uniform(2,10)
        # Damage is counted against stamina, to ensure combat is non-lethal,
        # and make recovery easier.
        stamina=defender.stamina-damage
        if stamina<0: stamina=0
        set_arg=Entity(self.oponent, stamina=stamina)
        # We send 3 operations to indicate what is going on. The imaginary ops
        # provide emotes for the actions. The sight(attack) operation
        # indicates that a single-shot animation of attacking should be
        # triggered on the attacker.
        attacker.send_world(Operation("imaginary", Entity(description="hits for massive damage."), to=attacker))
        attacker.send_world(Operation("sight", Operation("attack", to=d, from_=a)))
        defender.send_world(Operation("imaginary", Entity(description="defends skillfully."), to=defender))
        # If the defender's stamina has reached zero, combat is over, and emotes
        # are sent to indicate this.
        if stamina <= 0:
            set_arg.status = defender.status - 0.1
            defender.send_world(Operation("imaginary", Entity(description="has been defeated"), to=defender))
            defender.send_world(Operation("sight", Operation("collapse", from_=d)))
            attacker.send_world(Operation("imaginary", Entity(description="is victorious"), to=attacker))
            self.irrelevant()
        res=Oplist()
        # This set op makes the change to the defender's stamina, and a small
        # health change if they have been defeated
        res.append(Operation("set", set_arg, to=defender))
        # Turn the attacker to face the defender. This has to go through
        # the mind2body interface, so it does not interrupt what the
        # character is doing.
        faceop=self.face(defender)
        if faceop:
            faceop=attacker.mind2body(faceop)
            if faceop:
                res.append(faceop)
        # Don't return the following tick op if this task is now complete
        if self.obsolete(): return res
        # Schedule a new tick op
        res.append(self.next_tick(1.75 + uniform(0,0.25)))
        return res
    def face(self, other):
        """ Turn to face the other character, ensuring that we are facing
            the character we are hitting """
        vector = distance_to(self.character.location, other.location)
        vector.z = 0
        if vector.square_mag() < 0.1:
            return
        vector = vector.unit_vector()
        newloc = Location(self.character.location.parent)
        newloc.orientation = Quaternion(Vector3D(1,0,0), vector)
        return Operation("move", Entity(self.character.id, location=newloc))
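# Illustrative, standalone restatement of the damage rule used in
# tick_operation above; it is not part of the Cyphesis API and is never
# called by the task. The argument names are hypothetical.
def _damage_sketch(attack, defence, stamina):
    from random import uniform
    # attack/defence scaled down by a random factor between 2 and 10 ...
    damage = (float(attack) / defence) / uniform(2, 10)
    # ... is taken out of stamina, which never drops below zero.
    return max(stamina - damage, 0)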
alriddoch/cyphesis
rulesets/mason/world/tasks/Combat.py
Python
gpl-2.0
6,055
""" A script to create some dummy users """ from django.core.management.base import BaseCommand from student.models import CourseEnrollment from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey from student.views import _do_create_account, get_random_post_override def create(num, course_key): """Create num users, enrolling them in course_key if it's not None""" for idx in range(num): (user, user_profile, __) = _do_create_account(get_random_post_override()) if course_key is not None: CourseEnrollment.enroll(user, course_key) class Command(BaseCommand): help = """Create N new users, with random parameters. Usage: create_random_users.py N [course_id_to_enroll_in]. Examples: create_random_users.py 1 create_random_users.py 10 MITx/6.002x/2012_Fall create_random_users.py 100 HarvardX/CS50x/2012 """ def handle(self, *args, **options): if len(args) < 1 or len(args) > 2: print Command.help return num = int(args[0]) if len(args) == 2: try: course_key = CourseKey.from_string(args[1]) except InvalidKeyError: course_key = SlashSeparatedCourseKey.from_deprecated_string(args[1]) else: course_key = None create(num, course_key)
geekaia/edx-platform
common/djangoapps/student/management/commands/create_random_users.py
Python
agpl-3.0
1,412
#!/usr/bin/env python # -*- coding:gbk -*- import sys import re import os import time import string import datetime import getopt import tushare as ts sys.path.append('.') from internal.common_inf import * from internal.dfcf_inf import * from internal.ts_common import * def rt_quotes(dtFrame, source, qt_stage): print(source) for index,row in dtFrame.iterrows(): #print(row) r1_len = len(row[1]) r1 = row[1].decode('gbk') for i in range(10-r1_len): r1 += ' ' if row['state']!='00': line = "%06s %-s -- --" %(row[0], r1) print (line) continue open = row['open'] pre_close = row['p_close'] price = row['price'] high = row['high'] low = row['low'] volume = int(row['volume']) price_f = float(price) pre_close_f = float(pre_close) bidb_f = float(row['bidb']) bidb_s = float(row['bids']) if float(price)==0 or float(high)==0: change = '-' change_l = '-' change_h = '-' change_o = '-' if bidb_f==0 and bidb_s==0: pass elif bidb_f!=0: price_f = bidb_f change = '%02.02f'%( ((price_f-pre_close_f)/pre_close_f)*100 ) elif bidb_s!=0: price_f = bids_f change = '%02.02f'%( ((price_f-pre_close_f)/pre_close_f)*100 ) else: print("Error: Special Case", price, bidb, bids) print(row) else: change = '%02.02f'%( ((price_f-pre_close_f)/pre_close_f)*100 ) change_l = '%02.02f'%( ((float(low)-pre_close_f)/pre_close_f)*100 ) change_h = '%02.02f'%( ((float(high)-pre_close_f)/pre_close_f)*100 ) change_o = '%02.02f'%( ((float(open)-pre_close_f)/pre_close_f)*100 ) str_fmt = "%06s %-s %6.2f(%6s%%) %8s(%6s) %8s(%6s)" line = str_fmt %(row[0], r1, price_f, change, low, change_l, high, change_h) print(line) def index_follow_ud(head, index_ud): if index_ud!='': obj = index_ud.split('|') up = obj[0] ping = obj[1] down = obj[2] print("%s %4s %4s %4s"%(head, up, ping, down)) else: print(head) def index_info(df, show_idx, qt_index): if df is None: return sh_info = '' sz_info = '' if qt_index is not None: qtObj = re.match(r'"(.*?)","(.*)"', qt_index) if qtObj is None: print("Invalid qt_index", qt_index) else: index_dt = qtObj.group(1) #print (index_dt) itemObj = index_dt.split(',') sh_info = itemObj[6] sz_info = itemObj[7] for index,row in df.iterrows(): if row[0] not in show_idx: continue open = float(row['open']) close = float(row['close']) preclose = float(row['preclose']) if row['code'] == '000001': head = "%8.2f(%6s)"%(close, row[2]) index_follow_ud(head, sh_info) elif row['code'] == '399001': head = "%8.2f(%6s)"%(close, row[2]) index_follow_ud(head, sz_info) else: print("%8.2f(%6s)"%(close, row[2])) def read_def(data_path, stockCode, stockCode_sn): file = open(data_path, 'r') if file is None: print("Error open file", data_path) return if '_self_define' in data_path: flag=0 lines = file.readlines(10000) for line in lines: line=line.strip() if line=='STK': flag=1 continue elif flag==1 and line=='END': break if flag==0: continue code=line.strip() if len(code)!=6: continue; if not code.isdigit(): continue; stockCode.append(code) ncode = sina_code(code) stockCode_sn.append(ncode) else: line = file.readline() while line: if len(line)>=6: code = line[0:6] if code.isdigit(): stockCode.append(code) ncode = sina_code(code) stockCode_sn.append(ncode) line = file.readline() file.close() def extract_code(file, key_title, index, dict): list = [] stockList = [] dayList = [] line = file.readline() max_val = 0 while line: if len(line)<=3: if index>2: if int(max_val)==0: return #print (stockList) #print (dayList) for idx in range(len(dayList)): #print(idx, dayList[idx], max_val) if max_val==dayList[idx]: list.append(stockList[idx]) stockList = list 
dict[key_title[index]] = stockList return #obj = line.split(' ') #print(obj[0], obj[1], obj[2]) obj = re.match(r' *([\d]+) ([\d]+).* ([-]?\d+\.[\d]+)[ \t]+(\d+) ', line) if obj is None: print("obj is None" + line) else: if int(obj.group(4))>2 and index<=2: stockList.append(obj.group(2)) elif index>=3: #print(key_title[index], obj.group(2), obj.group(4)) stockList.append(obj.group(2)) dayInt = int(obj.group(4)) dayList.append(dayInt) if max_val<dayInt: max_val = dayInt line = file.readline() #end while #Main curdate = '' data_path = "debug/_self_define.txt" exclude = 0 show_flag = 0 stockCode = [] stockCode_sn = [] qt_stage = 0 pre_day = 1 if __name__=="__main__": optlist, args = getopt.getopt(sys.argv[1:], '?d:') for option, value in optlist: if option in ["-d","--preday"]: pre_day=int(value) if pre_day<1: print("pre_day must greater than 0") exit() elif option in ["-?","--???"]: print("Usage:" + os.path.basename(sys.argv[0]) + " [-d pre_day]") exit() tradeList = [] get_pre_trade_date(tradeList, pre_day+3) #print("td list:", tradeList) data_path = "../data/entry/realtime/rt_" + tradeList[pre_day-1] + ".txt" #print(data_path) if not os.path.isfile(data_path): print("No file:" + data_path) exit(0) file = open(data_path, 'r') if file is None: print("Error open file" + data_path) exit() column = [] create_column(column) qt_stage = quotation_st() print(tradeList[pre_day-1] + " Info") p_flag = 0 index = 0 line = file.readline() dict = {} key_title = ['YZZT ', 'ZT ', 'ZTHL ', 'YZDT ', 'DT ', 'DTFT '] while line: if len(line)<=6: line = file.readline() continue if p_flag == 1: for i in range(len(key_title)): if line[:3]==key_title[i][:3]: index = i #print(i, key_title[i]) extract_code(file, key_title, i, dict) break if line[:6] == "TIME: ": p_flag=0 break #print line line = file.readline() continue if line[:6] == "TIME: ": obj = re.match(r'TIME: (.*) (\d+):(\d+)', line) hour = int(obj.group(2)) minute = int(obj.group(3)) if hour>=15 and minute>0: #print (line) p_flag = 1 line = file.readline() for item in key_title: if dict.has_key(item): #print(dict[item]) sn_code = [] for cd in dict[item]: ncode = sina_code(cd) sn_code.append(ncode) rt_list = [] realtime_price(sn_code, rt_list) df = pd.DataFrame(rt_list, columns=column) rt_quotes(df, item, qt_stage) #end for key_title #print(dict)
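# Small helper sketch (hypothetical, not used above): the percent-change
# formula that rt_quotes applies to price, low, high and open, factored out
# for clarity. Returns a string such as '2.35' for a 2.35% move.
def _pct_change_sketch(price, pre_close):
    return '%02.02f' % (((float(price) - float(pre_close)) / float(pre_close)) * 100)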
yudingding6197/fin_script
debug/qiang_ruo_trace.py
Python
gpl-2.0
6,469
import json
import operator
import os

import postgres
from fuzzywuzzy import fuzz


def search_nhd(name, lat, lon, limit=10):
    """Search the National Hydrography Dataset (stored in postgres)"""
    # lat and lon must be able to be coerced to floats
    try:
        lat = float(lat)
        lon = float(lon)
    except(ValueError):
        return None

    # lat and lon must be within the -180 to 180 range
    if not all([-180 < lat < 180, -180 < lon < 180]):
        return None

    # We're using postgres.py and the DATABASE_URL environment variable so we
    # can maximize the reusability of this code
    db = postgres.Postgres(os.environ.get('DATABASE_URL'))

    # Query for the nearest objects to the given lat, lon
    query = \
        """SELECT
            gnis_id, gnis_name,
            round(ST_Distance_Sphere(ST_Union(wkb_geometry),
                ST_SetSRID(ST_Point(%(lon)s, %(lat)s), 4269))) as meters,
            ST_AsGeoJSON(ST_Union(wkb_geometry)) as geojson
        FROM flowline
        WHERE gnis_id IN
            (SELECT gnis_id
            FROM flowline
            WHERE st_dwithin(wkb_geometry,
                ST_SetSRID(ST_Point(%(lon)s, %(lat)s), 4269), 0.025))
        GROUP BY gnis_id, gnis_name
        ORDER BY meters
        LIMIT %(limit)s;
        """
    query_dict = {'lat': lat, 'lon': lon, 'limit': limit}
    nearest_rivers = db.all(query, query_dict)

    # If there are no matches, return None
    if len(nearest_rivers) == 0:
        return None

    # Convert the returned records into a list of dictionaries
    rivers = [r._asdict() for r in nearest_rivers]
    for r in rivers:
        # Change the geojson string into a proper dictionary
        r['geojson'] = json.loads(r['geojson'])
        if name and r['gnis_name']:
            # Calculate name similarity of the search name and river name
            r['name_similarity'] = fuzz.WRatio(name, r['gnis_name'])

    return sorted(rivers, key=operator.itemgetter('meters'))[:limit]
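# Hedged usage sketch: assumes DATABASE_URL points at a postgres database
# that contains the NHD "flowline" table queried above. The river name and
# coordinates are made up for illustration.
if __name__ == '__main__':
    matches = search_nhd('Colorado River', lat=36.1, lon=-112.1) or []
    for river in matches:
        print('%s: %sm away, name similarity %s' % (
            river['gnis_name'], river['meters'], river.get('name_similarity')))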
JeffPaine/nhd_search
search/utils.py
Python
mit
2,035
import logging from typing import cast, List, Union import numpy from cephlib.common import float2str from cephlib.texttable import Texttable from cephlib.statistic import calc_norm_stat_props, calc_histo_stat_props from .stage import Stage, StepOrder from .test_run_class import TestRun from .result_classes import SuiteConfig from .suits.io.fio import FioTest from .suits.io.fio_job import FioJobParams from .suits.io.fio_hist import get_lat_vals from .data_selectors import get_aggregated from .result_storage import IWallyStorage logger = logging.getLogger("wally") console_report_headers = ["Description", "IOPS ~ Dev", "BW, MiBps", 'Skew/Kurt', 'lat med, ms', 'lat 95, ms'] console_report_align = ['l', 'r', 'r', 'r', 'r', 'r'] def get_console_report_table(suite: SuiteConfig, rstorage: IWallyStorage) -> List[Union[List[str], Texttable.HLINE]]: table: List[Union[List[str], Texttable.HLINE]] = [] prev_params = None for job in sorted(rstorage.iter_job(suite), key=lambda job: job.params): fparams = cast(FioJobParams, job.params) fparams['qd'] = None if prev_params is not None and fparams.char_tpl != prev_params: table.append(Texttable.HLINE) prev_params = fparams.char_tpl bw_ts = get_aggregated(rstorage, suite.storage_id, job.storage_id, metric='bw', trange=job.reliable_info_range_s) props = calc_norm_stat_props(bw_ts) avg_iops = props.average // job.params.params['bsize'] iops_dev = props.deviation // job.params.params['bsize'] lat_ts = get_aggregated(rstorage, suite.storage_id, job.storage_id, metric='lat', trange=job.reliable_info_range_s) bins_edges = numpy.array(get_lat_vals(lat_ts.data.shape[1]), dtype='float32') / 1000 # convert us to ms lat_props = calc_histo_stat_props(lat_ts, bins_edges) table.append([job.params.summary, f"{float2str(avg_iops):>6s} ~ {float2str(iops_dev):>6s}", float2str(props.average / 1024), # Ki -> Mi f"{props.skew:>5.1f}/{props.kurt:>5.1f}", float2str(lat_props.perc_50), float2str(lat_props.perc_95)]) return table class ConsoleReportStage(Stage): priority = StepOrder.REPORT def run(self, ctx: TestRun) -> None: for suite in ctx.rstorage.iter_suite(FioTest.name): table = Texttable(max_width=200) table.set_deco(Texttable.VLINES | Texttable.BORDER | Texttable.HEADER) tbl = ctx.rstorage.get_txt_report(suite) if tbl is None: table.header(console_report_headers) table.set_cols_align(console_report_align) for line in get_console_report_table(suite, ctx.rstorage): table.add_row(line) tbl = table.draw() ctx.rstorage.put_txt_report(suite, tbl) print(tbl)
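# Minimal sketch of the Texttable wiring used in ConsoleReportStage.run above,
# fed with a single hypothetical result row instead of real suite data.
def _console_table_sketch() -> str:
    table = Texttable(max_width=200)
    table.set_deco(Texttable.VLINES | Texttable.BORDER | Texttable.HEADER)
    table.header(console_report_headers)
    table.set_cols_align(console_report_align)
    table.add_row(["randwrite_4k_qd1", "  1200 ~     90", "4.7", "  0.1/  2.3", "0.81", "1.95"])
    return table.draw()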
Mirantis/disk_perf_test_tool
wally/console_report.py
Python
apache-2.0
2,980
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-04-21 11:39 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('collection', '0001_initial'), ] operations = [ migrations.AddField( model_name='missed', name='notes', field=models.TextField(blank=True), ), ]
Rawtechio/oscar
oscar/collection/migrations/0002_missed_notes.py
Python
mit
436
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-07-13 08:44 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('freelancers', '0044_auto_20170712_1521'), ] operations = [ migrations.RemoveField( model_name='project', name='type_of_contract', ), migrations.RemoveField( model_name='socialaccount', name='profile', ), migrations.AddField( model_name='expendedtime', name='user', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), preserve_default=False, ), migrations.AddField( model_name='expense', name='currency', field=models.CharField(choices=[('EUR', 'Euro'), ('USD', 'Dollar')], default='EUR', max_length=3), ), migrations.AddField( model_name='expense', name='user', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), preserve_default=False, ), migrations.AddField( model_name='invoice', name='user', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), preserve_default=False, ), migrations.AddField( model_name='socialaccount', name='user', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), preserve_default=False, ), migrations.AlterField( model_name='category', name='description', field=models.TextField(max_length=200), ), migrations.AlterField( model_name='company', name='description', field=models.TextField(max_length=200, null=True), ), migrations.AlterField( model_name='education', name='description', field=models.TextField(max_length=200, null=True), ), migrations.AlterField( model_name='expendedtime', name='notes', field=models.TextField(max_length=200, null=True), ), migrations.AlterField( model_name='expendedtime', name='start_time', field=models.DateTimeField(help_text='Start time of the task.', null=True), ), migrations.AlterField( model_name='expendedtime', name='stop_time', field=models.DateTimeField(help_text="Stop time of the task. This time needs to be of a higher date then start_time. Setting this fieldmeans the amount of time spent on the task will be added to the current 'time'.", null=True), ), migrations.AlterField( model_name='expendedtime', name='time', field=models.IntegerField(default=0, help_text='Time spent on the task in seconds. Time registered through start and stop time will be automatically added to this field'), ), migrations.AlterField( model_name='expense', name='notes', field=models.TextField(max_length=200, null=True), ), migrations.AlterField( model_name='experience', name='description', field=models.TextField(max_length=200), ), migrations.AlterField( model_name='kindoftask', name='description', field=models.TextField(max_length=200, null=True), ), migrations.AlterField( model_name='profiletype', name='description', field=models.TextField(max_length=200), ), migrations.AlterField( model_name='project', name='description', field=models.TextField(max_length=100), ), migrations.AlterField( model_name='skill', name='description', field=models.TextField(max_length=200), ), migrations.AlterField( model_name='socialaccount', name='web_address', field=models.CharField(max_length=255), ), migrations.AlterField( model_name='typeofcontract', name='description', field=models.TextField(max_length=200), ), ]
TheWebMonks/equipo
app/freelancers/migrations/0045_auto_20170713_0844.py
Python
apache-2.0
4,747
#!/usr/bin/env python # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Transport library for ProtoRPC. Contains underlying infrastructure used for communicating RPCs over low level transports such as HTTP. Includes HTTP transport built over urllib2. """ import httplib import logging import sys import urllib2 from . import messages from . import protobuf from . import remote from . import util try: from google.appengine.api import urlfetch except ImportError: urlfetch = None __all__ = [ 'RpcStateError', 'HttpTransport', 'Rpc', 'Transport', ] class RpcStateError(messages.Error): """Raised when trying to put RPC in to an invalid state.""" class Rpc(object): """Represents a client side RPC. An RPC is created by the transport class and is used with a single RPC. While an RPC is still in process, the response is set to None. When it is complete the response will contain the response message. """ def __init__(self, request): """Constructor. Args: request: Request associated with this RPC. """ self.__request = request self.__response = None self.__state = remote.RpcState.RUNNING self.__error_message = None self.__error_name = None @property def request(self): """Request associated with RPC.""" return self.__request @property def response(self): """Response associated with RPC.""" self.wait() self.__check_status() return self.__response @property def state(self): """State associated with RPC.""" return self.__state @property def error_message(self): """Error, if any, associated with RPC.""" self.wait() return self.__error_message @property def error_name(self): """Error name, if any, associated with RPC.""" self.wait() return self.__error_name def wait(self): """Wait for an RPC to finish.""" if self.__state == remote.RpcState.RUNNING: self._wait_impl() def _wait_impl(self): """Implementation for wait().""" raise NotImplementedError() def __check_status(self): error_class = remote.RpcError.from_state(self.__state) if error_class is not None: if error_class is remote.ApplicationError: raise error_class(self.__error_message, self.__error_name) else: raise error_class(self.__error_message) def __set_state(self, state, error_message=None, error_name=None): if self.__state != remote.RpcState.RUNNING: raise RpcStateError( 'RPC must be in RUNNING state to change to %s' % state) if state == remote.RpcState.RUNNING: raise RpcStateError('RPC is already in RUNNING state') self.__state = state self.__error_message = error_message self.__error_name = error_name def set_response(self, response): # TODO: Even more specific type checking. if not isinstance(response, messages.Message): raise TypeError('Expected Message type, received %r' % (response)) self.__response = response self.__set_state(remote.RpcState.OK) def set_status(self, status): status.check_initialized() self.__set_state(status.state, status.error_message, status.error_name) class Transport(object): """Transport base class. 
Provides basic support for implementing a ProtoRPC transport such as one that can send and receive messages over HTTP. Implementations override _start_rpc. This method receives a RemoteInfo instance and a request Message. The transport is expected to set the rpc response or raise an exception before termination. """ @util.positional(1) def __init__(self, protocol=protobuf): """Constructor. Args: protocol: The protocol implementation. Must implement encode_message and decode_message. Can also be an instance of remote.ProtocolConfig. """ self.__protocol = protocol if isinstance(protocol, remote.ProtocolConfig): self.__protocol = protocol.protocol self.__protocol_config = protocol else: self.__protocol = protocol self.__protocol_config = remote.ProtocolConfig(protocol, 'default') @property def protocol(self): """Protocol associated with this transport.""" return self.__protocol @property def protocol_config(self): """Protocol associated with this transport.""" return self.__protocol_config def send_rpc(self, remote_info, request): """Initiate sending an RPC over the transport. Args: remote_info: RemoteInfo instance describing remote method. request: Request message to send to service. Returns: An Rpc instance intialized with the request.. """ request.check_initialized() rpc = self._start_rpc(remote_info, request) return rpc def _start_rpc(self, remote_info, request): """Start a remote procedure call. Args: remote_info: RemoteInfo instance describing remote method. request: Request message to send to service. Returns: An Rpc instance initialized with the request. """ raise NotImplementedError() class HttpTransport(Transport): """Transport for communicating with HTTP servers.""" class __HttpRequest(object): """Base class for library-specific requests.""" def __init__(self, method_url, transport, encoded_request): """Constructor. Args: method_url: The URL where the method is located. transport: The Transport instance making the request. """ self._method_url = method_url self._transport = transport self._start_request(encoded_request) def _get_rpc_status(self, content_type, content): """Get an RpcStats from content. Args: content_type: Content-type of the provided content. content: Content of the http response. Returns: RpcStatus if found in content. If not, returns None. """ protocol = self._transport.protocol if content_type == protocol.CONTENT_TYPE: try: rpc_status = protocol.decode_message(remote.RpcStatus, content) except Exception, decode_err: logging.warning( 'An error occurred trying to parse status: %s\n%s', str(decode_err), content) return None else: return rpc_status def _start_request(self): raise NotImplementedError() def get_response(self): """Get the encoded response for the request. If an error occurs on the server and the server sends an RpcStatus as the response body, an RpcStatus will be returned as the second element in the response tuple. In cases where there is an error, but no RpcStatus is transmitted, we raise a ServerError with the response content. Returns: Tuple (encoded_response, rpc_status): encoded_response: Encoded message in protocols wire format. rpc_status: RpcStatus if returned by server. Raises: NetworkError if transport has issues communicating with the network. RequestError if transport receives an error constructing the HttpRequest. ServerError if the server responds with an http error code and does not send an encoded RpcStatus as the response content. 
""" raise NotImplementedError() class __UrlfetchRequest(__HttpRequest): """Request cycle for a remote call using urlfetch.""" __urlfetch_rpc = None def _start_request(self, encoded_request): """Initiate async call.""" self.__urlfetch_rpc = urlfetch.create_rpc() headers = { 'Content-type': self._transport.protocol.CONTENT_TYPE } urlfetch.make_fetch_call(self.__urlfetch_rpc, self._method_url, payload=encoded_request, method='POST', headers=headers) def get_response(self): try: http_response = self.__urlfetch_rpc.get_result() if http_response.status_code >= 400: status = self._get_rpc_status( http_response.headers.get('content-type'), http_response.content) if status: return http_response.content, status error_message = httplib.responses.get(http_response.status_code, 'Unknown Error') return (None, remote.RpcStatus( state=remote.RpcState.SERVER_ERROR, error_message='HTTP Error %d: %s' % ( http_response.status_code, error_message))) except urlfetch.DownloadError, err: raise remote.NetworkError, (str(err), err) except urlfetch.InvalidURLError, err: raise remote.RequestError, 'Invalid URL, received: %s' % ( self.__urlfetch.request.url()) except urlfetch.ResponseTooLargeError: raise remote.NetworkError( 'The response data exceeded the maximum allowed size.') return http_response.content, None class __UrllibRequest(__HttpRequest): """Request cycle for a remote call using Urllib.""" def _start_request(self, encoded_request): """Create the urllib2 request. """ http_request = urllib2.Request(self._method_url, encoded_request) http_request.add_header('Content-type', self._transport.protocol.CONTENT_TYPE) self.__http_request = http_request def get_response(self): try: http_response = urllib2.urlopen(self.__http_request) except urllib2.HTTPError, err: if err.code >= 400: status = self._get_rpc_status(err.hdrs.get('content-type'), err.read()) if status: return err.msg, status # TODO: Map other types of errors to appropriate exceptions. _, _, trace_back = sys.exc_info() return None, remote.RpcStatus(state=remote.RpcState.SERVER_ERROR, error_message='HTTP Error %s: %s' % ( err.code, err.msg)) except urllib2.URLError, err: _, _, trace_back = sys.exc_info() if isinstance(err, basestring): error_message = err else: error_message = err.args[0] return None, remote.RpcStatus(state=remote.RpcState.NETWORK_ERROR, error_message='Network Error: %s' % error_message) return http_response.read(), None @util.positional(2) def __init__(self, service_url, protocol=protobuf): """Constructor. Args: service_url: URL where the service is located. All communication via the transport will go to this URL. protocol: The protocol implementation. Must implement encode_message and decode_message. Can also be an instance of remote.ProtocolConfig. """ super(HttpTransport, self).__init__(protocol=protocol) self.__service_url = service_url if urlfetch: self.__request_type = self.__UrlfetchRequest else: self.__request_type = self.__UrllibRequest def _start_rpc(self, remote_info, request): """Start a remote procedure call. Args: remote_info: A RemoteInfo instance for this RPC. request: The request message for this RPC. Returns: An Rpc instance initialized with a Request. 
""" method_url = '%s.%s' % (self.__service_url, remote_info.method.func_name) encoded_request = self.protocol.encode_message(request) http_request = self.__request_type(method_url=method_url, transport=self, encoded_request=encoded_request) rpc = Rpc(request) def wait_impl(): """Implementation of _wait for an Rpc.""" encoded_response, status = http_request.get_response() if status: rpc.set_status(status) else: response = self.protocol.decode_message(remote_info.response_type, encoded_response) rpc.set_response(response) rpc._wait_impl = wait_impl return rpc
adviti/melange
thirdparty/google_appengine/lib/protorpc/protorpc/transport.py
Python
apache-2.0
12,773
import brain import data import backtest import time import logging # Windows # datadir = r'C:\Users\craig\Desktop\BOT Project' # keyfile = r'C:\Users\craig\Desktop\BOT Project\kraken.key' # Linux datadir = r'/home/craig/KrakenBot/Data/' keyfile = r'/home/craig/KrakenBot/Key/kraken.key' # Set up logging # logging.basicConfig(filename=datadir + '\\' + 'krakenbot.log',level=logging.DEBUG) # Trading Pairs: # XETH ZEUR # XXBT ZEUR # XLTC ZEUR # Set trading pair, OHLC timeframe and moving avg vals crypto = 'XXBT' fiat = 'ZEUR' timeframe = 240 ma_1 = 7 ma_2 = 16 type = 'EMA' # EMA / SMA # Set up data object and import / refresh data d = data.Data(datadir, keyfile, crypto, fiat, timeframe) d.import_ohlc() d.refresh_ohlc() # Update SMA calcs for imported data d.update_sma(ma_1) d.update_sma(ma_2) # Update EMA calcs for imported data d.update_ema(ma_1) d.update_ema(ma_2) # Loop for x minutes # for mins in range(720): # d.refresh_ohlc() # print('Refreshing OHLC data. Last ID: ' + str(d.ohlc_last_id)) # time.sleep(60) # Set up brain object with data and trade vol b = brain.Brain(d) # Backtest - pass data and brain object parameters bt = backtest.Backtest(d, b) # Monte carlo simulation from min to max MA bt.run_ma_sim(type, 1, 50) # Simulate single MA combo and export simulated trades # bt.ma_sim(type, ma_1, ma_2) # bt.export_trades() # Save data # d.export_ohlc() # d.export_ohlc_csv() # Testing # d.get_cryptobal() # d.get_fiatbal() # print(str(b.check_min_f())) # Random stuff # Get tradable asset pairs # AP = d.k.query_public('AssetPairs') # print(AP)
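# Reference sketch (hypothetical helper, not used by the bot): the standard
# EMA recurrence that d.update_ema(n) is assumed to compute, with
# alpha = 2 / (n + 1) and the series seeded from the first price.
def ema_sketch(prices, n):
    alpha = 2.0 / (n + 1)
    ema = prices[0]
    series = []
    for price in prices:
        ema = alpha * price + (1 - alpha) * ema
        series.append(ema)
    return series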
CeramicDingo/KrakenBot
local_app.py
Python
mit
1,598
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow estimators for Linear and DNN joined training models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import re import six from tensorflow.contrib import layers from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.layers.python.layers import feature_column_ops from tensorflow.contrib.layers.python.layers import optimizers from tensorflow.contrib.learn.python.learn import evaluable from tensorflow.contrib.learn.python.learn import session_run_hook from tensorflow.contrib.learn.python.learn import trainable from tensorflow.contrib.learn.python.learn.estimators import composable_model from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import nn from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator): """An estimator for TensorFlow Linear and DNN joined training models. Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name head, model_dir=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, gradient_clip_norm=None, config=None, feature_engineering_fn=None, default_prediction_key=None, enable_centered_bias=False): """Initializes a _DNNLinearCombinedBaseEstimator instance. Args: head: A _Head object. 
model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True will use a single (possibly partitioned) variable to store all weights for the linear model. More efficient if there are many columns, however requires all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. default_prediction_key: Default prediction key to use with metrics. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ super(_DNNLinearCombinedBaseEstimator, self).__init__( model_dir=model_dir, config=config) num_ps_replicas = config.num_ps_replicas if config else 0 self._linear_model = composable_model.LinearComposableModel( num_label_columns=head.logits_dimension, optimizer=linear_optimizer, _joint_weights=_joint_linear_weights, gradient_clip_norm=gradient_clip_norm, num_ps_replicas=num_ps_replicas) self._dnn_model = composable_model.DNNComposableModel( num_label_columns=head.logits_dimension, hidden_units=dnn_hidden_units, optimizer=dnn_optimizer, activation_fn=dnn_activation_fn, dropout=dnn_dropout, gradient_clip_norm=gradient_clip_norm, num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None self._linear_feature_columns = linear_feature_columns self._linear_optimizer = linear_optimizer self._dnn_feature_columns = dnn_feature_columns self._dnn_hidden_units = dnn_hidden_units self._head = head self._default_prediction_key = default_prediction_key self._feature_engineering_fn = ( feature_engineering_fn or (lambda features, labels: (features, labels))) self._enable_centered_bias = enable_centered_bias @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. 
" "To inspect variables, use get_variable_names() and " "get_variable_value().") def linear_weights_(self): """Returns weights per feature of the linear part.""" return self._linear_model.get_weights(model_dir=self._model_dir) @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def linear_bias_(self): """Returns bias of the linear part.""" if not self._enable_centered_bias: return self._linear_model.get_bias(model_dir=self._model_dir) return (self._linear_model.get_bias(model_dir=self._model_dir) + self.get_variable_value("centered_bias_weight")) @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def dnn_weights_(self): """Returns weights of deep neural network part.""" return self._dnn_model.get_weights(model_dir=self._model_dir) @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def dnn_bias_(self): """Returns bias of deep neural network part.""" if not self._enable_centered_bias: return self._dnn_model.get_bias(model_dir=self._model_dir) return (self._dnn_model.get_bias(model_dir=self._model_dir) + [self._get_centered_bias_value()]) # TODO(zakaria): Remove this function once export. export_estimator is # obsolete. def _create_signature_fn(self): """Returns a function to create export signature of this Estimator.""" # pylint: disable=protected-access return self._head._create_signature_fn() def _get_feature_dict(self, features): if isinstance(features, dict): return features return {"": features} def _get_train_ops(self, features, labels): """See base class.""" features = self._get_feature_dict(features) features, labels = self._feature_engineering_fn(features, labels) logits = self._logits(features, is_training=True) def _make_training_op(training_loss): global_step = contrib_variables.get_global_step() assert global_step linear_train_step = self._linear_model.get_train_step(training_loss) dnn_train_step = (self._dnn_model.get_train_step(training_loss) if self._dnn_model else []) with ops.control_dependencies(linear_train_step + dnn_train_step): with ops.get_default_graph().colocate_with(global_step): return state_ops.assign_add(global_step, 1).op model_fn_ops = self._head.head_ops(features, labels, estimator.ModeKeys.TRAIN, _make_training_op, logits=logits) return model_fn_ops.training_op, model_fn_ops.loss def _get_eval_ops(self, features, labels, metrics=None): """See base class.""" features = self._get_feature_dict(features) features, labels = self._feature_engineering_fn(features, labels) logits = self._logits(features) model_fn_ops = self._head.head_ops(features, labels, estimator.ModeKeys.EVAL, None, logits=logits) all_metrics = model_fn_ops.default_metrics if metrics: for name, metric in six.iteritems(metrics): if not isinstance(name, tuple): # TODO(zakaria): remove once deprecation is finished (b/31229024) all_metrics[(name, self._default_prediction_key)] = metric else: all_metrics[name] = metric # TODO(zakaria): Remove this once we refactor this class to delegate # to estimator. 
# pylint: disable=protected-access result = estimator._make_metrics_ops(all_metrics, features, labels, model_fn_ops.predictions) return result def _get_predict_ops(self, features): """See base class.""" features = self._get_feature_dict(features) features, _ = self._feature_engineering_fn(features, None) logits = self._logits(features) model_fn_ops = self._head.head_ops(features, None, estimator.ModeKeys.INFER, None, logits=logits) return model_fn_ops.predictions @deprecated( "2016-09-23", "The signature of the input_fn accepted by export is changing to be " "consistent with what's used by tf.Learn Estimator's train/evaluate, " "which makes this function useless. This will be removed after the " "deprecation date.") def _get_feature_ops_from_example(self, examples_batch): column_types = layers.create_feature_spec_for_parsing(( self._get_linear_feature_columns() or []) + ( self._get_dnn_feature_columns() or [])) features = parsing_ops.parse_example(examples_batch, column_types) return features def _get_linear_feature_columns(self): if not self._linear_feature_columns: return None feature_column_ops.check_feature_columns(self._linear_feature_columns) return sorted(set(self._linear_feature_columns), key=lambda x: x.key) def _get_dnn_feature_columns(self): if not self._dnn_feature_columns: return None feature_column_ops.check_feature_columns(self._dnn_feature_columns) return sorted(set(self._dnn_feature_columns), key=lambda x: x.key) def _dnn_logits(self, features, is_training): return self._dnn_model.build_model( features, self._dnn_feature_columns, is_training) def _linear_logits(self, features, is_training): return self._linear_model.build_model( features, self._linear_feature_columns, is_training) def _logits(self, features, is_training=False): linear_feature_columns = self._get_linear_feature_columns() dnn_feature_columns = self._get_dnn_feature_columns() if not (linear_feature_columns or dnn_feature_columns): raise ValueError("Either linear_feature_columns or dnn_feature_columns " "should be defined.") if linear_feature_columns and dnn_feature_columns: logits = (self._linear_logits(features, is_training) + self._dnn_logits(features, is_training)) elif dnn_feature_columns: logits = self._dnn_logits(features, is_training) else: logits = self._linear_logits(features, is_training) return logits _CENTERED_BIAS_WEIGHT = "centered_bias_weight" # The default learning rates are a historical artifact of the initial # implementation, but seem a reasonable choice. _DNN_LEARNING_RATE = 0.05 _LINEAR_LEARNING_RATE = 0.2 def _as_iterable(preds, output): for pred in preds: yield pred[output] def _get_feature_dict(features): if isinstance(features, dict): return features return {"": features} def _get_optimizer(optimizer): if callable(optimizer): return optimizer() else: return optimizer def _linear_learning_rate(num_linear_feature_columns): """Returns the default learning rate of the linear model. The calculation is a historical artifact of this initial implementation, but has proven a reasonable choice. Args: num_linear_feature_columns: The number of feature columns of the linear model. Returns: A float. """ default_learning_rate = 1. 
/ math.sqrt(num_linear_feature_columns) return min(_LINEAR_LEARNING_RATE, default_learning_rate) def _add_hidden_layer_summary(value, tag): logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag, nn.zero_fraction(value)) logging_ops.histogram_summary("%s:activation" % tag, value) def _dnn_linear_combined_model_fn(features, labels, mode, params): """Deep Neural Net and Linear combined model_fn. Args: features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`). labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. params: A dict of hyperparameters. The following hyperparameters are expected: * head: A `Head` instance. * linear_feature_columns: An iterable containing all the feature columns used by the Linear model. * linear_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the Linear model. * joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires all columns are sparse and have the 'sum' combiner. * dnn_feature_columns: An iterable containing all the feature columns used by the DNN model. * dnn_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the DNN model. * dnn_hidden_units: List of hidden units per DNN layer. * dnn_activation_fn: Activation function applied to each DNN layer. If `None`, will use `tf.nn.relu`. * dnn_dropout: When not `None`, the probability we will drop out a given DNN coordinate. * gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. * num_ps_replicas: The number of parameter server replicas. Returns: `estimator.ModelFnOps` Raises: ValueError: If both `linear_feature_columns` and `dnn_features_columns` are empty at the same time. """ head = params["head"] linear_feature_columns = params.get("linear_feature_columns") linear_optimizer = params.get("linear_optimizer") joint_linear_weights = params.get("joint_linear_weights") dnn_feature_columns = params.get("dnn_feature_columns") dnn_optimizer = params.get("dnn_optimizer") dnn_hidden_units = params.get("dnn_hidden_units") dnn_activation_fn = params.get("dnn_activation_fn") dnn_dropout = params.get("dnn_dropout") gradient_clip_norm = params.get("gradient_clip_norm") num_ps_replicas = params["num_ps_replicas"] if not linear_feature_columns and not dnn_feature_columns: raise ValueError( "Either linear_feature_columns or dnn_feature_columns must be defined.") features = _get_feature_dict(features) # Build DNN Logits. 
dnn_parent_scope = "dnn" if not dnn_feature_columns: dnn_logits = None else: input_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20)) with variable_scope.variable_scope( dnn_parent_scope + "/input_from_feature_columns", values=features.values(), partitioner=input_layer_partitioner) as scope: net = layers.input_from_feature_columns( columns_to_tensors=features, feature_columns=dnn_feature_columns, weight_collections=[dnn_parent_scope], scope=scope) hidden_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas)) for layer_id, num_hidden_units in enumerate(dnn_hidden_units): with variable_scope.variable_scope( dnn_parent_scope + "/hiddenlayer_%d" % layer_id, values=[net], partitioner=hidden_layer_partitioner) as scope: net = layers.fully_connected( net, num_hidden_units, activation_fn=dnn_activation_fn, variables_collections=[dnn_parent_scope], scope=scope) if dnn_dropout is not None and mode == estimator.ModeKeys.TRAIN: net = layers.dropout( net, keep_prob=(1.0 - dnn_dropout)) # TODO(b/31209633): Consider adding summary before dropout. _add_hidden_layer_summary(net, scope.name) with variable_scope.variable_scope( dnn_parent_scope + "/logits", values=[net], partitioner=hidden_layer_partitioner) as scope: dnn_logits = layers.fully_connected( net, head.logits_dimension, activation_fn=None, variables_collections=[dnn_parent_scope], scope=scope) _add_hidden_layer_summary(dnn_logits, scope.name) # Build Linear logits. linear_parent_scope = "linear" if not linear_feature_columns: linear_logits = None else: linear_partitioner = partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20) with variable_scope.variable_scope( linear_parent_scope, values=features.values(), partitioner=linear_partitioner) as scope: if joint_linear_weights: linear_logits, _, _ = layers.joint_weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=linear_feature_columns, num_outputs=head.logits_dimension, weight_collections=[linear_parent_scope], scope=scope) else: linear_logits, _, _ = layers.weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=linear_feature_columns, num_outputs=head.logits_dimension, weight_collections=[linear_parent_scope], scope=scope) # Combine logits and build full model. if dnn_logits is not None and linear_logits is not None: logits = dnn_logits + linear_logits elif dnn_logits is not None: logits = dnn_logits else: logits = linear_logits def _make_training_op(training_loss): """Training op for the DNN linear combined model.""" train_ops = [] if dnn_logits is not None: train_ops.append( optimizers.optimize_loss( loss=training_loss, global_step=contrib_variables.get_global_step(), learning_rate=_DNN_LEARNING_RATE, optimizer=_get_optimizer(dnn_optimizer), clip_gradients=gradient_clip_norm, variables=ops.get_collection(dnn_parent_scope), name=dnn_parent_scope, # Empty summaries, because head already logs "loss" summary. summaries=[])) if linear_logits is not None: train_ops.append( optimizers.optimize_loss( loss=training_loss, global_step=contrib_variables.get_global_step(), learning_rate=_linear_learning_rate(len(linear_feature_columns)), optimizer=_get_optimizer(linear_optimizer), clip_gradients=gradient_clip_norm, variables=ops.get_collection(linear_parent_scope), name=linear_parent_scope, # Empty summaries, because head already logs "loss" summary. 
summaries=[])) return control_flow_ops.group(*train_ops) return head.head_ops( features, labels, mode, _make_training_op, logits=logits) class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable): """A classifier for TensorFlow Linear and DNN joined training models. Example: ```python education = sparse_column_with_hash_bucket(column_name="education", hash_bucket_size=1000) occupation = sparse_column_with_hash_bucket(column_name="occupation", hash_bucket_size=1000) education_x_occupation = crossed_column(columns=[education, occupation], hash_bucket_size=10000) education_emb = embedding_column(sparse_id_column=education, dimension=16, combiner="sum") occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16, combiner="sum") estimator = DNNLinearCombinedClassifier( # common settings n_classes=n_classes, weight_column_name=weight_column_name, # wide settings linear_feature_columns=[education_x_occupation], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[education_emb, occupation_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.AdagradOptimizer(...)) # Input builders def input_fn_train: # returns x, y ... def input_fn_eval: # returns x, y ... estimator.fit(input_fn=input_fn_train) estimator.evaluate(input_fn=input_fn_eval) estimator.predict(x=x) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name model_dir=None, n_classes=2, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, gradient_clip_norm=None, enable_centered_bias=False, config=None, feature_engineering_fn=None): """Constructs a DNNLinearCombinedClassifier instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. n_classes: number of label classes. Default is binary classification. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires all columns are sparse and have the 'sum' combiner. 
dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. Raises: ValueError: If `n_classes` < 2. ValueError: If both `linear_feature_columns` and `dnn_features_columns` are empty at the same time. """ if n_classes < 2: raise ValueError("n_classes should be greater than 1. Given: {}".format( n_classes)) self._linear_optimizer = linear_optimizer or "Ftrl" linear_feature_columns = linear_feature_columns or [] dnn_feature_columns = dnn_feature_columns or [] self._feature_columns = linear_feature_columns + dnn_feature_columns if not self._feature_columns: raise ValueError("Either linear_feature_columns or dnn_feature_columns " "must be defined.") self._dnn_hidden_units = dnn_hidden_units self._enable_centered_bias = enable_centered_bias head = head_lib._multi_class_head( # pylint: disable=protected-access n_classes=n_classes, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias) self._estimator = estimator.Estimator( model_fn=_dnn_linear_combined_model_fn, model_dir=model_dir, config=config, params={ "head": head, "linear_feature_columns": linear_feature_columns, "linear_optimizer": self._linear_optimizer, "joint_linear_weights": _joint_linear_weights, "dnn_feature_columns": dnn_feature_columns, "dnn_optimizer": dnn_optimizer or "Adagrad", "dnn_hidden_units": dnn_hidden_units, "dnn_activation_fn": dnn_activation_fn, "dnn_dropout": dnn_dropout, "gradient_clip_norm": gradient_clip_norm, "num_ps_replicas": config.num_ps_replicas if config else 0, }, feature_engineering_fn=feature_engineering_fn) def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): """See trainable.Trainable.""" # TODO(roumposg): Remove when deprecated monitors are removed. 
if monitors is not None: deprecated_monitors = [ m for m in monitors if not isinstance(m, session_run_hook.SessionRunHook) ] for monitor in deprecated_monitors: monitor.set_estimator(self) monitor._lock_estimator() # pylint: disable=protected-access result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors, max_steps=max_steps) if monitors is not None: for monitor in deprecated_monitors: monitor._unlock_estimator() # pylint: disable=protected-access return result def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None): """See evaluable.Evaluable.""" return self._estimator.evaluate( x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, steps=steps, metrics=metrics, name=name) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns predicted classes for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted classes (or an iterable of predicted classes if as_iterable is True). """ key = prediction_key.PredictionKey.CLASSES preds = self._estimator.predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key].reshape(-1) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_proba( self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns prediction probabilities for given features. Args: x: features. input_fn: Input function. If set, x and y must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted probabilities (or an iterable of predicted probabilities if as_iterable is True). """ key = prediction_key.PredictionKey.PROBABILITIES preds = self._estimator.predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] def _get_predict_ops(self, features): """See `Estimator` class.""" # pylint: disable=protected-access return self._estimator._get_predict_ops(features)[ prediction_key.PredictionKey.PROBABILITIES] def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. """ return self._estimator.get_variable_names() def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: `Tensor` object. 
""" return self._estimator.get_variable_value(name) def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BasEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) self._estimator.export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=(signature_fn or export.classification_signature_fn_with_prob), prediction_key=prediction_key.PredictionKey.PROBABILITIES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) @property def model_dir(self): return self._estimator.model_dir @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def dnn_weights_(self): hiddenlayer_weights = [ self.get_variable_value("dnn/hiddenlayer_%d/weights" % i) for i, _ in enumerate(self._dnn_hidden_units) ] logits_weights = [self.get_variable_value("dnn/logits/weights")] return hiddenlayer_weights + logits_weights @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def linear_weights_(self): values = {} if isinstance(self._linear_optimizer, str): optimizer_name = self._linear_optimizer else: optimizer_name = self._linear_optimizer.get_name() optimizer_regex = r".*/"+optimizer_name + r"(_\d)?$" for name in self.get_variable_names(): if (name.startswith("linear/") and name != "linear/bias_weight" and name != "linear/learning_rate" and not re.match(optimizer_regex, name)): values[name] = self.get_variable_value(name) if len(values) == 1: return values[list(values.keys())[0]] return values @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def dnn_bias_(self): hiddenlayer_bias = [self.get_variable_value("dnn/hiddenlayer_%d/biases" % i) for i, _ in enumerate(self._dnn_hidden_units)] logits_bias = [self.get_variable_value("dnn/logits/biases")] if not self._enable_centered_bias: return hiddenlayer_bias + logits_bias centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)] return hiddenlayer_bias + logits_bias + centered_bias @property @deprecated("2016-10-30", "This method will be removed after the deprecation date. " "To inspect variables, use get_variable_names() and " "get_variable_value().") def linear_bias_(self): linear_bias = self.get_variable_value("linear/bias_weight") if not self._enable_centered_bias: return linear_bias centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)] return linear_bias + centered_bias @property def config(self): return self._estimator.config class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator): """A regressor for TensorFlow Linear and DNN joined training models. 
Example: ```python education = sparse_column_with_hash_bucket(column_name="education", hash_bucket_size=1000) occupation = sparse_column_with_hash_bucket(column_name="occupation", hash_bucket_size=1000) education_x_occupation = crossed_column(columns=[education, occupation], hash_bucket_size=10000) education_emb = embedding_column(sparse_id_column=education, dimension=16, combiner="sum") occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16, combiner="sum") estimator = DNNLinearCombinedRegressor( # common settings weight_column_name=weight_column_name, # wide settings linear_feature_columns=[education_x_occupation], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[education_emb, occupation_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.ProximalAdagradOptimizer(...)) # To apply L1 and L2 regularization, you can set optimizers as follows: tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.001) # It is same for FtrlOptimizer. # Input builders def input_fn_train: # returns x, y ... def input_fn_eval: # returns x, y ... estimator.train(input_fn_train) estimator.evaluate(input_fn_eval) estimator.predict(x) ``` Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name model_dir=None, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, gradient_clip_norm=None, enable_centered_bias=False, label_dimension=1, config=None, feature_engineering_fn=None): """Initializes a DNNLinearCombinedRegressor instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires that all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. 
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If None, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. label_dimension: TODO(zakaria): dimension of the label for multilabels. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ head = head_lib._regression_head( # pylint: disable=protected-access weight_column_name=weight_column_name, label_dimension=label_dimension, enable_centered_bias=enable_centered_bias) super(DNNLinearCombinedRegressor, self).__init__( model_dir=model_dir, linear_feature_columns=linear_feature_columns, linear_optimizer=linear_optimizer, _joint_linear_weights=_joint_linear_weights, dnn_feature_columns=dnn_feature_columns, dnn_optimizer=dnn_optimizer, dnn_hidden_units=dnn_hidden_units, dnn_activation_fn=dnn_activation_fn, dnn_dropout=dnn_dropout, gradient_clip_norm=gradient_clip_norm, head=head, config=config, feature_engineering_fn=feature_engineering_fn, default_prediction_key=prediction_key.PredictionKey.SCORES, enable_centered_bias=enable_centered_bias) def _get_predict_ops(self, features): """See base class.""" return super( DNNLinearCombinedRegressor, self)._get_predict_ops(features)[prediction_key.PredictionKey.SCORES]
tongwang01/tensorflow
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
Python
apache-2.0
44,822
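The class docstrings in the file above already sketch the wide-and-deep workflow; the snippet below condenses it into a minimal runnable shape. It assumes the TF 1.x contrib namespaces (tf.contrib.layers / tf.contrib.learn), and the column names, bucket sizes and hidden-unit counts are illustrative rather than taken from the source.

# Minimal sketch of the wide-and-deep classifier defined above (TF 1.x contrib
# API). Column names, bucket sizes and hidden units are illustrative only.
import tensorflow as tf

education = tf.contrib.layers.sparse_column_with_hash_bucket(
    "education", hash_bucket_size=1000)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
    "occupation", hash_bucket_size=1000)

estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
    # wide part: the crossed sparse column goes through the linear model
    linear_feature_columns=[
        tf.contrib.layers.crossed_column([education, occupation],
                                         hash_bucket_size=10000)],
    # deep part: embedded sparse columns feed the DNN tower
    dnn_feature_columns=[
        tf.contrib.layers.embedding_column(education, dimension=16),
        tf.contrib.layers.embedding_column(occupation, dimension=16)],
    dnn_hidden_units=[100, 50])

# Training/evaluation would then use an input_fn returning (features, labels):
# estimator.fit(input_fn=train_input_fn, steps=1000)  # train_input_fn is hypothetical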
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import absolute_import import unittest from builtins import object from apache_beam.metrics.cells import DistributionData from apache_beam.metrics.execution import MetricKey from apache_beam.metrics.execution import MetricsContainer from apache_beam.metrics.execution import MetricsEnvironment from apache_beam.metrics.metric import MetricResults from apache_beam.metrics.metric import Metrics from apache_beam.metrics.metric import MetricsFilter from apache_beam.metrics.metricbase import MetricName from apache_beam.runners.worker import statesampler from apache_beam.utils import counters class NameTest(unittest.TestCase): def test_basic_metric_name(self): name = MetricName('namespace1', 'name1') self.assertEqual(name.namespace, 'namespace1') self.assertEqual(name.name, 'name1') self.assertEqual(name, MetricName('namespace1', 'name1')) key = MetricKey('step1', name) self.assertEqual(key.step, 'step1') self.assertEqual(key.metric.namespace, 'namespace1') self.assertEqual(key.metric.name, 'name1') self.assertEqual(key, MetricKey('step1', MetricName('namespace1', 'name1'))) class MetricResultsTest(unittest.TestCase): def test_metric_filter_namespace_matching(self): filter = MetricsFilter().with_namespace('ns1') name = MetricName('ns1', 'name1') key = MetricKey('step1', name) self.assertTrue(MetricResults.matches(filter, key)) def test_metric_filter_name_matching(self): filter = MetricsFilter().with_name('name1').with_namespace('ns1') name = MetricName('ns1', 'name1') key = MetricKey('step1', name) self.assertTrue(MetricResults.matches(filter, key)) filter = MetricsFilter().with_name('name1') name = MetricName('ns1', 'name1') key = MetricKey('step1', name) self.assertTrue(MetricResults.matches(filter, key)) def test_metric_filter_step_matching(self): filter = MetricsFilter().with_step('Top1/Outer1/Inner1') name = MetricName('ns1', 'name1') key = MetricKey('Top1/Outer1/Inner1', name) self.assertTrue(MetricResults.matches(filter, key)) filter = MetricsFilter().with_step('step1') name = MetricName('ns1', 'name1') key = MetricKey('step1', name) self.assertTrue(MetricResults.matches(filter, key)) filter = MetricsFilter().with_step('Top1/Outer1') name = MetricName('ns1', 'name1') key = MetricKey('Top1/Outer1/Inner1', name) self.assertTrue(MetricResults.matches(filter, key)) filter = MetricsFilter().with_step('Top1/Inner1') name = MetricName('ns1', 'name1') key = MetricKey('Top1/Outer1/Inner1', name) self.assertFalse(MetricResults.matches(filter, key)) class MetricsTest(unittest.TestCase): def test_get_namespace_class(self): class MyClass(object): pass self.assertEqual('{}.{}'.format(MyClass.__module__, MyClass.__name__), Metrics.get_namespace(MyClass)) def test_get_namespace_string(self): namespace = 'MyNamespace' 
self.assertEqual(namespace, Metrics.get_namespace(namespace)) def test_get_namespace_error(self): with self.assertRaises(ValueError): Metrics.get_namespace(object()) def test_counter_empty_name(self): with self.assertRaises(ValueError): Metrics.counter("namespace", "") def test_counter_empty_namespace(self): with self.assertRaises(ValueError): Metrics.counter("", "names") def test_distribution_empty_name(self): with self.assertRaises(ValueError): Metrics.distribution("namespace", "") def test_distribution_empty_namespace(self): with self.assertRaises(ValueError): Metrics.distribution("", "names") def test_create_counter_distribution(self): sampler = statesampler.StateSampler('', counters.CounterFactory()) statesampler.set_current_tracker(sampler) state1 = sampler.scoped_state('mystep', 'myState', metrics_container=MetricsContainer('mystep')) sampler.start() with state1: counter_ns = 'aCounterNamespace' distro_ns = 'aDistributionNamespace' name = 'a_name' counter = Metrics.counter(counter_ns, name) distro = Metrics.distribution(distro_ns, name) counter.inc(10) counter.dec(3) distro.update(10) distro.update(2) self.assertTrue(isinstance(counter, Metrics.DelegatingCounter)) self.assertTrue(isinstance(distro, Metrics.DelegatingDistribution)) del distro del counter container = MetricsEnvironment.current_container() self.assertEqual( container.counters[MetricName(counter_ns, name)].get_cumulative(), 7) self.assertEqual( container.distributions[MetricName(distro_ns, name)].get_cumulative(), DistributionData(12, 2, 2, 10)) sampler.stop() if __name__ == '__main__': unittest.main()
mxm/incubator-beam
sdks/python/apache_beam/metrics/metric_test.py
Python
apache-2.0
5,639
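For context on what the tests above exercise, here is a minimal sketch of the user-facing metrics API: a DoFn that declares a counter and a distribution and updates them per element. The namespace and metric names are made up for illustration.

# Sketch of the API the tests above cover: a DoFn that owns a counter and a
# distribution. Namespace and metric names are illustrative.
import apache_beam as beam
from apache_beam.metrics import Metrics


class CountWords(beam.DoFn):
    def __init__(self):
        super(CountWords, self).__init__()
        self.word_counter = Metrics.counter('examples.wordcount', 'words')
        self.word_lengths = Metrics.distribution('examples.wordcount', 'word_len')

    def process(self, element):
        self.word_counter.inc()                  # bump the counter for this element
        self.word_lengths.update(len(element))   # track min/max/sum/count of lengths
        yield element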
from django.conf.urls import patterns, include, url from django.contrib import admin admin.autodiscover() from app import settings from home import views urlpatterns = patterns('', url(r'^$', views.login, name='login'), url(r'^home$', views.home, name='home'), url(r'^profile', views.profile, name='profile'), url(r'^tweet', views.tweet, name='tweet'), url(r'^query', views.query, name='query'), url(r'^media/photo', views.media_photo, name='media_photo'), url(r'^media/video', views.media_video, name='media_video'), url(r'^media/inspector', views.media_inspector, name='media_inspector'), url(r'^media', views.media_video, name='media_video'), url(r'^logout$', views.logout, name='logout'), url('', include('social_django.urls', namespace='social')), url(r'^admin/', admin.site.urls), ) urlpatterns += patterns('', (r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}), )
twitterdev/django-rest-apis
app/urls.py
Python
mit
963
from collections import defaultdict import nltk from src.Documents import Document from src.Tokenizers.Tokenizer import Tokenizer class BigramTokenizer(Tokenizer): @property def name(self): return 'BigramTokenizer' def __init__(self): self.tokenizer = nltk.tokenize.TreebankWordTokenizer() def __call__(self, doc): return self.tokenize_content(doc) def tokenize_content(self, content): tokens = self.tokenizer.tokenize(content) lowered_tokens = map(lambda t: t.lower(), tokens) bigrams = nltk.bigrams(lowered_tokens) return list(bigrams) def tokenize(self, document: Document): bow = defaultdict(float) content = document.getContent() bigrams = self.tokenize_content(content) for bigram in bigrams: bow[bigram] += 1.0 return bow
xyryx/SentimentAnalysis
src/Tokenizers/BigramTokenizer.py
Python
mit
868
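A quick usage sketch for the tokenizer above, assuming nltk is installed and the project's src package is on the path; the sample sentence and the expected output shown in the comment are illustrative.

# Usage sketch; requires nltk and the project's src package on the path.
from src.Tokenizers.BigramTokenizer import BigramTokenizer

tok = BigramTokenizer()
print(tok.tokenize_content("The quick brown fox"))
# [('the', 'quick'), ('quick', 'brown'), ('brown', 'fox')]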
#! /usr/bin/env python
"""
File: fit_pendulum_data.py
Copyright (c) 2016 Austin Ayers
License: MIT
Course: PHYS227
Assignment: 5.18
Date: Feb 11, 2016
Email: ayers111@mail.chapman.edu
Name: Austin Ayers
Description: Fits polynomials of degree 1-3 to pendulum length/period data
"""
import sympy as sp
import matplotlib.pyplot as plt
from numpy import polyfit, poly1d

def part_a():
    """
    Part a, plots L versus T using circles as the data points
    """
    f = open('pendulum.dat', 'r')
    x = []
    y = []
    for i,line in enumerate(f):
        if(i < 1):
            continue
        split = line.split()
        x.append(float(split[0]))
        y.append(float(split[1]))
    plt.plot(x,y, 'ro')
    return x,y

def part_b():
    """
    Attempts to fit polynomials of different degrees to part (a)
    """
    graph = part_a()
    coeff1 = polyfit(graph[0], graph[1], 1)
    coeff2 = polyfit(graph[0], graph[1], 2)
    coeff3 = polyfit(graph[0], graph[1], 3)
    p1 = poly1d(coeff1)
    p2 = poly1d(coeff2)
    p3 = poly1d(coeff3)
    y1_fitted = p1(graph[0])
    y2_fitted = p2(graph[0])
    y3_fitted = p3(graph[0])
    plt.plot(graph[0], graph[1], 'ro', graph[0], y1_fitted, 'b-')
    plt.plot(graph[0], graph[1], 'ro', graph[0], y2_fitted, 'b-')
    plt.plot(graph[0], graph[1], 'ro', graph[0], y3_fitted, 'b-')
chapman-phys227-2016s/hw-2-C0deMonkee
fit_pendulum_data.py
Python
mit
1,298
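A short sketch of how the module above would typically be driven, assuming it is importable as fit_pendulum_data and that pendulum.dat (a header line followed by two numeric columns) is present in the working directory.

# Typical driver for the module above; assumes pendulum.dat exists and the
# file is importable as fit_pendulum_data.
import matplotlib.pyplot as plt
import fit_pendulum_data as fpd

fpd.part_b()   # overlays degree-1, -2 and -3 polynomial fits on the data points
plt.show()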
# rndpic - an App Engine app to display random pictures from Picasa Web. # Copyright (C) 2011 Patrick Moor <patrick@moor.ws> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import webapp2 from django.utils import simplejson class JsonHandler(webapp2.RequestHandler): def get(self, user_name): self.response.headers["Content-Type"] = "application/json" self.response.headers["Access-Control-Allow-Origin"] = "*" size = self.request.get("size", "200u") album_id = int(self.request.get("album_id", 0)) picture = webapp2.get_app().registry["picker"].Pick( user_name, size, album_id) if picture: content = { "height": picture.GetHeight(), "width": picture.GetWidth(), "thumbnail_url": picture.GetThumbnailUrl(), "target_url": picture.GetLink(), } simplejson.dump(content, self.response.out) else: self.response.set_status(204)
pmoor/rndpic
json_handler.py
Python
gpl-3.0
1,508
from pyb import RTC import os from utils.airpy_config_utils import load_config_file AIRPY_SYSTEM = 4 AIRPY_ERROR = 3 AIRPY_WARNING = 2 AIRPY_DEBUG = 1 AIRPY_INFO = 0 LOGGER_GLOBAL_REF = 0 class airpy_logger: def __init__(self, priority, caching_enabled=False): """ Logger entry point :param priority: logger default priority :param caching_enabled: caching toggle :return: """ self.__LOGGER_PRIORITY = priority self.__CACHING_ENABLED = caching_enabled self.__CACHE = [] self.__CACHE_MAX_LENGTH = 5 self.__RTC = RTC() datetime = self.__RTC.datetime() app_config = {"serial_only": False, "fs_root": ""} try: app_config = load_config_file("app_config.json") os.mkdir("log") except: pass fs_root = app_config['fs_root'] self.__AIR_PY_LOG = ("%slog/airpy-airpy-log-D%02d-H%02d-m%02d.txt" % (fs_root, datetime[2], datetime[4], datetime[5])) self.__SYSTEM_LOG = ("%slog/airpy-system-log-D%02d-H%02d-m%02d.txt" % (fs_root, datetime[2], datetime[4], datetime[5])) self.__MISSION_LOG = ("%slog/airpy-mission-log-D%02d-H%02d-m%02d.txt" % (fs_root, datetime[2], datetime[4], datetime[5])) self.__FILESYSTEM_AVAILABLE = app_config["serial_only"] self.__IN_MISSION = False info("AirPy logger. File sytem available {}".format(app_config['serial_only'])) def __validate_priority(self, priority): """ Determines if log can be printed :param priority: priority to be matched :return: """ return priority >= self.__LOGGER_PRIORITY def __write_on_sd(self, priority, text): """ Writes on sd :param priority: selected log priority :param text: text to be written :return: """ try: if self.__IN_MISSION: self.mission_log.write("%s\n" % text) else: if priority == AIRPY_SYSTEM and self.__FILESYSTEM_AVAILABLE: system_log = open(self.__SYSTEM_LOG, "a") system_log.write("%s\n" % text) system_log.close() print("Serial log:{}".format(text)) if self.__FILESYSTEM_AVAILABLE: self.__cache_log(text) except OSError: pass def __cache_log(self, text): """ Caches log :param text: text to be cached :return: """ if len(self.__CACHE) == self.__CACHE_MAX_LENGTH: self.flush() self.__CACHE.append(text) if not self.__CACHING_ENABLED: self.flush() def flush(self): """ Flushes the content of cache to file log """ air_py_log = open(self.__AIR_PY_LOG, "a") for text in self.__CACHE: air_py_log.write("%s\n" % text) air_py_log.close() self.__CACHE = [] def airpy_log(self, priority, text): """ Final gateway before writing the log :param priority: text priority :param text: text to be written :return: """ if not self.__validate_priority(priority): return datetime = self.__RTC.datetime() time = ("%02d-%02d-%02d:%03d" % (datetime[4], datetime[5], datetime[6], datetime[7])) log_line = ("%s\t%s" % (time, text)) self.__write_on_sd(priority, log_line) def set_logger_priority(self, priority): """ Sets logging priority :param priority: new priority value :return: """ self.__LOGGER_PRIORITY = priority def set_mission_status(self, enabled): """ Changes mission status :param enabled: true means in mission """ self.__IN_MISSION = enabled if enabled: self.mission_log = open(self.__MISSION_LOG, "a") else: self.mission_log.close() def init(priority, caching_enabled=False): """ Initialize logger :param priority: priority to assign to airpy logger :param caching_enabled: caching toggle :return: """ global LOGGER_GLOBAL_REF if not LOGGER_GLOBAL_REF: LOGGER_GLOBAL_REF = airpy_logger(priority, caching_enabled) def system(text): """ Prints text with system priority :param text: text that will e printed :return: """ global LOGGER_GLOBAL_REF if LOGGER_GLOBAL_REF: 
        LOGGER_GLOBAL_REF.airpy_log(AIRPY_SYSTEM, "SYSTEM\t{}".format(text))


def error(text):
    """
    Prints text with error priority
    :param text: text that will be printed
    :return:
    """
    global LOGGER_GLOBAL_REF
    if LOGGER_GLOBAL_REF:
        LOGGER_GLOBAL_REF.airpy_log(AIRPY_ERROR, "ERROR\t{}".format(text))


def warning(text):
    """
    Prints text with warning priority
    :param text: text that will be printed
    :return:
    """
    global LOGGER_GLOBAL_REF
    if LOGGER_GLOBAL_REF:
        LOGGER_GLOBAL_REF.airpy_log(AIRPY_WARNING, "WARNING\t{}".format(text))


def debug(text):
    """
    Prints text with debug priority
    :param text: text that will be printed
    :return:
    """
    global LOGGER_GLOBAL_REF
    if LOGGER_GLOBAL_REF:
        LOGGER_GLOBAL_REF.airpy_log(AIRPY_DEBUG, "DEBUG\t{}".format(text))


def info(text):
    """
    Prints text with info priority
    :param text: text that will be printed
    :return:
    """
    global LOGGER_GLOBAL_REF
    if LOGGER_GLOBAL_REF:
        LOGGER_GLOBAL_REF.airpy_log(AIRPY_INFO, "INFO\t{}".format(text))


def mission_logging_control(enable):
    """
    Start/stop mission logging
    :param enable: true when mission starts
    """
    global LOGGER_GLOBAL_REF
    if LOGGER_GLOBAL_REF:
        LOGGER_GLOBAL_REF.set_mission_status(enable)
Sokrates80/air-py
utils/airpy_logger.py
Python
mit
5,862
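A minimal sketch of driving the logger module above. Because it imports pyb and expects an app_config.json plus a writable log/ directory, this only makes sense on the MicroPython/pyboard target the project is written for; the messages shown are illustrative.

# Sketch for the MicroPython target only (the module imports pyb); an
# app_config.json and a writable log/ directory are expected on the board.
import utils.airpy_logger as logger

logger.init(logger.AIRPY_INFO, caching_enabled=True)
logger.info("boot complete")           # illustrative messages
logger.warning("low battery")
logger.mission_logging_control(True)   # start writing to the mission log
logger.mission_logging_control(False)  # stop and close it again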
# This file is part of Py6S. # # Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file. # # Py6S is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Py6S is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Py6S. If not, see <http://www.gnu.org/licenses/>. import sys from collections import defaultdict # from sixs_exceptions import * from Py6S.sixs_exceptions import ParameterError class AeroProfile: """Class representing options for Aerosol Profiles""" NoAerosols = 0 Continental = 1 Maritime = 2 Urban = 3 Desert = 5 BiomassBurning = 6 Stratospheric = 7 @classmethod def PredefinedType(cls, type): """Set 6S to use a predefined aerosol type, one of the constants defined in this class. Arguments: * ``type`` -- the predefined aerosol type, one of the constants defined in this class Example usage:: s.aero_profile = AeroProfile.PredefinedType(AeroProfile.Urban) """ return "%d" % type @classmethod def User(cls, **kwargs): """Set 6S to use a user-defined aerosol profile based on proportions of standard aerosol components. The profile is set as a mixture of pre-defined components, each given as an optional keyword. Not all keywords need to be given, but the values for the keywords given must sum to 1, or a ParameterError will be raised. Optional keywords: * ``dust`` -- The proportion of dust-like aerosols * ``water`` -- The proportion of water-like aerosols * ``oceanic`` -- The proportion of oceanic aerosols * ``soot`` -- The proportion of soot-like aerosols Example usage:: s.aero_profile = AeroProfile.User(dust=0.3, oceanic=0.7) s.aero_profile = AeroProfile.User(soot = 0.1, water = 0.3, oceanic = 0.05, dust = 0.55) """ d = defaultdict(lambda: 0, kwargs) dust = d["dust"] water = d["water"] oceanic = d["oceanic"] soot = d["soot"] if ((dust + water + oceanic + soot) - 1) > 0.01: raise ParameterError("Aerosol Profile", "User aerosol components don't sum to 1.0") return "4 (User's Components)\n%f, %f, %f, %f" % (dust, water, oceanic, soot) @classmethod def MultimodalLogNormalDistribution(cls, rmin, rmax): """Set 6S to use a Multimodal Log-Normal distribution. Arguments: * ``rmin`` -- The minimum aerosol radius * ``rmax`` -- The maximum aerosol radius This returns an :class:`.AerosolDistribution` object. Components can then be added to this distribution using the :meth:`.add_component` method of the returned class. Example usage:: s.aero_profile = AeroProfile.MultimodalLogNormalDistribution(0.1, 0.3) s.aero_profile.add_component(...) """ return cls.AerosolDistribution(rmin, rmax, 8) @classmethod def ModifiedGammaDistribution(cls, rmin, rmax): """Set 6S to use a Modified Gamma distribution. Arguments: * ``rmin`` -- The minimum aerosol radius * ``rmax`` -- The maximum aerosol radius This returns an :class:`.AerosolDistribution` object. Components can then be added to this distribution using the :meth:`.add_component` method of the returned class. Example usage:: s.aero_profile = AeroProfile.ModifiedGammaDistribution(0.1, 0.3) s.aero_profile.add_component(...) 
""" return cls.AerosolDistribution(rmin, rmax, 9) @classmethod def JungePowerLawDistribution(cls, rmin, rmax): """Set 6S to use a Junge Power Law distribution. Arguments: * ``rmin`` -- The minimum aerosol radius * ``rmax`` -- The maximum aerosol radius This returns an :class:`.AerosolDistribution` object. Components can then be added to this distribution using the :meth:`.add_component` method of the returned class. Example usage:: s.aero_profile = AeroProfile.JungePowerLawDistribution(0.1, 0.3) s.aero_profile.add_component(...) """ return cls.AerosolDistribution(rmin, rmax, 10) @classmethod def SunPhotometerDistribution(cls, r, dvdlogr, refr_real, refr_imag): """Set 6S to use an aerosol parameterisation from Sun Photometer measurements. The real and imaginary parts of the refractive indices must be input at the following wavelengths (given in micrometers): 0.350, 0.400, 0.412, 0.443, 0.470, 0.488, 0.515, 0.550, 0.590, 0.633, 0.670, 0.694, 0.760, 0.860, 1.240, 1.536, 1.650, 1.950, 2.250, 3.750 Arguments: * ``r`` -- A list of radius measurements from a sun photometer (microns) * ``dvdlogr`` -- A list of dV/d(logr) measurements from a sun photometer, for the radiuses as above (cm^3/cm^2/micron) * ``refr_real`` -- A list containing the real part of the refractive indices for each of the 20 wavelengths (above). If a single float value is given then the value is treated as constant for all wavelengths. * ``refr_imag`` -- A list containing the imaginary part of the refractive indices for each of the 20 wavelengths (above). If a single float value is given then the value is treated as constant for all wavelengths. """ header = "11 (Sun Photometric Distribution)\n" # Check lengths of r and dvdlorg are the same if len(r) != len(dvdlogr): raise ParameterError("R and dV/d(log r)", "These must be the same length") num = "%d\n" % len(r) ds = "" comp = "" for i in range(len(r)): ds += "%f %f\n" % (r[i], dvdlogr[i]) try: if type(refr_real) is float: refr_real = [refr_real] * 20 elif len(refr_real) != 20: raise ParameterError( "Aerosol Distribution Real Refractive Index", "You must specify the real part of the Refractive Index at 20 wavelengths.", ) except TypeError: raise ParameterError( "Aerosol Distribution Imaginary Refractive Index", "You must specify the imaginary part of the Refractive Index at 20 wavelengths.", ) try: if type(refr_imag) is float: refr_imag = [refr_imag] * 20 elif len(refr_imag) != 20: raise ParameterError( "Aerosol Distribution Imaginary Refractive Index", "You must specify the imaginary part of the Refractive Index at 20 wavelengths.", ) except TypeError: raise ParameterError( "Aerosol Distribution Imaginary Refractive Index", "You must specify the imaginary part of the Refractive Index at 20 wavelengths.", ) real = map(str, refr_real) imag = map(str, refr_imag) if sys.version_info[0] >= 3: real = list(real) imag = list(imag) comp += " ".join(real) + "\n" comp += " ".join(imag) + "\n" return header + num + ds + comp + "0 no results saved" class AerosolDistribution: """Stores data regarding a specific Aerosol Distribution. Used by the following methods: * :meth:`.MultimodalLogNormalDistribution` * :meth:`.ModifiedGammaDistribution` * :meth:`.JungePowerLawDistribution` """ def __init__(self, rmin, rmax, numtype): """Initialise an Aerosol Distribution with various parameters. Should not be called directly - use one of the methods like AeroProfile.MultimodalLogNormalDistribution() instead. 
Arguments: * ``rmin`` -- The minimum aerosol radius * ``rmax`` -- The maximum aerosol radius * ``numtype`` -- The type of aerosol distribution (eg. 8 for Multimodal Log Normal) """ self.rmin = rmin self.rmax = rmax self.numtype = numtype self.values = [] def add_component(self, rmean, sigma, percentage_density, refr_real, refr_imag): """Adds a component to the aerosol distribution. Wavelength dependent values must be input at the following wavelengths (given in micrometers): 0.350, 0.400, 0.412, 0.443, 0.470, 0.488, 0.515, 0.550, 0.590, 0.633, 0.670, 0.694, 0.760, 0.860, 1.240, 1.536, 1.650, 1.950, 2.250, 3.750 Arguments: * ``rmean`` -- The mean radius of the aerosols * ``sigma`` -- Sigma, as defined by the distribution (Log Normal etc) * ``percentage_density`` -- The percentage density of the aerosol * ``refr_real`` -- A 20-element iterable giving the real part of the refractive indices at the specified wavelengths (see above) * ``refr_imag`` -- A 20-element iterable giving the imaginary part of the refractive indices at the specified wavelengths (see above) """ if len(self.values) >= 4: raise ParameterError( "Aerosol Distribution components", "You can only add a maximum of 4 components", ) if len(refr_real) != 20: raise ParameterError( "Aerosol Distribution Real Refractive Index", "You must specify the real part of the Refractive Index at 20 wavelengths.", ) if len(refr_imag) != 20: raise ParameterError( "Aerosol Distribution Imaginary Refractive Index", "You must specify the imaginary part of the Refractive Index at 20 wavelengths.", ) comp = "%f %f %f\n" % (rmean, sigma, percentage_density) real = map(str, refr_real) imag = map(str, refr_imag) if sys.version_info[0] >= 3: real = list(real) imag = list(imag) comp += " ".join(real) + "\n" comp += " ".join(imag) + "\n" self.values.append(comp) def __str__(self): result = "%d\n%f %f %d\n" % ( self.numtype, self.rmin, self.rmax, len(self.values), ) components = "".join(self.values) return result + components + "0 no results saved" class UserProfile: """Set 6S to use a user-defined aerosol profile, with differing AOTs over the height of the profile. Arguments: * ``atype`` -- Aerosol type to be used for all layers. Must be one of the pre-defined types defined in this class. Methods: * :meth:`.add_layer` -- Adds a layer to the user-defined aerosol profile, with the specified height and aerosol optical thickness. Example usage:: s.aero_profile = AeroProfile.UserProfile(AeroProfile.Maritime) s.aero_profile.add_layer(5, 0.34) # Add a 5km-thick layer with an AOT of 0.34 s.aero_profile.add_layer(10, 0.7) # Add a 10km-thick layer with an AOT of 0.7 s.aero_profile.add_layer(100, 0.01) # Add a 100km-thick layer with an AOT of 0.01 """ def __init__(self, atype): """Initialises the user-defined aerosol profile to a specific aerosol type. Arguments: * ``atype`` -- Aerosol type to be used for all layers. Must be one of the pre-defined types defined in this class. """ self.aerotype = atype self.values = [] def add_layer(self, height, optical_thickness): """Adds a layer to the user-defined profile. Arguments: * ``height`` -- Height of the layer (in km) * ``optical_thickness`` -- Optical thickness of the layer Example usage:: s.aero_profile.add_layer(5, 0.34) # Add a 5km-thick layer with an AOT of 0.34 """ self.values.append((height, optical_thickness)) def __str__(self): res = "-1 Aerosol model (type) and profile\n%d\n" % len(self.values) for val in self.values: res += "%f %f %d\n" % (val[0], val[1], self.aerotype) return res
robintw/Py6S
Py6S/Params/aeroprofile.py
Python
lgpl-3.0
12,953
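A brief sketch of the two most common ways to select an aerosol profile with the class above, lifted from its own docstring examples; it assumes Py6S (and the underlying 6S executable) is installed.

# Two ways to pick an aerosol profile, taken from the docstring examples above;
# assumes Py6S and the 6S executable are installed.
from Py6S import SixS, AeroProfile

s = SixS()
# a predefined model...
s.aero_profile = AeroProfile.PredefinedType(AeroProfile.Maritime)
# ...or a user-defined mixture whose components must sum to 1.0
s.aero_profile = AeroProfile.User(dust=0.3, oceanic=0.7)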
#!/usr/bin/env python # -*- coding: utf-8 -*- import warnings import pytest from translate.convert import dtd2po, po2dtd, test_convert from translate.misc import wStringIO from translate.storage import dtd, po class TestPO2DTD: def setup_method(self, method): warnings.resetwarnings() def teardown_method(self, method): warnings.resetwarnings() def po2dtd(self, posource, remove_untranslated=False): """helper that converts po source to dtd source without requiring files""" inputfile = wStringIO.StringIO(posource) inputpo = po.pofile(inputfile) convertor = po2dtd.po2dtd(remove_untranslated=remove_untranslated) outputdtd = convertor.convertstore(inputpo) return outputdtd def merge2dtd(self, dtdsource, posource): """helper that merges po translations to dtd source without requiring files""" inputfile = wStringIO.StringIO(posource) inputpo = po.pofile(inputfile) templatefile = wStringIO.StringIO(dtdsource) templatedtd = dtd.dtdfile(templatefile) convertor = po2dtd.redtd(templatedtd) outputdtd = convertor.convertstore(inputpo) return outputdtd def convertdtd(self, posource, dtdtemplate, remove_untranslated=False): """helper to exercise the command line function""" inputfile = wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() templatefile = wStringIO.StringIO(dtdtemplate) assert po2dtd.convertdtd(inputfile, outputfile, templatefile, remove_untranslated=remove_untranslated) return outputfile.getvalue() def roundtripsource(self, dtdsource): """converts dtd source to po and back again, returning the resulting source""" dtdinputfile = wStringIO.StringIO(dtdsource) dtdinputfile2 = wStringIO.StringIO(dtdsource) pooutputfile = wStringIO.StringIO() dtd2po.convertdtd(dtdinputfile, pooutputfile, dtdinputfile2) posource = pooutputfile.getvalue() poinputfile = wStringIO.StringIO(posource) dtdtemplatefile = wStringIO.StringIO(dtdsource) dtdoutputfile = wStringIO.StringIO() po2dtd.convertdtd(poinputfile, dtdoutputfile, dtdtemplatefile) dtdresult = dtdoutputfile.getvalue() print_string = "Original DTD:\n%s\n\nPO version:\n%s\n\n" print_string = print_string + "Output DTD:\n%s\n################" print(print_string % (dtdsource, posource, dtdresult)) return dtdresult def roundtripstring(self, entitystring): """Just takes the contents of a ENTITY definition (with quotes) and does a roundtrip on that""" dtdintro, dtdoutro = '<!ENTITY Test.RoundTrip ', '>\n' dtdsource = dtdintro + entitystring + dtdoutro dtdresult = self.roundtripsource(dtdsource) assert dtdresult.startswith(dtdintro) and dtdresult.endswith(dtdoutro) return dtdresult[len(dtdintro):-len(dtdoutro)] def check_roundtrip(self, dtdsource, dtdcompare=None): """Checks that the round-tripped string is the same as dtdcompare. If no dtdcompare string is provided then the round-tripped string is compared with the original string. The reason why sometimes another string is provided to compare with the resulting string from the roundtrip is that if the original string contains some characters, like " character, or escapes like &quot;, then when the roundtrip is performed those characters or escapes are escaped, rendering a round-tripped string which differs from the original one. 
""" if not dtdcompare: dtdcompare = dtdsource assert self.roundtripstring(dtdsource) == dtdcompare def test_joinlines(self): """tests that po lines are joined seamlessly (bug 16)""" multilinepo = '''#: pref.menuPath\nmsgid ""\n"<span>Tools &gt; Options</"\n"span>"\nmsgstr ""\n''' dtdfile = self.po2dtd(multilinepo) dtdsource = str(dtdfile) assert "</span>" in dtdsource def test_escapedstr(self): """tests that \n in msgstr is escaped correctly in dtd""" multilinepo = '''#: pref.menuPath\nmsgid "Hello\\nEveryone"\nmsgstr "Good day\\nAll"\n''' dtdfile = self.po2dtd(multilinepo) dtdsource = str(dtdfile) assert "Good day\nAll" in dtdsource def test_missingaccesskey(self): """tests that proper warnings are given if access key is missing""" simplepo = '''#: simple.label #: simple.accesskey msgid "Simple &String" msgstr "Dimpled Ring" ''' simpledtd = '''<!ENTITY simple.label "Simple String"> <!ENTITY simple.accesskey "S">''' warnings.simplefilter("error") assert pytest.raises(Warning, self.merge2dtd, simpledtd, simplepo) def test_accesskeycase(self): """tests that access keys come out with the same case as the original, regardless""" simplepo_template = '''#: simple.label\n#: simple.accesskey\nmsgid "%s"\nmsgstr "%s"\n''' simpledtd_template = '''<!ENTITY simple.label "Simple %s">\n<!ENTITY simple.accesskey "%s">''' possibilities = [ #(en label, en akey, en po, af po, af label, expected af akey) ("Sis", "S", "&Sis", "&Sies", "Sies", "S"), ("Sis", "s", "Si&s", "&Sies", "Sies", "S"), ("Sis", "S", "&Sis", "Sie&s", "Sies", "s"), ("Sis", "s", "Si&s", "Sie&s", "Sies", "s"), # untranslated strings should have the casing of the source ("Sis", "S", "&Sis", "", "Sis", "S"), ("Sis", "s", "Si&s", "", "Sis", "s"), ("Suck", "S", "&Suck", "", "Suck", "S"), ("Suck", "s", "&Suck", "", "Suck", "s"), ] for (en_label, en_akey, po_source, po_target, target_label, target_akey) in possibilities: simplepo = simplepo_template % (po_source, po_target) simpledtd = simpledtd_template % (en_label, en_akey) dtdfile = self.merge2dtd(simpledtd, simplepo) dtdfile.makeindex() accel = dtd.unquotefromdtd(dtdfile.id_index["simple.accesskey"].definition) assert accel == target_akey def test_accesskey_types(self): """tests that we can detect the various styles of accesskey""" simplepo_template = '''#: simple.%s\n#: simple.%s\nmsgid "&File"\nmsgstr "F&aele"\n''' simpledtd_template = '''<!ENTITY simple.%s "File">\n<!ENTITY simple.%s "a">''' for label in ("label", "title"): for accesskey in ("accesskey", "accessKey", "akey"): simplepo = simplepo_template % (label, accesskey) simpledtd = simpledtd_template % (label, accesskey) dtdfile = self.merge2dtd(simpledtd, simplepo) dtdfile.makeindex() assert dtd.unquotefromdtd(dtdfile.id_index["simple.%s" % accesskey].definition) == "a" def test_ampersandfix(self): """tests that invalid ampersands are fixed in the dtd""" simplestring = '''#: simple.string\nmsgid "Simple String"\nmsgstr "Dimpled &Ring"\n''' dtdfile = self.po2dtd(simplestring) dtdsource = str(dtdfile) assert "Dimpled Ring" in dtdsource po_snippet = r'''#: searchIntegration.label #: searchIntegration.accesskey msgid "Allow &searchIntegration.engineName; to &search messages" msgstr "&searchIntegration.engineName; &ileti aramasına izin ver" ''' dtd_snippet = r'''<!ENTITY searchIntegration.accesskey "s"> <!ENTITY searchIntegration.label "Allow &searchIntegration.engineName; to search messages">''' dtdfile = self.merge2dtd(dtd_snippet, po_snippet) dtdsource = str(dtdfile) print(dtdsource) assert '"&searchIntegration.engineName; ileti 
aramasına izin ver"' in dtdsource def test_accesskey_missing(self): """tests that missing ampersands use the source accesskey""" po_snippet = r'''#: key.label #: key.accesskey msgid "&Search" msgstr "Ileti" ''' dtd_snippet = r'''<!ENTITY key.accesskey "S"> <!ENTITY key.label "Ileti">''' dtdfile = self.merge2dtd(dtd_snippet, po_snippet) dtdsource = str(dtdfile) print(dtdsource) assert '"Ileti"' in dtdsource assert '""' not in dtdsource assert '"S"' in dtdsource def test_accesskey_and_amp_case_no_accesskey(self): """tests that accesskey and &amp; can work together If missing we use the source accesskey""" po_snippet = r'''#: key.label #: key.accesskey msgid "Colour & &Light" msgstr "Lig en Kleur" ''' dtd_snippet = r'''<!ENTITY key.accesskey "L"> <!ENTITY key.label "Colour &amp; Light">''' dtdfile = self.merge2dtd(dtd_snippet, po_snippet) dtdsource = str(dtdfile) print(dtdsource) assert '"Lig en Kleur"' in dtdsource assert '"L"' in dtdsource def test_accesskey_and_amp_case_no_amp(self): """tests that accesskey and &amp; can work together If present we use the target accesskey""" po_snippet = r'''#: key.label #: key.accesskey msgid "Colour & &Light" msgstr "Lig en &Kleur" ''' dtd_snippet = r'''<!ENTITY key.accesskey "L"> <!ENTITY key.label "Colour &amp; Light">''' dtdfile = self.merge2dtd(dtd_snippet, po_snippet) dtdsource = str(dtdfile) print(dtdsource) assert '"Lig en Kleur"' in dtdsource assert '"K"' in dtdsource def test_accesskey_and_amp_case_both_amp_and_accesskey(self): """tests that accesskey and &amp; can work together If present both & (and) and a marker then we use the correct source accesskey""" po_snippet = r'''#: key.label #: key.accesskey msgid "Colour & &Light" msgstr "Lig & &Kleur" ''' dtd_snippet = r'''<!ENTITY key.accesskey "L"> <!ENTITY key.label "Colour &amp; Light">''' dtdfile = self.merge2dtd(dtd_snippet, po_snippet) dtdsource = str(dtdfile) print(dtdsource) assert '"Lig &amp; Kleur"' in dtdsource assert '"K"' in dtdsource def test_entities_two(self): """test the error ouput when we find two entities""" simplestring = '''#: simple.string second.string\nmsgid "Simple String"\nmsgstr "Dimpled Ring"\n''' dtdfile = self.po2dtd(simplestring) dtdsource = str(dtdfile) assert "CONVERSION NOTE - multiple entities" in dtdsource def test_entities(self): """tests that entities are correctly idnetified in the dtd""" simplestring = '''#: simple.string\nmsgid "Simple String"\nmsgstr "Dimpled Ring"\n''' dtdfile = self.po2dtd(simplestring) dtdsource = str(dtdfile) assert dtdsource.startswith("<!ENTITY simple.string") def test_comments_translator(self): """tests for translator comments""" simplestring = '''# Comment1\n# Comment2\n#: simple.string\nmsgid "Simple String"\nmsgstr "Dimpled Ring"\n''' dtdfile = self.po2dtd(simplestring) dtdsource = str(dtdfile) assert dtdsource.startswith("<!-- Comment1 -->") def test_retains_hashprefix(self): """tests that hash prefixes in the dtd are retained""" hashpo = '''#: lang.version\nmsgid "__MOZILLA_LOCALE_VERSION__"\nmsgstr "__MOZILLA_LOCALE_VERSION__"\n''' hashdtd = '#expand <!ENTITY lang.version "__MOZILLA_LOCALE_VERSION__">\n' dtdfile = self.merge2dtd(hashdtd, hashpo) regendtd = str(dtdfile) assert regendtd == hashdtd def test_convertdtd(self): """checks that the convertdtd function is working""" posource = '''#: simple.label\n#: simple.accesskey\nmsgid "Simple &String"\nmsgstr "Dimpled &Ring"\n''' dtdtemplate = '''<!ENTITY simple.label "Simple String">\n<!ENTITY simple.accesskey "S">\n''' dtdexpected = '''<!ENTITY simple.label "Dimpled 
Ring">\n<!ENTITY simple.accesskey "R">\n''' newdtd = self.convertdtd(posource, dtdtemplate) print(newdtd) assert newdtd == dtdexpected def test_untranslated_with_template(self): """test removing of untranslated entries in redtd""" posource = '''#: simple.label msgid "Simple string" msgstr "Dimpled ring" #: simple.label2 msgid "Simple string 2" msgstr "" #: simple.label3 msgid "Simple string 3" msgstr "Simple string 3" #: simple.label4 #, fuzzy msgid "Simple string 4" msgstr "simple string four" ''' dtdtemplate = '''<!ENTITY simple.label "Simple string"> <!ENTITY simple.label2 "Simple string 2"> <!ENTITY simple.label3 "Simple string 3"> <!ENTITY simple.label4 "Simple string 4"> ''' dtdexpected = '''<!ENTITY simple.label "Dimpled ring"> <!ENTITY simple.label3 "Simple string 3"> ''' newdtd = self.convertdtd(posource, dtdtemplate, remove_untranslated=True) print(newdtd) assert newdtd == dtdexpected def test_untranslated_without_template(self): """test removing of untranslated entries in po2dtd""" posource = '''#: simple.label msgid "Simple string" msgstr "Dimpled ring" #: simple.label2 msgid "Simple string 2" msgstr "" #: simple.label3 msgid "Simple string 3" msgstr "Simple string 3" #: simple.label4 #, fuzzy msgid "Simple string 4" msgstr "simple string four" ''' dtdexpected = '''<!ENTITY simple.label "Dimpled ring"> <!ENTITY simple.label3 "Simple string 3"> ''' newdtd = self.po2dtd(posource, remove_untranslated=True) print(newdtd) assert str(newdtd) == dtdexpected def test_blank_source(self): """test removing of untranslated entries where source is blank""" posource = '''#: simple.label msgid "Simple string" msgstr "Dimpled ring" #: simple.label2 msgid "" msgstr "" #: simple.label3 msgid "Simple string 3" msgstr "Simple string 3" ''' dtdtemplate = '''<!ENTITY simple.label "Simple string"> <!ENTITY simple.label2 ""> <!ENTITY simple.label3 "Simple string 3"> ''' dtdexpected_with_template = '''<!ENTITY simple.label "Dimpled ring"> <!ENTITY simple.label2 ""> <!ENTITY simple.label3 "Simple string 3"> ''' dtdexpected_no_template = '''<!ENTITY simple.label "Dimpled ring"> <!ENTITY simple.label3 "Simple string 3"> ''' newdtd_with_template = self.convertdtd(posource, dtdtemplate, remove_untranslated=True) print(newdtd_with_template) assert newdtd_with_template == dtdexpected_with_template newdtd_no_template = self.po2dtd(posource, remove_untranslated=True) print(newdtd_no_template) assert str(newdtd_no_template) == dtdexpected_no_template def test_newlines_escapes(self): """check that we can handle a \n in the PO file""" posource = '''#: simple.label\n#: simple.accesskey\nmsgid "A hard coded newline.\\n"\nmsgstr "Hart gekoeerde nuwe lyne\\n"\n''' dtdtemplate = '<!ENTITY simple.label "A hard coded newline.\n">\n' dtdexpected = '''<!ENTITY simple.label "Hart gekoeerde nuwe lyne\n">\n''' dtdfile = self.merge2dtd(dtdtemplate, posource) print(dtdfile) assert str(dtdfile) == dtdexpected def test_roundtrip_simple(self): """checks that simple strings make it through a dtd->po->dtd roundtrip""" self.check_roundtrip('"Hello"') self.check_roundtrip('"Hello Everybody"') def test_roundtrip_escape(self): """checks that escapes in strings make it through a dtd->po->dtd roundtrip""" self.check_roundtrip(r'"Simple Escape \ \n \\ \: \t \r "') self.check_roundtrip(r'"End Line Escape \"') def test_roundtrip_quotes(self): """Checks that quotes make it through a DTD->PO->DTD roundtrip. Quotes may be escaped or not. 
""" # NOTE: during the roundtrip, if " quote mark is present, then it is # converted to &quot; and the resulting string is always enclosed # between " characters independently of which quotation marks the # original string is enclosed between. Thus the string cannot be # compared with itself and therefore other string should be provided to # compare with the result. # # Thus the string cannot be compared with itself and therefore another # string should be provided to compare with the roundtrip result. self.check_roundtrip(r"""'Quote Escape "" '""", r'''"Quote Escape &quot;&quot; "''') self.check_roundtrip(r'''"Double-Quote Escape &quot;&quot; "''') self.check_roundtrip(r'''"Single-Quote ' "''') self.check_roundtrip(r'''"Single-Quote Escape \' "''') # NOTE: during the roundtrip, if " quote mark is present, then ' is # converted to &apos; and " is converted to &quot; Also the resulting # string is always enclosed between " characters independently of which # quotation marks the original string is enclosed between. Thus the # string cannot be compared with itself and therefore another string # should be provided to compare with the result. # # Thus the string cannot be compared with itself and therefore another # string should be provided to compare with the roundtrip result. self.check_roundtrip(r"""'Both Quotes "" &apos;&apos; '""", r'''"Both Quotes &quot;&quot; &apos;&apos; "''') self.check_roundtrip(r'''"Both Quotes &quot;&quot; &apos;&apos; "''') # NOTE: during the roundtrip, if &quot; is present, then ' is converted # to &apos; Also the resulting string is always enclosed between " # characters independently of which quotation marks the original string # is enclosed between. # # Thus the string cannot be compared with itself and therefore another # string should be provided to compare with the roundtrip result. self.check_roundtrip(r'''"Both Quotes &quot;&quot; '' "''', r'''"Both Quotes &quot;&quot; &apos;&apos; "''') def test_roundtrip_amp(self): """Checks that quotes make it through a DTD->PO->DTD roundtrip. Quotes may be escaped or not. """ self.check_roundtrip('"Colour &amp; Light"') def test_merging_entries_with_spaces_removed(self): """dtd2po removes pretty printed spaces, this tests that we can merge this back into the pretty printed dtd""" posource = '''#: simple.label\nmsgid "First line then "\n"next lines."\nmsgstr "Eerste lyne en dan volgende lyne."\n''' dtdtemplate = '<!ENTITY simple.label "First line then\n' + \ ' next lines.">\n' dtdexpected = '<!ENTITY simple.label "Eerste lyne en dan volgende lyne.">\n' dtdfile = self.merge2dtd(dtdtemplate, posource) print(dtdfile) assert str(dtdfile) == dtdexpected def test_preserving_spaces(self): """ensure that we preseve spaces between entity and value. Bug 1662""" posource = '''#: simple.label\nmsgid "One"\nmsgstr "Een"\n''' dtdtemplate = '<!ENTITY simple.label "One">\n' dtdexpected = '<!ENTITY simple.label "Een">\n' dtdfile = self.merge2dtd(dtdtemplate, posource) print(dtdfile) assert str(dtdfile) == dtdexpected def test_preserving_spaces(self): """Preseve spaces after value. 
Bug 1662""" # Space between value and > posource = '''#: simple.label\nmsgid "One"\nmsgstr "Een"\n''' dtdtemplate = '<!ENTITY simple.label "One" >\n' dtdexpected = '<!ENTITY simple.label "Een" >\n' dtdfile = self.merge2dtd(dtdtemplate, posource) print(dtdfile) assert str(dtdfile) == dtdexpected # Space after > dtdtemplate = '<!ENTITY simple.label "One"> \n' dtdexpected = '<!ENTITY simple.label "Een"> \n' dtdfile = self.merge2dtd(dtdtemplate, posource) print(dtdfile) assert str(dtdfile) == dtdexpected def test_comments(self): """test that we preserve comments, bug 351""" posource = '''#: name\nmsgid "Text"\nmsgstr "Teks"''' dtdtemplate = '''<!ENTITY name "%s">\n<!-- \n\nexample -->\n''' dtdfile = self.merge2dtd(dtdtemplate % "Text", posource) print(dtdfile) assert str(dtdfile) == dtdtemplate % "Teks" def test_duplicates(self): """test that we convert duplicates back correctly to their respective entries.""" posource = r'''#: bookmarksMenu.label bookmarksMenu.accesskey msgctxt "bookmarksMenu.label bookmarksMenu.accesskey" msgid "&Bookmarks" msgstr "Dipu&kutshwayo1" #: bookmarksItem.title msgctxt "bookmarksItem.title msgid "Bookmarks" msgstr "Dipukutshwayo2" #: bookmarksButton.label msgctxt "bookmarksButton.label" msgid "Bookmarks" msgstr "Dipukutshwayo3" ''' dtdtemplate = r'''<!ENTITY bookmarksMenu.label "Bookmarks"> <!ENTITY bookmarksMenu.accesskey "B"> <!ENTITY bookmarksItem.title "Bookmarks"> <!ENTITY bookmarksButton.label "Bookmarks"> ''' dtdexpected = r'''<!ENTITY bookmarksMenu.label "Dipukutshwayo1"> <!ENTITY bookmarksMenu.accesskey "k"> <!ENTITY bookmarksItem.title "Dipukutshwayo2"> <!ENTITY bookmarksButton.label "Dipukutshwayo3"> ''' dtdfile = self.merge2dtd(dtdtemplate, posource) print(dtdfile) assert str(dtdfile) == dtdexpected class TestPO2DTDCommand(test_convert.TestConvertCommand, TestPO2DTD): """Tests running actual po2dtd commands on files""" convertmodule = po2dtd defaultoptions = {"progress": "none"} # TODO: because of having 2 base classes, we need to call all their setup and teardown methods # (otherwise we won't reset the warnings etc) def setup_method(self, method): """call both base classes setup_methods""" test_convert.TestConvertCommand.setup_method(self, method) TestPO2DTD.setup_method(self, method) def teardown_method(self, method): """call both base classes teardown_methods""" test_convert.TestConvertCommand.teardown_method(self, method) TestPO2DTD.teardown_method(self, method) def test_help(self): """tests getting help""" options = test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE") options = self.help_check(options, "--fuzzy") options = self.help_check(options, "--threshold=PERCENT") options = self.help_check(options, "--removeuntranslated") options = self.help_check(options, "--nofuzzy", last=True)
whip112/Whip112
vendor/packages/translate/convert/test_po2dtd.py
Python
mpl-2.0
22,776
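The escaping notes in the test file above (whip112/Whip112, test_po2dtd.py) describe how a DTD entity value written between double quotes must carry &quot; and &apos; entities, which is why those roundtrip tests compare against a second, pre-escaped string instead of the original input. The sketch below is illustrative only and is not the translate toolkit's implementation; escape_dtd_value is a made-up helper that simply mirrors the rule the comments state.

# Illustrative sketch, not translate-toolkit code: the quoting rule the
# roundtrip comments above describe for double-quoted DTD entity values.
def escape_dtd_value(text):
    """Return text as a double-quoted DTD entity value."""
    return '"%s"' % text.replace('"', "&quot;").replace("'", "&apos;")

print(escape_dtd_value('Both Quotes "" \'\' '))
# -> "Both Quotes &quot;&quot; &apos;&apos; "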
import os import sys import traceback from .. import constants, logger, exceptions, dialogs from . import scene, geometry, api, base_classes def _error_handler(func): def inner(filepath, options, *args, **kwargs): level = options.get(constants.LOGGING, constants.DEBUG) version = options.get('addon_version') logger.init('io_three.export.log', level=level) if version is not None: logger.debug("Addon Version %s", version) api.init() try: func(filepath, options, *args, **kwargs) except: info = sys.exc_info() trace = traceback.format_exception( info[0], info[1], info[2].tb_next) trace = ''.join(trace) logger.error(trace) print('Error recorded to %s' % logger.LOG_FILE) raise else: print('Log: %s' % logger.LOG_FILE) return inner @_error_handler def export_scene(filepath, options): selected = [] # during scene exports unselect everything. this is needed for # applying modifiers, if it is necessary # record the selected nodes so that selection is restored later for obj in api.selected_objects(): api.object.unselect(obj) selected.append(obj) active = api.active_object() try: scene_ = scene.Scene(filepath, options=options) scene_.parse() scene_.write() except: _restore_selection(selected, active) raise _restore_selection(selected, active) @_error_handler def export_geometry(filepath, options, node=None): msg = "" exception = None if node is None: node = api.active_object() if node is None: msg = "Nothing selected" logger.error(msg) exception = exceptions.SelectionError if node.type != 'MESH': msg = "%s is not a valid mesh object" % node.name logger.error(msg) exception = exceptions.GeometryError if exception is not None: if api.batch_mode(): raise exception(msg) else: dialogs.error(msg) return mesh = api.object.mesh(node, options) parent = base_classes.BaseScene(filepath, options) geo = geometry.Geometry(mesh, parent) geo.parse() geo.write() if not options.get(constants.EMBED_ANIMATION, True): geo.write_animation(os.path.dirname(filepath)) def _restore_selection(objects, active): for obj in objects: api.object.select(obj) api.set_active_object(active)
meizhoubao/three.js
utils/exporters/blender/addons/io_three/exporter/__init__.py
Python
mit
2,661
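The exporter module above wraps its entry points in an _error_handler decorator so logging is initialised before the export runs and any traceback is written to the log before the exception propagates. Below is a minimal, Blender-independent sketch of that pattern; LOG_FILE and the logging setup are illustrative stand-ins, not the io_three API.

import functools
import logging
import traceback

LOG_FILE = "export.log"  # stand-in for logger.LOG_FILE in the module above

def error_handler(func):
    @functools.wraps(func)
    def inner(filepath, options, *args, **kwargs):
        # initialise logging first, mirroring logger.init(...) above
        logging.basicConfig(filename=LOG_FILE,
                            level=options.get("level", logging.DEBUG))
        try:
            return func(filepath, options, *args, **kwargs)
        except Exception:
            # record the traceback, report where it went, then re-raise
            logging.error(traceback.format_exc())
            print("Error recorded to %s" % LOG_FILE)
            raise
    return inner

@error_handler
def export_scene(filepath, options):
    print("exporting %s" % filepath)

export_scene("scene.json", {"level": logging.INFO})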
import webapp2, urllib, jinja2, os from google.appengine.api import taskqueue from google.appengine.api import urlfetch from apikeys import * # contains api key for YOIN15MIN,YOIN30MIN,YOINANHOUR SINGLE_YO_API = "http://api.justyo.co/yo/" jinja_environment = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates'))) class ScheduleHandler(webapp2.RequestHandler): def get(self): username = self.request.get("username") link = self.request.get("link") path = self.request.path if path in APIDATA and username: apitoken,delay = APIDATA[path] taskqueue.add( url="/yo", params={"username":username.upper(), "api_token":apitoken, "link":link}, method="POST", countdown=delay) class YoHandler(webapp2.RequestHandler): def post(self): params = {field:self.request.get(field) for field in self.request.arguments()} if "username" in params and "api_token" in params: form_data = urllib.urlencode(params) urlfetch.fetch(url=SINGLE_YO_API, payload=form_data, method=urlfetch.POST, headers={'Content-Type': 'application/x-www-form-urlencoded'}) class HomePageHandler(webapp2.RequestHandler): def get(self): template = jinja_environment.get_template("index.html") self.response.write(template.render({})) app = webapp2.WSGIApplication([ (callback,ScheduleHandler ) for callback in APIDATA ] + [ ("/", HomePageHandler), ("/yo", YoHandler) ], debug=True)
yasszoug/yoreminder
main.py
Python
mit
1,860
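main.py above imports APIDATA from an apikeys module that is not included here. From the way it is used (every key becomes a ScheduleHandler route, and each value unpacks into apitoken, delay), it has to be a dict mapping URL paths to (Yo API token, countdown-in-seconds) pairs. The entries below are placeholders that only illustrate that shape; the real paths and tokens are not shown in the source.

# Placeholder illustration of the structure main.py expects from
# apikeys.APIDATA; the paths and tokens here are invented, not the real ones.
APIDATA = {
    "/yoin15min": ("PLACEHOLDER_YO_TOKEN_15", 15 * 60),
    "/yoin30min": ("PLACEHOLDER_YO_TOKEN_30", 30 * 60),
    "/yoinanhour": ("PLACEHOLDER_YO_TOKEN_60", 60 * 60),
}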
from django import forms from .models import Dataset class DatasetForm(forms.ModelForm): class Meta: model = Dataset
dpelc/qPRC
qPRC/apps/datasets/forms.py
Python
agpl-3.0
132
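One caveat about the small form above: since Django 1.8, a ModelForm whose Meta declares a model but neither fields nor exclude raises ImproperlyConfigured at class definition time. A version that keeps the same intent (expose every Dataset field) while staying valid on newer Django is sketched below; fields = '__all__' is the documented way to opt in explicitly.

from django import forms
from .models import Dataset


class DatasetForm(forms.ModelForm):
    class Meta:
        model = Dataset
        # required on Django 1.8+; '__all__' keeps the original behaviour
        fields = '__all__'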
import unittest import karmatrain.db_management as db_management # CONSTANTS FOR TESTING SD1 = {"title": "title1", "selftext": "selftext1", "subreddit": "brasil", "time": 1234, "thread_id": "1DFGG"} SD2 = {"title": "title2", "selftext": "selftext1", "subreddit": "brasil2", "time": 1234, "thread_id": "2DFGG"} SD3 = {"title": "title3", "selftext": "selftext1", "subreddit": "brasil3", "time": 1234, "thread_id": "3DFGG"} SD_EXTRA = {"title": "title3", "selftext": "selftext1", "subreddit": "brasil3", "time": 1234, "thread_id": "4DFGG"} ST1 = {"thread_id": "1DFGG", "computer_time": 1, "ratio": 0.1, "ups": 99, "num_comments": 9001} ST2 = {"thread_id": "1DFGG", "computer_time": 2, "ratio": 0.1, "ups": 98, "num_comments": 9002} ST3 = {"thread_id": "1DFGG", "computer_time": 3, "ratio": 0.1, "ups": 0, "num_comments": 9003} UPDATE_SUBMISSION_EXPECTED_RESULT = ((1.0, 0.1, 99, 9001, 1), (2.0, 0.1, 98, 9002, 2), (3.0, 0.1, 0, 9003, 3)) ST_LIST = [ST1, ST2, ST3] THREAD_ID_LIST = [] class TestMySQLConnection(unittest.TestCase): def setUp(self): try: self.conn = db_management.MySQLConnection() except: # TODO Find specific error later self.skipTest("Can't connect to database") pass def test_methods(self): """I made a monolithic unit test because otherwise I would have to write hundreds of lines to set up different environments""" # test insert_new_submission method self.assertEqual( self.conn.insert_new_submission(SD1["title"], SD1["thread_id"], SD1["subreddit"], SD1["selftext"], SD1["time"]), True) THREAD_ID_LIST.append(SD1["thread_id"]) # test update_submission method for st in ST_LIST: self.conn.update_submission(st["thread_id"], st["computer_time"], st["ratio"], st["ups"],st["num_comments"]) self.assertEqual(self.conn.get_submission(SD1["thread_id"]), UPDATE_SUBMISSION_EXPECTED_RESULT) # test get_list_of_submissions method self.assertEqual(self.conn.get_list_of_submissions(), ['1DFGG']) # test delete_submission self.conn.delete_submission(SD1["thread_id"]) self.assertEqual(self.conn.get_list_of_submissions(), []) THREAD_ID_LIST.remove('1DFGG') pass @classmethod def tearDownClass(cls): print("cleaning up database...", end='') conn = db_management.MySQLConnection() cur = conn.connection.cursor() try: cur.execute("DROP TABLE submissions") for thread_id in THREAD_ID_LIST: cur.execute(u"DROP TABLE {0:s}".format(thread_id)) print(" DONE!") except: print(" FAILED to clean up database after test") pass def tearDown(self): self.conn.close() pass if __name__ == '__main__': unittest.main() pass
andredriem/KarmaTrain
tests/db_management_test.py
Python
gpl-3.0
2,951
from django.test import TestCase from django_mailbox.models import Message from batch_apps.models import App, Day, Execution from batch_apps.generator import get_current_date_in_gmt8 from batch_apps.integration import ( execute_end_to_end_tasks, get_unexecuted_due_executions, get_unprocessed_unmatched_emails, ) from batch_apps.matcher import match_subject import datetime import pytz class EmailExecutionAppPatternMatcherTest(TestCase): fixtures = ['test_apps.json', 'test_messages.json'] def test_email_matches_single_active_daily_app_with_single_active_pattern_using_low_level_steps(self): app = App.objects.get(name="SGDailyAppTask SendExpiringNotice") self.assertTrue(app.is_active) self.assertEqual(app.frequency, 'daily') pattern_list = app.pattern_set.filter(is_active=True) self.assertEqual(len(pattern_list), 1) pattern = pattern_list[0] self.assertEqual(pattern.name_pattern, "SendExpiringNotice Success") self.assertTrue(pattern.is_active) day = Execution.objects._get_or_create_day_object(datetime.date(2014, 10, 20)) execution = Execution.objects._get_or_create_execution_object(day, app) self.assertTrue(execution.is_due_today) self.assertFalse(execution.is_executed) email = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>") self.assertEqual(email.subject, "Batch App - SGDailyAppTask SendExpiringNotice Success") self.assertEqual(str(email.sent_time), "2014-10-20 02:31:25+00:00") self.assertFalse(email.processed_batch_apps) self.assertFalse(email.matched_batch_apps) matched = match_subject(str(pattern), email.subject) self.assertTrue(matched) email.matched_batch_apps = True email.processed_batch_apps = True email.save() execution.is_executed = True execution.email = email execution.save() email_recheck = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>") self.assertEqual(email_recheck, email) self.assertTrue(email_recheck.processed_batch_apps) self.assertTrue(email_recheck.matched_batch_apps) execution_recheck = Execution.objects._get_or_create_execution_object(day, app) self.assertTrue(execution_recheck.is_executed) self.assertEqual(execution_recheck.email, email) def test_execute_end_to_end_module_using_fixture_should_pass(self): app = App.objects.get(name="SGDailyAppTask SendExpiringNotice") self.assertTrue(app.is_active) self.assertEqual(app.frequency, 'daily') pattern_list = app.pattern_set.filter(is_active=True) self.assertEqual(len(pattern_list), 1) pattern = pattern_list[0] self.assertEqual(pattern.name_pattern, "SendExpiringNotice Success") self.assertTrue(pattern.is_active) email = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>") self.assertEqual(email.subject, "Batch App - SGDailyAppTask SendExpiringNotice Success") self.assertEqual(str(email.sent_time), "2014-10-20 02:31:25+00:00") self.assertFalse(email.processed_batch_apps) self.assertFalse(email.matched_batch_apps) execute_end_to_end_tasks(datetime.date(2014, 10, 20)) email_recheck = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>") self.assertEqual(email_recheck, email) self.assertTrue(email_recheck.processed_batch_apps) self.assertTrue(email_recheck.matched_batch_apps) execution_recheck = Execution.objects.get(app=app, day__date=datetime.date(2014, 10, 20)) self.assertTrue(execution_recheck.is_executed) self.assertEqual(execution_recheck.email, email) def 
test_execute_end_to_end_module_should_default_to_today_if_date_is_not_given(self): execute_end_to_end_tasks() day = Day.objects.get(pk=1) self.assertEqual(len(Day.objects.all()), 1) self.assertEqual(get_current_date_in_gmt8(), day.date) class EmailFilteringTest(TestCase): fixtures = ['test_messages.json'] def setUp(self): self.email1 = Message.objects.get(message_id="<CAFKhJv2VAg2jx7o+Y+Kz_Ze72m7PAPq0Q8QjhC7_J+OVVnUvvg@mail.gmail.com>") self.email2 = Message.objects.get(message_id="<CAFKhJv1ugtTL=ji5_JxZ9KwVxfqi_haYpGb+wJrekW7RUx0pRw@mail.gmail.com>") self.email3 = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>") self.email4 = Message.objects.get(message_id="<CAFKhJv18p+O28UB2nQT1cTKL437GFM7SJpK=30x5j7+dNRtD7A@mail.gmail.com>") self.email5 = Message.objects.get(message_id="<CAFKhJv0yhRMvdqF9JGabbHDH2Esw86Q9OZ40B52-y=MPLCyYBg@mail.gmail.com>") self.email6 = Message.objects.get(message_id="<CAG6WN+9O6P7arbVA=M1Mz=_9cSJ-nOL47eB2DaVYN_iJvc-9Lg@mail.gmail.com>") def test_get_unprocessed_unmatched_emails_should_return_unprocessed_emails(self): self.email2.processed_batch_apps = True self.email2.save() self.assertFalse(self.email1.processed_batch_apps) self.assertTrue(self.email2.processed_batch_apps) results = get_unprocessed_unmatched_emails(datetime.date(2014, 10, 20)) self.assertIn(self.email1, results) self.assertNotIn(self.email2, results) def test_get_unprocessed_unmatched_emails_should_return_unmatched_emails(self): self.email4.matched_batch_apps = True self.email4.save() self.assertFalse(self.email3.matched_batch_apps) self.assertTrue(self.email4.matched_batch_apps) results = get_unprocessed_unmatched_emails(datetime.date(2014, 10, 20)) self.assertIn(self.email3, results) self.assertNotIn(self.email4, results) def test_get_unprocessed_unmatched_emails_should_return_emails_with_correct_date(self): self.email6.sent_time = datetime.datetime.now(pytz.timezone('Asia/Kuala_Lumpur')) self.email6.save() date_ = datetime.date(2014, 10, 20) results = get_unprocessed_unmatched_emails(date_) self.assertIn(self.email5, results) self.assertNotIn(self.email6, results) def test_get_unprocessed_unmatched_email_should_select_email_according_to_gmt8_timezone(self): self.email1.sent_time = datetime.datetime(2014, 11, 27, hour=15, minute=59, second=59, tzinfo=pytz.utc) self.email2.sent_time = datetime.datetime(2014, 11, 27, hour=16, minute=00, second=00, tzinfo=pytz.utc) self.email3.sent_time = datetime.datetime(2014, 11, 28, hour=15, minute=59, second=59, tzinfo=pytz.utc) self.email4.sent_time = datetime.datetime(2014, 11, 28, hour=16, minute=00, second=00, tzinfo=pytz.utc) self.email1.save() self.email2.save() self.email3.save() self.email4.save() date_ = datetime.date(2014, 11, 28) results = get_unprocessed_unmatched_emails(date_) self.assertNotIn(self.email1, results) self.assertIn(self.email2, results) self.assertIn(self.email3, results) self.assertNotIn(self.email4, results) class ExecutionFilteringTest(TestCase): def test_get_due_executions_should_return_executions_with_correct_date(self): app1 = App.objects.create(name="My App 001") day1 = Day.objects.create(date=datetime.date(2014, 10, 20)) day2 = Day.objects.create(date=datetime.date(2014, 10, 21)) execution1 = Execution.objects.create(day=day1, app=app1, is_executed=False, is_due_today=True) execution2 = Execution.objects.create(day=day2, app=app1, is_executed=False, is_due_today=True) date_ = datetime.date(2014, 10, 20) results = get_unexecuted_due_executions(date_) self.assertIn(execution1, 
results) self.assertNotIn(execution2, results) def test_get_due_executions_should_return_executions_due_on_the_date(self): app1 = App.objects.create(name="My App 001") app2 = App.objects.create(name="My App 002") day = Day.objects.create(date=datetime.date(2014, 10, 20)) execution1 = Execution.objects.create(day=day, app=app1, is_executed=False, is_due_today=True) execution2 = Execution.objects.create(day=day, app=app2, is_executed=False, is_due_today=False) date_ = datetime.date(2014, 10, 20) results = get_unexecuted_due_executions(date_) self.assertIn(execution1, results) self.assertNotIn(execution2, results) def test_get_due_executions_should_return_unexecuted_executions(self): app1 = App.objects.create(name="My App 001") app2 = App.objects.create(name="My App 002") day = Day.objects.create(date=datetime.date(2014, 10, 20)) execution1 = Execution.objects.create(day=day, app=app1, is_executed=False, is_due_today=True) execution2 = Execution.objects.create(day=day, app=app2, is_executed=True, is_due_today=True) date_ = datetime.date(2014, 10, 20) results = get_unexecuted_due_executions(date_) self.assertIn(execution1, results) self.assertNotIn(execution2, results)
azam-a/batcher
batch_apps/tests/test_integrations.py
Python
mit
9,337
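The timezone test near the end of the suite above hinges on 16:00 UTC being the GMT+8 day boundary: an email sent at 15:59:59 UTC still belongs to the previous local date, while one sent at 16:00:00 UTC already falls on the next one. The self-contained check below reproduces just that conversion with pytz.

import datetime
import pytz

kl = pytz.timezone('Asia/Kuala_Lumpur')  # GMT+8, the zone used in the tests
before = datetime.datetime(2014, 11, 27, 15, 59, 59, tzinfo=pytz.utc)
after = datetime.datetime(2014, 11, 27, 16, 0, 0, tzinfo=pytz.utc)
print(before.astimezone(kl))  # 2014-11-27 23:59:59+08:00 -> local date Nov 27
print(after.astimezone(kl))   # 2014-11-28 00:00:00+08:00 -> local date Nov 28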
## Auto-contained SimpleJSON encoder ## Brewed by: Alvaro Lopez Ortega, 2010 # Copyright (c) 2006 Bob Ippolito # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import re import sys import struct from decimal import Decimal def _floatconstants(): _BYTES = '7FF80000000000007FF0000000000000'.decode('hex') # The struct module in Python 2.4 would get frexp() out of range here # when an endian is specified in the format string. Fixed in Python 2.5+ if sys.byteorder != 'big': _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] nan, inf = struct.unpack('dd', _BYTES) return nan, inf, -inf NaN, PosInf, NegInf = _floatconstants() ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') ESCAPE_DCT = { '\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', } for i in range(0x20): #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) FLOAT_REPR = repr def encode_basestring(s): """Return a JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): return ESCAPE_DCT[match.group(0)] return u'"' + ESCAPE.sub(replace, s) + u'"' def py_encode_basestring_ascii(s): """Return an ASCII-only JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): s = match.group(0) try: return ESCAPE_DCT[s] except KeyError: n = ord(s) if n < 0x10000: #return '\\u{0:04x}'.format(n) return '\\u%04x' % (n,) else: # surrogate pair n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) return '\\u%04x\\u%04x' % (s1, s2) return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' encode_basestring_ascii = py_encode_basestring_ascii class JSONEncoder(object): """Extensible JSON <http://json.org> encoder for Python data structures. 
Supports the following objects and types by default: +-------------------+---------------+ | Python | JSON | +===================+===============+ | dict | object | +-------------------+---------------+ | list, tuple | array | +-------------------+---------------+ | str, unicode | string | +-------------------+---------------+ | int, long, float | number | +-------------------+---------------+ | True | true | +-------------------+---------------+ | False | false | +-------------------+---------------+ | None | null | +-------------------+---------------+ To extend this to recognize other objects, subclass and implement a ``.default()`` method with another method that returns a serializable object for ``o`` if possible, otherwise it should call the superclass implementation (to raise ``TypeError``). """ item_separator = ', ' key_separator = ': ' def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None, encoding='utf-8', default=None, use_decimal=False): """Constructor for JSONEncoder, with sensible defaults. If skipkeys is false, then it is a TypeError to attempt encoding of keys that are not str, int, long, float or None. If skipkeys is True, such items are simply skipped. If ensure_ascii is true, the output is guaranteed to be str objects with all incoming unicode characters escaped. If ensure_ascii is false, the output will be unicode object. If check_circular is true, then lists, dicts, and custom encoded objects will be checked for circular references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise, no such check takes place. If allow_nan is true, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a ValueError to encode such floats. If sort_keys is true, then the output of dictionaries will be sorted by key; this is useful for regression tests to ensure that JSON serializations can be compared on a day-to-day basis. If indent is a string, then JSON array elements and object members will be pretty-printed with a newline followed by that string repeated for each level of nesting. ``None`` (the default) selects the most compact representation without any newlines. For backwards compatibility with versions of simplejson earlier than 2.1.0, an integer is also accepted and is converted to a string with that many spaces. If specified, separators should be a (item_separator, key_separator) tuple. The default is (', ', ': '). To get the most compact JSON representation you should specify (',', ':') to eliminate whitespace. If specified, default is a function that gets called for objects that can't otherwise be serialized. It should return a JSON encodable version of the object or raise a ``TypeError``. If encoding is not None, then all input strings will be transformed into unicode using that encoding prior to JSON-encoding. The default is UTF-8. If use_decimal is true (not the default), ``decimal.Decimal`` will be supported directly by the encoder. For the inverse, decode JSON with ``parse_float=decimal.Decimal``. 
""" self.skipkeys = skipkeys self.ensure_ascii = ensure_ascii self.check_circular = check_circular self.allow_nan = allow_nan self.sort_keys = sort_keys self.use_decimal = use_decimal if isinstance(indent, (int, long)): indent = ' ' * indent self.indent = indent if separators is not None: self.item_separator, self.key_separator = separators if default is not None: self.default = default self.encoding = encoding def default(self, o): """Implement this method in a subclass such that it returns a serializable object for ``o``, or calls the base implementation (to raise a ``TypeError``). For example, to support arbitrary iterators, you could implement default like this:: def default(self, o): try: iterable = iter(o) except TypeError: pass else: return list(iterable) return JSONEncoder.default(self, o) """ raise TypeError(repr(o) + " is not JSON serializable") def encode(self, o): """Return a JSON string representation of a Python data structure. >>> from simplejson import JSONEncoder >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) '{"foo": ["bar", "baz"]}' """ # This is for extremely simple cases and benchmarks. if isinstance(o, basestring): if isinstance(o, str): _encoding = self.encoding if (_encoding is not None and not (_encoding == 'utf-8')): o = o.decode(_encoding) if self.ensure_ascii: return encode_basestring_ascii(o) else: return encode_basestring(o) # This doesn't pass the iterator directly to ''.join() because the # exceptions aren't as detailed. The list call should be roughly # equivalent to the PySequence_Fast that ''.join() would do. chunks = self.iterencode(o, _one_shot=True) if not isinstance(chunks, (list, tuple)): chunks = list(chunks) if self.ensure_ascii: return ''.join(chunks) else: return u''.join(chunks) def iterencode(self, o, _one_shot=False): """Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk) """ if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring if self.encoding != 'utf-8': def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): if isinstance(o, str): o = o.decode(_encoding) return _orig_encoder(o) def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): # Check for specials. Note that this type of test is processor # and/or platform-specific, so do tests which don't depend on # the internals. if o != o: text = 'NaN' elif o == _inf: text = 'Infinity' elif o == _neginf: text = '-Infinity' else: return _repr(o) if not allow_nan: raise ValueError( "Out of range float values are not JSON compliant: " + repr(o)) return text key_memo = {} _iterencode = _make_iterencode( markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot, self.use_decimal) try: return _iterencode(o, 0) finally: key_memo.clear() class JSONEncoderForHTML(JSONEncoder): """An encoder that produces JSON safe to embed in HTML. To embed JSON content in, say, a script tag on a web page, the characters &, < and > should be escaped. They cannot be escaped with the usual entities (e.g. &amp;) because they are not expanded within <script> tags. """ def encode(self, o): # Override JSONEncoder.encode because it has hacks for # performance that make things more complicated. 
chunks = self.iterencode(o, True) if self.ensure_ascii: return ''.join(chunks) else: return u''.join(chunks) def iterencode(self, o, _one_shot=False): chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot) for chunk in chunks: chunk = chunk.replace('&', '\\u0026') chunk = chunk.replace('<', '\\u003c') chunk = chunk.replace('>', '\\u003e') yield chunk def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, _use_decimal, ## HACK: hand-optimized bytecode; turn globals into locals False=False, True=True, ValueError=ValueError, basestring=basestring, Decimal=Decimal, dict=dict, float=float, id=id, int=int, isinstance=isinstance, list=list, long=long, str=str, tuple=tuple, ): def _iterencode_list(lst, _current_indent_level): if not lst: yield '[]' return if markers is not None: markerid = id(lst) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = lst buf = '[' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (_indent * _current_indent_level) separator = _item_separator + newline_indent buf += newline_indent else: newline_indent = None separator = _item_separator first = True for value in lst: if first: first = False else: buf = separator if isinstance(value, basestring): yield buf + _encoder(value) elif value is None: yield buf + 'null' elif value is True: yield buf + 'true' elif value is False: yield buf + 'false' elif isinstance(value, (int, long)): yield buf + str(value) elif isinstance(value, float): yield buf + _floatstr(value) elif _use_decimal and isinstance(value, Decimal): yield buf + str(value) else: yield buf if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, _current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (_indent * _current_indent_level) yield ']' if markers is not None: del markers[markerid] def _iterencode_dict(dct, _current_indent_level): if not dct: yield '{}' return if markers is not None: markerid = id(dct) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = dct yield '{' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (_indent * _current_indent_level) item_separator = _item_separator + newline_indent yield newline_indent else: newline_indent = None item_separator = _item_separator first = True if _sort_keys: items = dct.items() items.sort(key=lambda kv: kv[0]) else: items = dct.iteritems() for key, value in items: if isinstance(key, basestring): pass # JavaScript is weakly typed for these, so it makes sense to # also allow them. Many encoders seem to do something like this. 
elif isinstance(key, float): key = _floatstr(key) elif key is True: key = 'true' elif key is False: key = 'false' elif key is None: key = 'null' elif isinstance(key, (int, long)): key = str(key) elif _skipkeys: continue else: raise TypeError("key " + repr(key) + " is not a string") if first: first = False else: yield item_separator yield _encoder(key) yield _key_separator if isinstance(value, basestring): yield _encoder(value) elif value is None: yield 'null' elif value is True: yield 'true' elif value is False: yield 'false' elif isinstance(value, (int, long)): yield str(value) elif isinstance(value, float): yield _floatstr(value) elif _use_decimal and isinstance(value, Decimal): yield str(value) else: if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, _current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (_indent * _current_indent_level) yield '}' if markers is not None: del markers[markerid] def _iterencode(o, _current_indent_level): if isinstance(o, basestring): yield _encoder(o) elif o is None: yield 'null' elif o is True: yield 'true' elif o is False: yield 'false' elif isinstance(o, (int, long)): yield str(o) elif isinstance(o, float): yield _floatstr(o) elif isinstance(o, (list, tuple)): for chunk in _iterencode_list(o, _current_indent_level): yield chunk elif isinstance(o, dict): for chunk in _iterencode_dict(o, _current_indent_level): yield chunk elif _use_decimal and isinstance(o, Decimal): yield str(o) else: if markers is not None: markerid = id(o) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = o o = _default(o) for chunk in _iterencode(o, _current_indent_level): yield chunk if markers is not None: del markers[markerid] return _iterencode _default_encoder = JSONEncoder( skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, indent=None, separators=None, encoding='utf-8', default=None, use_decimal=False, ) def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, use_decimal=False, **kw): # cached encoder if (not skipkeys and ensure_ascii and check_circular and allow_nan and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not use_decimal and not kw): return _default_encoder.encode(obj) if cls is None: cls = JSONEncoder return cls( skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, use_decimal=use_decimal, **kw).encode(obj)
helix84/activae
src/CTK_trunk/CTK/json_embedded.py
Python
bsd-3-clause
20,102
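The embedded encoder above is Python 2 code (it relies on basestring, long and str.decode('hex')). A short Python 2 usage sketch of its two documented extras follows: use_decimal serialises decimal.Decimal directly, and JSONEncoderForHTML escapes &, < and > so the output can be inlined in a script tag. The import name json_embedded follows the path shown for this file; adjust it to wherever the module sits in your tree.

# Python 2 usage sketch for the embedded encoder above.
from decimal import Decimal
import json_embedded  # module name taken from the file path above

print(json_embedded.dumps({"price": Decimal("1.10")}, use_decimal=True))
# {"price": 1.10}

enc = json_embedded.JSONEncoderForHTML()
print(enc.encode("</script>"))
# "\u003c/script\u003e"  -- safe to embed inside a <script> tag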
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page class AuthRegistrationsCredentialListMappingList(ListResource): """ """ def __init__(self, version, account_sid, domain_sid): """ Initialize the AuthRegistrationsCredentialListMappingList :param Version version: Version that contains the resource :param account_sid: The SID of the Account that created the resource :param domain_sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingList :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingList """ super(AuthRegistrationsCredentialListMappingList, self).__init__(version) # Path Solution self._solution = {'account_sid': account_sid, 'domain_sid': domain_sid, } self._uri = '/Accounts/{account_sid}/SIP/Domains/{domain_sid}/Auth/Registrations/CredentialListMappings.json'.format(**self._solution) def create(self, credential_list_sid): """ Create a new AuthRegistrationsCredentialListMappingInstance :param unicode credential_list_sid: The SID of the CredentialList resource to map to the SIP domain :returns: Newly created AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance """ data = values.of({'CredentialListSid': credential_list_sid, }) payload = self._version.create( 'POST', self._uri, data=data, ) return AuthRegistrationsCredentialListMappingInstance( self._version, payload, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], ) def stream(self, limit=None, page_size=None): """ Streams AuthRegistrationsCredentialListMappingInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit']) def list(self, limit=None, page_size=None): """ Lists AuthRegistrationsCredentialListMappingInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. 
:param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance] """ return list(self.stream(limit=limit, page_size=page_size, )) def page(self, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of AuthRegistrationsCredentialListMappingInstance records from the API. Request is executed immediately :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingPage """ params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return AuthRegistrationsCredentialListMappingPage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of AuthRegistrationsCredentialListMappingInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return AuthRegistrationsCredentialListMappingPage(self._version, response, self._solution) def get(self, sid): """ Constructs a AuthRegistrationsCredentialListMappingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext """ return AuthRegistrationsCredentialListMappingContext( self._version, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a AuthRegistrationsCredentialListMappingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext """ return AuthRegistrationsCredentialListMappingContext( self._version, account_sid=self._solution['account_sid'], 
domain_sid=self._solution['domain_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Api.V2010.AuthRegistrationsCredentialListMappingList>' class AuthRegistrationsCredentialListMappingPage(Page): """ """ def __init__(self, version, response, solution): """ Initialize the AuthRegistrationsCredentialListMappingPage :param Version version: Version that contains the resource :param Response response: Response from the API :param account_sid: The SID of the Account that created the resource :param domain_sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingPage :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingPage """ super(AuthRegistrationsCredentialListMappingPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of AuthRegistrationsCredentialListMappingInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance """ return AuthRegistrationsCredentialListMappingInstance( self._version, payload, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Api.V2010.AuthRegistrationsCredentialListMappingPage>' class AuthRegistrationsCredentialListMappingContext(InstanceContext): """ """ def __init__(self, version, account_sid, domain_sid, sid): """ Initialize the AuthRegistrationsCredentialListMappingContext :param Version version: Version that contains the resource :param account_sid: The SID of the Account that created the resource to fetch :param domain_sid: The SID of the SIP domain that contains the resource to fetch :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext """ super(AuthRegistrationsCredentialListMappingContext, self).__init__(version) # Path Solution self._solution = {'account_sid': account_sid, 'domain_sid': domain_sid, 'sid': sid, } self._uri = '/Accounts/{account_sid}/SIP/Domains/{domain_sid}/Auth/Registrations/CredentialListMappings/{sid}.json'.format(**self._solution) def fetch(self): """ Fetch a AuthRegistrationsCredentialListMappingInstance :returns: Fetched AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, 
params=params, ) return AuthRegistrationsCredentialListMappingInstance( self._version, payload, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], sid=self._solution['sid'], ) def delete(self): """ Deletes the AuthRegistrationsCredentialListMappingInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._version.delete('delete', self._uri) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Api.V2010.AuthRegistrationsCredentialListMappingContext {}>'.format(context) class AuthRegistrationsCredentialListMappingInstance(InstanceResource): """ """ def __init__(self, version, payload, account_sid, domain_sid, sid=None): """ Initialize the AuthRegistrationsCredentialListMappingInstance :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance """ super(AuthRegistrationsCredentialListMappingInstance, self).__init__(version) # Marshaled Properties self._properties = { 'account_sid': payload.get('account_sid'), 'date_created': deserialize.rfc2822_datetime(payload.get('date_created')), 'date_updated': deserialize.rfc2822_datetime(payload.get('date_updated')), 'friendly_name': payload.get('friendly_name'), 'sid': payload.get('sid'), } # Context self._context = None self._solution = { 'account_sid': account_sid, 'domain_sid': domain_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. 
All instance actions are proxied to the context :returns: AuthRegistrationsCredentialListMappingContext for this AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext """ if self._context is None: self._context = AuthRegistrationsCredentialListMappingContext( self._version, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], sid=self._solution['sid'], ) return self._context @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def date_created(self): """ :returns: The RFC 2822 date and time in GMT that the resource was created :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: The RFC 2822 date and time in GMT that the resource was last updated :rtype: datetime """ return self._properties['date_updated'] @property def friendly_name(self): """ :returns: The string that you assigned to describe the resource :rtype: unicode """ return self._properties['friendly_name'] @property def sid(self): """ :returns: The unique string that identifies the resource :rtype: unicode """ return self._properties['sid'] def fetch(self): """ Fetch a AuthRegistrationsCredentialListMappingInstance :returns: Fetched AuthRegistrationsCredentialListMappingInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance """ return self._proxy.fetch() def delete(self): """ Deletes the AuthRegistrationsCredentialListMappingInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete() def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Api.V2010.AuthRegistrationsCredentialListMappingInstance {}>'.format(context)
tysonholub/twilio-python
twilio/rest/api/v2010/account/sip/domain/auth_types/auth_registrations_mapping/auth_registrations_credential_list_mapping.py
Python
mit
17,559
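The stream() and list() docstrings above describe the usual twilio-python paging trade-off: stream() fetches pages lazily as the generator is consumed, while list() eagerly loads up to limit records before returning. A hedged usage sketch follows; the account and domain SIDs are placeholders, and the accessor chain is inferred from this module's path, so it may differ slightly between library versions.

from twilio.rest import Client

# Placeholder credentials and SIDs -- substitute real values.
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

mappings = (client.api
            .accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
            .sip
            .domains("SDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
            .auth
            .registrations
            .credential_list_mappings)

# stream() pages lazily; list() would load everything before returning.
for mapping in mappings.stream(limit=20, page_size=20):
    print("%s %s" % (mapping.sid, mapping.friendly_name))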
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pipeline.param.base_param import BaseParam class StochasticQuasiNewtonParam(BaseParam): """ Parameters used for stochastic quasi-newton method. Parameters ---------- update_interval_L : int, default: 3 Set how many iteration to update hess matrix memory_M : int, default: 5 Stack size of curvature information, i.e. y_k and s_k in the paper. sample_size : int, default: 5000 Sample size of data that used to update Hess matrix """ def __init__(self, update_interval_L=3, memory_M=5, sample_size=5000, random_seed=None): super().__init__() self.update_interval_L = update_interval_L self.memory_M = memory_M self.sample_size = sample_size self.random_seed = random_seed def check(self): descr = "hetero sqn param's" self.check_positive_integer(self.update_interval_L, descr) self.check_positive_integer(self.memory_M, descr) self.check_positive_integer(self.sample_size, descr) if self.random_seed is not None: self.check_positive_integer(self.random_seed, descr) return True
FederatedAI/FATE
python/fate_client/pipeline/param/sqn_param.py
Python
apache-2.0
1,812
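A brief usage sketch for the parameter class above: construct it with the documented fields and call check(), which validates that each of them is a positive integer via the helpers inherited from BaseParam. The import path follows the file's location in the repository.

from pipeline.param.sqn_param import StochasticQuasiNewtonParam

sqn = StochasticQuasiNewtonParam(update_interval_L=4, memory_M=8,
                                 sample_size=2000, random_seed=42)
assert sqn.check()  # returns True once every field passes validation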
#!/usr/bin/env python # Copyright 2012 Google Inc. All Rights Reserved. """Tests for grr.lib.objectfilter.""" import unittest from grr.lib import objectfilter attr1 = "Backup" attr2 = "Archive" hash1 = "123abc" hash2 = "456def" filename = "boot.ini" class DummyObject(object): def __init__(self, key, value): setattr(self, key, value) class HashObject(object): def __init__(self, hash_value=None): self.value = hash_value @property def md5(self): return self.value def __eq__(self, y): return self.value == y def __lt__(self, y): return self.value < y class Dll(object): def __init__(self, name, imported_functions=None, exported_functions=None): self.name = name self._imported_functions = imported_functions or [] self.num_imported_functions = len(self._imported_functions) self.exported_functions = exported_functions or [] self.num_exported_functions = len(self.exported_functions) @property def imported_functions(self): for fn in self._imported_functions: yield fn class DummyFile(object): non_callable_leaf = "yoda" def __init__(self): self.non_callable = HashObject(hash1) self.non_callable_repeated = [DummyObject("desmond", ["brotha", "brotha"]), DummyObject("desmond", ["brotha", "sista"])] self.imported_dll1 = Dll("a.dll", ["FindWindow", "CreateFileA"]) self.imported_dll2 = Dll("b.dll", ["RegQueryValueEx"]) @property def name(self): return filename @property def attributes(self): return [attr1, attr2] @property def hash(self): return [HashObject(hash1), HashObject(hash2)] @property def mapping(self): return {"hashes": [HashObject(hash1), HashObject(hash2)], "nested": {"attrs": [attr1, attr2]}, "string": "mate", "float": 42.0} @property def size(self): return 10 @property def deferred_values(self): for v in ["a", "b"]: yield v @property def novalues(self): return [] @property def imported_dlls(self): return [self.imported_dll1, self.imported_dll2] def Callable(self): raise RuntimeError("This can not be called.") @property def float(self): return 123.9823 class ObjectFilterTest(unittest.TestCase): def setUp(self): self.file = DummyFile() self.filter_imp = objectfilter.LowercaseAttributeFilterImplementation self.value_expander = self.filter_imp.FILTERS["ValueExpander"] operator_tests = { objectfilter.Less: [ (True, ["size", 1000]), (True, ["size", 11]), (False, ["size", 10]), (False, ["size", 0]), (False, ["float", 1.0]), (True, ["float", 123.9824]), ], objectfilter.LessEqual: [ (True, ["size", 1000]), (True, ["size", 11]), (True, ["size", 10]), (False, ["size", 9]), (False, ["float", 1.0]), (True, ["float", 123.9823]), ], objectfilter.Greater: [ (True, ["size", 1]), (True, ["size", 9.23]), (False, ["size", 10]), (False, ["size", 1000]), (True, ["float", 122]), (True, ["float", 1.0]), ], objectfilter.GreaterEqual: [ (False, ["size", 1000]), (False, ["size", 11]), (True, ["size", 10]), (True, ["size", 0]), # Floats work fine too (True, ["float", 122]), (True, ["float", 123.9823]), # Comparisons works with strings, although it might be a bit silly (True, ["name", "aoot.ini"]), ], objectfilter.Contains: [ # Contains works with strings (True, ["name", "boot.ini"]), (True, ["name", "boot"]), (False, ["name", "meh"]), # Works with generators (True, ["imported_dlls.imported_functions", "FindWindow"]), # But not with numbers (False, ["size", 12]), ], objectfilter.NotContains: [ (False, ["name", "boot.ini"]), (False, ["name", "boot"]), (True, ["name", "meh"]), ], objectfilter.Equals: [ (True, ["name", "boot.ini"]), (False, ["name", "foobar"]), (True, ["float", 123.9823]), ], objectfilter.NotEquals: [ (False, 
["name", "boot.ini"]), (True, ["name", "foobar"]), (True, ["float", 25]), ], objectfilter.InSet: [ (True, ["name", ["boot.ini", "autoexec.bat"]]), (True, ["name", "boot.ini"]), (False, ["name", "NOPE"]), # All values of attributes are within these (True, ["attributes", ["Archive", "Backup", "Nonexisting"]]), # Not all values of attributes are within these (False, ["attributes", ["Executable", "Sparse"]]), ], objectfilter.NotInSet: [ (False, ["name", ["boot.ini", "autoexec.bat"]]), (False, ["name", "boot.ini"]), (True, ["name", "NOPE"]), ], objectfilter.Regexp: [ (True, ["name", "^boot.ini$"]), (True, ["name", "boot.ini"]), (False, ["name", "^$"]), (True, ["attributes", "Archive"]), # One can regexp numbers if he's inclined to (True, ["size", 0]), # But regexp doesn't work with lists or generators for the moment (False, ["imported_dlls.imported_functions", "FindWindow"]) ], } def testBinaryOperators(self): for operator, test_data in self.operator_tests.items(): for test_unit in test_data: print ("Testing %s with %s and %s" % ( operator, test_unit[0], test_unit[1])) kwargs = {"arguments": test_unit[1], "value_expander": self.value_expander} self.assertEqual(test_unit[0], operator(**kwargs).Matches(self.file)) def testExpand(self): # Case insensitivity values_lowercase = self.value_expander().Expand(self.file, "size") values_uppercase = self.value_expander().Expand(self.file, "Size") self.assertListEqual(list(values_lowercase), list(values_uppercase)) # Existing, non-repeated, leaf is a value values = self.value_expander().Expand(self.file, "size") self.assertListEqual(list(values), [10]) # Existing, non-repeated, leaf is a string in mapping values = self.value_expander().Expand(self.file, "mapping.string") self.assertListEqual(list(values), ["mate"]) # Existing, non-repeated, leaf is a scalar in mapping values = self.value_expander().Expand(self.file, "mapping.float") self.assertListEqual(list(values), [42.0]) # Existing, non-repeated, leaf is iterable values = self.value_expander().Expand(self.file, "attributes") self.assertListEqual(list(values), [[attr1, attr2]]) # Existing, repeated, leaf is value values = self.value_expander().Expand(self.file, "hash.md5") self.assertListEqual(list(values), [hash1, hash2]) # Existing, repeated, leaf is iterable values = self.value_expander().Expand(self.file, "non_callable_repeated.desmond") self.assertListEqual(list(values), [["brotha", "brotha"], ["brotha", "sista"]]) # Existing, repeated, leaf is mapping. 
values = self.value_expander().Expand(self.file, "mapping.hashes") self.assertListEqual(list(values), [hash1, hash2]) values = self.value_expander().Expand(self.file, "mapping.nested.attrs") self.assertListEqual(list(values), [[attr1, attr2]]) # Now with an iterator values = self.value_expander().Expand(self.file, "deferred_values") self.assertListEqual([list(value) for value in values], [["a", "b"]]) # Iterator > generator values = self.value_expander().Expand(self.file, "imported_dlls.imported_functions") expected = [ ["FindWindow", "CreateFileA"], ["RegQueryValueEx"]] self.assertListEqual([list(value) for value in values], expected) # Non-existing first path values = self.value_expander().Expand(self.file, "nonexistant") self.assertListEqual(list(values), []) # Non-existing in the middle values = self.value_expander().Expand(self.file, "hash.mink.boo") self.assertListEqual(list(values), []) # Non-existing as a leaf values = self.value_expander().Expand(self.file, "hash.mink") self.assertListEqual(list(values), []) # Non-callable leaf values = self.value_expander().Expand(self.file, "non_callable_leaf") self.assertListEqual(list(values), [DummyFile.non_callable_leaf]) # callable values = self.value_expander().Expand(self.file, "Callable") self.assertListEqual(list(values), []) # leaf under a callable. Will return nothing values = self.value_expander().Expand(self.file, "Callable.a") self.assertListEqual(list(values), []) def testGenericBinaryOperator(self): class TestBinaryOperator(objectfilter.GenericBinaryOperator): values = list() def Operation(self, x, _): return self.values.append(x) # Test a common binary operator tbo = TestBinaryOperator(arguments=["whatever", 0], value_expander=self.value_expander) self.assertEqual(tbo.right_operand, 0) self.assertEqual(tbo.args[0], "whatever") tbo.Matches(DummyObject("whatever", "id")) tbo.Matches(DummyObject("whatever", "id2")) tbo.Matches(DummyObject("whatever", "bg")) tbo.Matches(DummyObject("whatever", "bg2")) self.assertListEqual(tbo.values, ["id", "id2", "bg", "bg2"]) def testContext(self): self.assertRaises(objectfilter.InvalidNumberOfOperands, objectfilter.Context, arguments=["context"], value_expander=self.value_expander) self.assertRaises( objectfilter.InvalidNumberOfOperands, objectfilter.Context, arguments=["context", objectfilter.Equals( arguments=["path", "value"], value_expander=self.value_expander), objectfilter.Equals( arguments=["another_path", "value"], value_expander=self.value_expander)], value_expander=self.value_expander) # "One imported_dll imports 2 functions AND one imported_dll imports # function RegQueryValueEx" arguments = [ objectfilter.Equals(["imported_dlls.num_imported_functions", 1], value_expander=self.value_expander), objectfilter.Contains(["imported_dlls.imported_functions", "RegQueryValueEx"], value_expander=self.value_expander)] condition = objectfilter.AndFilter(arguments=arguments) # Without context, it matches because both filters match separately self.assertEqual(True, condition.Matches(self.file)) arguments = [ objectfilter.Equals(["num_imported_functions", 2], value_expander=self.value_expander), objectfilter.Contains(["imported_functions", "RegQueryValueEx"], value_expander=self.value_expander)] condition = objectfilter.AndFilter(arguments=arguments) # "The same DLL imports 2 functions AND one of these is RegQueryValueEx" context = objectfilter.Context(arguments=["imported_dlls", condition], value_expander=self.value_expander) # With context, it doesn't match because both don't match in the same dll 
self.assertEqual(False, context.Matches(self.file)) # "One imported_dll imports only 1 function AND one imported_dll imports # function RegQueryValueEx" condition = objectfilter.AndFilter(arguments=[ objectfilter.Equals(arguments=["num_imported_functions", 1], value_expander=self.value_expander), objectfilter.Contains(["imported_functions", "RegQueryValueEx"], value_expander=self.value_expander)]) # "The same DLL imports 1 function AND it"s RegQueryValueEx" context = objectfilter.Context(["imported_dlls", condition], value_expander=self.value_expander) self.assertEqual(True, context.Matches(self.file)) # Now test the context with a straight query query = """ @imported_dlls ( imported_functions contains "RegQueryValueEx" AND num_imported_functions == 1 ) """ filter_ = objectfilter.Parser(query).Parse() filter_ = filter_.Compile(self.filter_imp) self.assertEqual(True, filter_.Matches(self.file)) def testRegexpRaises(self): self.assertRaises(ValueError, objectfilter.Regexp, arguments=["name", "I [dont compile"], value_expander=self.value_expander) def testEscaping(self): parser = objectfilter.Parser(r"a is '\n'").Parse() self.assertEqual(parser.args[0], "\n") # Invalid escape sequence parser = objectfilter.Parser(r"a is '\z'") self.assertRaises(objectfilter.ParseError, parser.Parse) # Can escape the backslash parser = objectfilter.Parser(r"a is '\\'").Parse() self.assertEqual(parser.args[0], "\\") # HEX ESCAPING # This fails as it's not really a hex escaped string parser = objectfilter.Parser(r"a is '\xJZ'") self.assertRaises(objectfilter.ParseError, parser.Parse) # Instead, this is what one should write parser = objectfilter.Parser(r"a is '\\xJZ'").Parse() self.assertEqual(parser.args[0], r"\xJZ") # Standard hex-escape parser = objectfilter.Parser(r"a is '\x41\x41\x41'").Parse() self.assertEqual(parser.args[0], "AAA") # Hex-escape + a character parser = objectfilter.Parser(r"a is '\x414'").Parse() self.assertEqual(parser.args[0], r"A4") # How to include r'\x41' parser = objectfilter.Parser(r"a is '\\x41'").Parse() self.assertEqual(parser.args[0], r"\x41") def testParse(self): # Arguments are either int, float or quoted string objectfilter.Parser("attribute == 1").Parse() objectfilter.Parser("attribute == 0x10").Parse() objectfilter.Parser("attribute == 0xa").Parse() objectfilter.Parser("attribute == 0xFF").Parse() parser = objectfilter.Parser("attribute == 1a") self.assertRaises(objectfilter.ParseError, parser.Parse) objectfilter.Parser("attribute == 1.2").Parse() objectfilter.Parser("attribute == 'bla'").Parse() objectfilter.Parser("attribute == \"bla\"").Parse() parser = objectfilter.Parser("something == red") self.assertRaises(objectfilter.ParseError, parser.Parse) # Can't start with AND parser = objectfilter.Parser("and something is 'Blue'") self.assertRaises(objectfilter.ParseError, parser.Parse) # Need to close braces objectfilter.Parser("(a is 3)").Parse() parser = objectfilter.Parser("(a is 3") self.assertRaises(objectfilter.ParseError, parser.Parse) # Need to open braces to close them parser = objectfilter.Parser("a is 3)") self.assertRaises(objectfilter.ParseError, parser.Parse) # Can parse lists objectfilter.Parser("attribute inset [1, 2, '3', 4.01, 0xa]").Parse() # Need to close square braces for lists. parser = objectfilter.Parser("attribute inset [1, 2, '3', 4.01, 0xA") self.assertRaises(objectfilter.ParseError, parser.Parse) # Need to opensquare braces to close lists. 
parser = objectfilter.Parser("attribute inset 1, 2, '3', 4.01]") self.assertRaises(objectfilter.ParseError, parser.Parse) # Context Operator alone is not accepted parser = objectfilter.Parser("@attributes") self.assertRaises(objectfilter.ParseError, parser.Parse) # Accepted only with braces objectfilter.Parser("@attributes( name is 'adrien')").Parse() # Not without them parser = objectfilter.Parser("@attributes name is 'adrien'") self.assertRaises(objectfilter.ParseError, parser.Parse) # Can nest context operators query = "@imported_dlls( @imported_function( name is 'OpenFileA'))" objectfilter.Parser(query).Parse() # Can nest context operators and mix braces without it messing up query = "@imported_dlls( @imported_function( name is 'OpenFileA'))" parser = objectfilter.Parser(query).Parse() query = """ @imported_dlls ( @imported_function ( name is 'OpenFileA' and ordinal == 12 ) ) """ parser = objectfilter.Parser(query).Parse() # Mix context and binary operators query = """ @imported_dlls ( @imported_function ( name is 'OpenFileA' ) AND num_functions == 2 ) """ parser = objectfilter.Parser(query).Parse() # Also on the right query = """ @imported_dlls ( num_functions == 2 AND @imported_function ( name is 'OpenFileA' ) ) """ # Altogether # There's an imported dll that imports OpenFileA AND # an imported DLL matching advapi32.dll that imports RegQueryValueExA AND # and it exports a symbol called 'inject' query = """ @imported_dlls( @imported_function ( name is 'OpenFileA' ) ) AND @imported_dlls ( name regexp '(?i)advapi32.dll' AND @imported_function ( name is 'RegQueryValueEx' ) ) AND @exported_symbols(name is 'inject') """ def testInset(self): obj = DummyObject("clone", 2) parser = objectfilter.Parser("clone inset [1, 2, 3]").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), True) obj = DummyObject("troubleshooter", "red") parser = objectfilter.Parser("troubleshooter inset ['red', 'blue']").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), True) obj = DummyObject("troubleshooter", "infrared") parser = objectfilter.Parser("troubleshooter inset ['red', 'blue']").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) def testCompile(self): obj = DummyObject("something", "Blue") parser = objectfilter.Parser("something == 'Blue'").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), True) parser = objectfilter.Parser("something == 'Red'").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) parser = objectfilter.Parser("something == \"Red\"").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) obj = DummyObject("size", 4) parser = objectfilter.Parser("size < 3").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) parser = objectfilter.Parser("size == 4").Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), True) query = "something is 'Blue' and size notcontains 3" parser = objectfilter.Parser(query).Parse() filter_ = parser.Compile(self.filter_imp) self.assertEqual(filter_.Matches(obj), False) if __name__ == "__main__": unittest.main()
statik/grr
lib/objectfilter_test.py
Python
apache-2.0
19241
""" A component which allows you to send data to Dweet.io. For more details about this component, please refer to the documentation at https://home-assistant.io/components/dweet/ """ import logging from datetime import timedelta import voluptuous as vol from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNKNOWN import homeassistant.helpers.config_validation as cv from homeassistant.helpers import state as state_helper from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) DOMAIN = "dweet" DEPENDENCIES = [] REQUIREMENTS = ['dweepy==0.2.0'] CONF_NAME = 'name' CONF_WHITELIST = 'whitelist' MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_NAME): cv.string, vol.Required(CONF_WHITELIST): cv.string, }), }, extra=vol.ALLOW_EXTRA) # pylint: disable=too-many-locals def setup(hass, config): """Setup the Dweet.io component.""" conf = config[DOMAIN] name = conf[CONF_NAME] whitelist = conf.get(CONF_WHITELIST, []) json_body = {} def dweet_event_listener(event): """Listen for new messages on the bus and sends them to Dweet.io.""" state = event.data.get('new_state') if state is None or state.state in (STATE_UNKNOWN, '') \ or state.entity_id not in whitelist: return try: _state = state_helper.state_as_number(state) except ValueError: _state = state.state json_body[state.attributes.get('friendly_name')] = _state send_data(name, json_body) hass.bus.listen(EVENT_STATE_CHANGED, dweet_event_listener) return True @Throttle(MIN_TIME_BETWEEN_UPDATES) def send_data(name, msg): """Send the collected data to Dweet.io.""" import dweepy try: dweepy.dweet_for(name, msg) except dweepy.DweepyError: _LOGGER.error("Error saving data '%s' to Dweet.io", msg)
mikaelboman/home-assistant
homeassistant/components/dweet.py
Python
mit
1961
#!/usr/bin/python
import os
import time

import Bio.SubsMat.MatrixInfo
import Bio.Blast.NCBIWWW
import Bio.Blast.NCBIXML
from Bio import Entrez

#run NCBI BLAST
entrez_query = '"serum albumin"[Protein name] AND mammals[Organism]'
ncbi = Bio.Blast.NCBIWWW.qblast(program = "blastp",
                                database = "swissprot",
                                sequence = "4502027",
                                entrez_query = entrez_query,
                                hitlist_size = 500,
                                expect = 100.0)
#ncbi = open("/home/rytis/Downloads/740B49S001R-Alignment.xml", "r")
blast = Bio.Blast.NCBIXML.read(ncbi)

#2.a Filter results to only leave 80+% coverage
#Build up a list of elements whose coverage is below 80
popping = []
for seq in blast.alignments:
    cover_percentage = 0
    for hsps in seq.hsps:
        cover_percentage += ((hsps.query_end - hsps.query_start) * 100.0 / blast.query_length)
    if cover_percentage < 80.0:
        popping.append(seq)

#now pop all those elements
for pop in popping:
    blast.alignments.remove(pop)

#save FASTAs to file
f = open("BlastSequences", "w")
for seq in blast.alignments:
    #use windows line endings just in case
    f.write(">" + str(seq.hit_id) + " " + str(seq.hit_def) + "\r\n")
    f.write(str(seq.hsps[0].sbjct) + "\r\n")
f.close()

#Use Mafft program
os.system("mafft --quiet BlastSequences > MultipleAlignedSequences")

#figure out the most unique and most mammalian sequences of amino-acids
#we will use blosum62 and a conservation estimate for this
blosum62 = Bio.SubsMat.MatrixInfo.blosum62
conservation = [0] * blast.query_length #initialize to all zeros

#Now go through each sequence and sum up blosum62 scores
for seq in blast.alignments:
    #use only the best match (first HSP)
    hsp = seq.hsps[0]
    for index in range(len(hsp.query)):
        cons_idx = index + hsp.query_start - 1
        if hsp.query[index] == "-":
            continue
        if hsp.sbjct[index] == "-":
            continue
        try:
            conservation[cons_idx] += blosum62[(hsp.query[index], hsp.sbjct[index])]
        except KeyError:
            #Other way round
            conservation[cons_idx] += blosum62[(hsp.sbjct[index], hsp.query[index])]

#now we search for the continuous stretch of 15 amino acids whose conservation
#values are highest (and lowest)
highest_sum = None
lowest_sum = None
highest_idx = None
lowest_idx = None
#iterate over every window of 15 consecutive residues (range end is exclusive)
for index in range(len(conservation) - 14):
    cons_sum = sum(conservation[index : index + 15])
    #save if higher than previous highest
    if highest_sum is None:
        highest_idx = index
        highest_sum = cons_sum
    elif highest_sum < cons_sum:
        highest_idx = index
        highest_sum = cons_sum
    #save if lower than previous lowest
    if lowest_sum is None:
        lowest_idx = index
        lowest_sum = cons_sum
    elif lowest_sum > cons_sum:
        lowest_idx = index
        lowest_sum = cons_sum

#Retrieve full FASTA of the original protein
Entrez.email = "A.N.Other@example.com"
handle = Entrez.efetch(db = "Protein", id="NP_000468.1", rettype = "fasta", retmode="xml")
record = Entrez.read(handle)

#print the results
highest_str = "The most similar sequence starts at: "
highest_str += str(highest_idx)
highest_str += " Ends at: "
highest_str += str(highest_idx + 14)
highest_str += " Sequence: "
highest_str += record[0]["TSeq_sequence"][highest_idx : highest_idx + 15]
print(highest_str)

lowest_str = "The most dissimilar sequence starts at: "
lowest_str += str(lowest_idx)
lowest_str += " Ends at: "
lowest_str += str(lowest_idx + 14)
lowest_str += " Sequence: "
lowest_str += record[0]["TSeq_sequence"][lowest_idx : lowest_idx + 15]
print(lowest_str)
jauler/VU_MIF_tasks
BakalauroStudijos/BioInformatika/2uzd.py
Python
gpl-2.0
3860