code        string  (lengths 3 to 1.05M)
repo_name   string  (lengths 5 to 104)
path        string  (lengths 4 to 251)
language    string  (1 distinct value)
license     string  (15 distinct values)
size        int64   (3 to 1.05M)
from pypers.core.step import Step
from pypers.steps.mothur import Mothur
import os
import json
import re


class MothurSeqError(Mothur):
    spec = {
        'name': 'MothurSeqError',
        'version': '20151109',
        'descr': [
            'The seq.error command reads a query alignment file and a reference alignment file to measure the error rates'
        ],
        'url': 'http://www.mothur.org/wiki/Seq.error',
        'args': {
            'inputs': [
                {
                    'name': 'input_fasta',
                    'type': 'file',
                    'iterable': True,
                    'descr': 'input fasta filename'
                }
            ],
            'outputs': [
                {
                    'name': 'output_fasta',
                    'type': 'file',
                    'descr': 'output fasta filename'
                },
                {
                    'name': 'output_counts',
                    'type': 'file',
                    'descr': 'output counts filename'
                }
            ],
            'params': [
                {
                    'name': 'aligned',
                    'type': 'str',
                    'descr': 'define whether seq is aligned',
                    'value': 'F',
                    'readonly': True
                },
                {
                    'name': 'reference',
                    'type': 'file',
                    'descr': 'the trimmed sequences used in the mock community',
                    'value': '/pypers/develop/ref/mothur/mock_seqs.V4.ng.fasta',
                    'readonly': True
                }
            ]
        },
        'requirements': {
            'cpus': '8'
        }
    }

    def process(self):
        """
        Create the necessary input file links and run mothur command
        """
        if not isinstance(self.input_fasta, list):
            self.input_fasta = [self.input_fasta]
        for input_fasta in self.input_fasta:
            self.mk_links([input_fasta], self.output_dir)
            input_fasta = os.path.join(self.output_dir, os.path.basename(input_fasta))
            extra_params = {
                'fasta': input_fasta,
                'aligned': self.aligned,
                'reference': self.reference
            }
            self.run_cmd('seq.error', extra_params)
            self.output_fasta = re.sub(r'\.fasta$', '.pick.fasta', input_fasta)
            # NOTE: `input_counts` is never defined in this step (only
            # `input_fasta` is declared as an input), so the next line raises
            # a NameError at runtime; it appears to be copied from a related
            # step that also takes a count_table input.
            self.output_counts = re.sub(r'\.count_table$', '.pick.count_table', input_counts)
frankosan/pypers
pypers/steps/mothur/MothurSeqError.py
Python
gpl-3.0
2,540
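The only pure string logic in process() above is the output renaming step; a standalone sketch of just that step follows (the directory and file names are placeholders, not from the pipeline):

import os
import re

# hypothetical values, mirroring what process() derives for one iteration
output_dir = '/tmp/out'  # placeholder
input_fasta = os.path.join(output_dir, 'sample.trim.fasta')

output_fasta = re.sub(r'\.fasta$', '.pick.fasta', input_fasta)
print(output_fasta)  # /tmp/out/sample.trim.pick.fasta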
# -*- coding: utf-8 -*-

# This file is part of IRIS: Infrastructure and Release Information System
#
# Copyright (C) 2013 Intel Corporation
#
# IRIS is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2.0 as published by the Free Software Foundation.

"""
API URL configuration for the iris-packagedb project.

Permittable URLs and views accessible through REST API are defined here.
"""

# pylint: disable=C0103

from django.conf.urls import patterns, url, include
from rest_framework.routers import DefaultRouter

from iris.packagedb.apiviews import (DomainViewSet, SubDomainViewSet,
                                     LicenseViewSet, GitTreeViewSet,
                                     PackageViewSet, ProductViewSet,
                                     ImageViewSet)

# Create a router and register our views with it.
router = DefaultRouter()
router.register(r'domains', DomainViewSet)
router.register(r'subdomains', SubDomainViewSet)
router.register(r'licenses', LicenseViewSet)
router.register(r'gittrees', GitTreeViewSet)
router.register(r'packages', PackageViewSet)
router.register(r'products', ProductViewSet)
router.register(r'images', ImageViewSet)

# The API URLs are now determined automatically by the router.
# Additionally, we include the login URLs for the browseable API.
urlpatterns = patterns(
    'iris.packagedb.apiviews',
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
)
gttechsign/iris-panel
iris/packagedb/apiurls.py
Python
gpl-2.0
1,448
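As a side note (not part of the file), DRF's DefaultRouter expands each register() call above into a list route and a detail route, plus a browsable API root; sketched here for the first registration, to the best of my understanding of DRF's defaults:

# routes DefaultRouter generates for router.register(r'domains', DomainViewSet):
#   ^$                          -> api-root (lists all registered endpoints)
#   ^domains/$                  -> list / create
#   ^domains/(?P<pk>[^/.]+)/$   -> retrieve / update / destroy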
#!/usr/bin/python3
# https://docs.python.org/2.6/library/logging.html
import logging
import logging.handlers


def setup(lfile, level):
    # levels:
    #   logging.DEBUG
    #   logging.INFO
    #   logging.WARNING
    #   logging.ERROR
    #   logging.CRITICAL

    # log handle
    mylog = logging.getLogger(lfile)
    mylog.setLevel(level)

    # beef up logging by adding safeguards
    handler = logging.handlers.RotatingFileHandler(
        lfile, maxBytes=1000000, backupCount=3)  # approx. 1 MB per file
    handler.setFormatter(logging.Formatter(
        '%(asctime)s [%(process)d]: %(levelname)s %(message)s'))
    # '%(asctime)s %(pathname)s [%(process)d]: %(levelname)s %(message)s'))
    mylog.addHandler(handler)
    return mylog
thirschbuechler/ubuntu-recentquicklists
log3.py
Python
mit
756
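A minimal usage sketch for the helper above (the module name `log3` matches this file; the log path is a placeholder):

import logging
import log3

mylog = log3.setup('/tmp/example.log', logging.DEBUG)
mylog.info('rotating file logger is ready')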
VGG_MEAN = [104, 117, 123]


def create_yahoo_image_loader(expand_dims=True):
    """Yahoo open_nsfw image loading mechanism

    Approximation of the image loading mechanism defined in
    https://github.com/yahoo/open_nsfw/blob/79f77bcd45076b000df71742a59d726aa4a36ad1/classify_nsfw.py#L40
    """
    import numpy as np
    import skimage
    import skimage.io
    from PIL import Image
    from io import BytesIO

    def load_image(image_path):
        pimg = open(image_path, 'rb').read()
        img_data = pimg
        im = Image.open(BytesIO(img_data))

        if im.mode != "RGB":
            im = im.convert('RGB')

        imr = im.resize((256, 256), resample=Image.BILINEAR)

        fh_im = BytesIO()
        imr.save(fh_im, format='JPEG')
        fh_im.seek(0)

        image = (skimage.img_as_float(skimage.io.imread(fh_im, as_gray=False))
                 .astype(np.float32))

        H, W, _ = image.shape
        h, w = (224, 224)

        h_off = max((H - h) // 2, 0)
        w_off = max((W - w) // 2, 0)
        image = image[h_off:h_off + h, w_off:w_off + w, :]

        # RGB to BGR
        image = image[:, :, ::-1]

        image = image.astype(np.float32, copy=False)
        image = image * 255.0
        image -= np.array(VGG_MEAN, dtype=np.float32)

        if expand_dims:
            image = np.expand_dims(image, axis=0)

        return image

    return load_image


def create_tensorflow_image_loader(session, expand_dims=True,
                                   options=None, run_metadata=None):
    """Tensorflow image loader

    Results seem to deviate quite a bit from yahoo image loader due to
    different jpeg encoders/decoders and different image resize
    implementations between PIL, skimage and tensorflow

    Only supports jpeg images.

    Relevant tensorflow issues:
        * https://github.com/tensorflow/tensorflow/issues/6720
        * https://github.com/tensorflow/tensorflow/issues/12753
    """
    import tensorflow as tf

    def load_image(image_path):
        image = tf.read_file(image_path)
        image = __tf_jpeg_process(image)

        if expand_dims:
            image_batch = tf.expand_dims(image, axis=0)
            return session.run(image_batch, options=options,
                               run_metadata=run_metadata)

        return session.run(image, options=options,
                           run_metadata=run_metadata)

    return load_image


def load_base64_tensor(_input):
    import tensorflow as tf

    def decode_and_process(base64):
        _bytes = tf.decode_base64(base64)
        _image = __tf_jpeg_process(_bytes)

        return _image

    # we have to do some preprocessing with map_fn, since functions like
    # decode_*, resize_images and crop_to_bounding_box do not support
    # processing of batches
    image = tf.map_fn(decode_and_process, _input,
                      back_prop=False, dtype=tf.float32)

    return image


def __tf_jpeg_process(data):
    import tensorflow as tf

    # The whole jpeg encode/decode dance is necessary to generate a result
    # that matches the original model's (caffe) preprocessing
    # (as good as possible)
    image = tf.image.decode_jpeg(data, channels=3,
                                 fancy_upscaling=True,
                                 dct_method="INTEGER_FAST")
    image = tf.image.convert_image_dtype(image, tf.float32, saturate=True)
    image = tf.image.resize_images(image, (256, 256),
                                   method=tf.image.ResizeMethod.BILINEAR,
                                   align_corners=True)
    image = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
    image = tf.image.encode_jpeg(image, format='', quality=75,
                                 progressive=False, optimize_size=False,
                                 chroma_downsampling=True,
                                 density_unit=None,
                                 x_density=None, y_density=None,
                                 xmp_metadata=None)
    image = tf.image.decode_jpeg(image, channels=3,
                                 fancy_upscaling=False,
                                 dct_method="INTEGER_ACCURATE")
    image = tf.cast(image, dtype=tf.float32)
    image = tf.image.crop_to_bounding_box(image, 16, 16, 224, 224)
    image = tf.reverse(image, axis=[2])
    image -= VGG_MEAN

    return image
sankit1/cv-tricks.com
Tensorflow-tutorials/Not-Safe-For-Work-Detection/image_utils.py
Python
mit
4,492
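A minimal usage sketch for the Yahoo-style loader above (the JPEG path is a placeholder; numpy, Pillow and scikit-image must be installed):

load_image = create_yahoo_image_loader(expand_dims=True)
batch = load_image('/path/to/some.jpg')  # placeholder path
print(batch.shape, batch.dtype)  # (1, 224, 224, 3) float32, BGR, VGG-mean subtracted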
# Definition for a binary tree node
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    # @param A : root node of tree
    # @return a list of list of integers
    def zigzagLevelOrder(self, A):
        output, temp = list(), list()
        if not A:
            return output
        q1, q2 = list(), list()
        q1.append(A)
        while q1 or q2:
            temp = list()
            while q1:
                popped = q1.pop()
                temp.append(popped.val)
                if popped.left:
                    q2.append(popped.left)
                if popped.right:
                    q2.append(popped.right)
            if temp:
                output.append([y for y in temp])
            temp = list()
            while q2:
                popped = q2.pop()
                temp.append(popped.val)
                if popped.right:
                    q1.append(popped.right)
                if popped.left:
                    q1.append(popped.left)
            if temp:
                output.append([y for y in temp])
        return output
purushothamc/myibitsolutions
trees/zigzag_traversal.py
Python
gpl-3.0
1,147
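A small self-contained check of the traversal above; the TreeNode class is the commented-out definition from the file, reproduced here so the sketch runs:

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

#        3
#       / \
#      9  20
#         / \
#        15  7
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)

print(Solution().zigzagLevelOrder(root))  # [[3], [20, 9], [15, 7]]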
# -*- coding: utf-8 -*-
"""
Note views
"""
import os

from django.conf import settings
from django.views import generic
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages

from braces.views import LoginRequiredMixin

from taggit.models import Tag

from sendfile import sendfile

from bazar.models import ATTACHMENTS_WITH_SENDFILE, Entity, Note
from bazar.forms.note import NoteForm, NoteDeleteForm
from bazar.utils.mixins import MarkupMixin, EntityMixin


class NoteEntityBaseView(EntityMixin):
    """
    Base view to get the entity from note
    """
    def get_context_data(self, **kwargs):
        context = super(NoteEntityBaseView, self).get_context_data(**kwargs)
        context.update({
            'ATTACHMENTS_WITH_SENDFILE': ATTACHMENTS_WITH_SENDFILE,
            'entity_instance': self.entity,
        })
        return context

    def get(self, request, *args, **kwargs):
        self.entity = self.get_entity()
        return super(NoteEntityBaseView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.entity = self.get_entity()
        return super(NoteEntityBaseView, self).post(request, *args, **kwargs)


class NoteCreateView(LoginRequiredMixin, MarkupMixin, NoteEntityBaseView,
                     generic.CreateView):
    """
    Form view to create a note for an entity
    """
    model = Note
    template_name = "bazar/note/form.html"
    form_class = NoteForm

    def get_success_url(self):
        return reverse('bazar:entity-detail',
                       args=[self.get_kind(), self.entity.id])

    def get_form_kwargs(self):
        kwargs = super(NoteCreateView, self).get_form_kwargs()
        kwargs.update({
            'author': self.request.user,
            'entity': self.entity,
        })
        return kwargs

    def form_valid(self, form):
        messages.add_message(self.request, messages.SUCCESS,
                             _('Note has been created'), fail_silently=True)
        return super(NoteCreateView, self).form_valid(form)


class NoteEditView(LoginRequiredMixin, MarkupMixin, NoteEntityBaseView,
                   generic.UpdateView):
    """
    Form view to edit a note for an entity
    """
    model = Note
    template_name = "bazar/note/form.html"
    form_class = NoteForm

    def get_object(self, queryset=None):
        return get_object_or_404(Note, entity=self.get_entity_id(),
                                 pk=self.kwargs['note_id'])

    def get_success_url(self):
        return reverse('bazar:entity-note-detail',
                       args=[self.get_kind(), self.entity.id, self.object.id])

    def get_context_data(self, **kwargs):
        context = super(NoteEditView, self).get_context_data(**kwargs)
        context.update({
            'note_instance': self.object,
        })
        return context

    def get_form_kwargs(self):
        kwargs = super(NoteEditView, self).get_form_kwargs()
        kwargs.update({
            'author': self.request.user,
            'entity': self.entity,
        })
        return kwargs

    def form_valid(self, form):
        messages.add_message(self.request, messages.SUCCESS,
                             _('Note has been edited successfully'),
                             fail_silently=True)
        return super(NoteEditView, self).form_valid(form)


class TagNoteListView(LoginRequiredMixin, generic.ListView):
    """
    List notes from a tag
    """
    model = Note
    template_name = "bazar/note/list.html"
    paginate_by = settings.BAZAR_NOTE_INDEX_PAGINATE

    def get_context_data(self, **kwargs):
        context = super(TagNoteListView, self).get_context_data(**kwargs)
        context.update({
            'ATTACHMENTS_WITH_SENDFILE': ATTACHMENTS_WITH_SENDFILE,
        })
        return context

    def get_tags_slug(self):
        """
        Tag slug comes from url kwargs
        """
        return self.kwargs['tag']

    def get_tag(self):
        """
        Getting Tag object
        """
        return get_object_or_404(Tag, slug=self.get_tags_slug())

    def get_queryset(self):
        return self.model.objects.select_related('entity').filter(
            tags__slug__in=[self.get_tags_slug()])

    def get(self, request, *args, **kwargs):
        self.tag = self.get_tag()
        return super(TagNoteListView, self).get(request, *args, **kwargs)


class NoteDetailView(LoginRequiredMixin, MarkupMixin, NoteEntityBaseView,
                     generic.DetailView):
    """
    Note detail view
    """
    model = Entity
    template_name = "bazar/note/detail.html"
    context_object_name = "note_instance"

    def get_object(self, queryset=None):
        return get_object_or_404(Note, entity=self.get_entity_id(),
                                 pk=self.kwargs['note_id'])


class NoteDeleteView(LoginRequiredMixin, NoteEntityBaseView,
                     generic.UpdateView):
    """
    Note delete view
    """
    model = Note
    form_class = NoteDeleteForm
    template_name = 'bazar/note/delete_form.html'

    def get_object(self, queryset=None):
        return get_object_or_404(Note, entity=self.get_entity_id(),
                                 pk=self.kwargs['note_id'])

    def get_context_data(self, **kwargs):
        context = super(NoteDeleteView, self).get_context_data(**kwargs)
        context.update({
            'note_instance': self.object,
        })
        return context

    def get_success_url(self):
        return reverse('bazar:entity-detail',
                       args=[self.get_kind(), self.entity.id])

    def get(self, request, *args, **kwargs):
        self.entity = self.get_entity()
        return super(NoteDeleteView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.entity = self.get_entity()
        return super(NoteDeleteView, self).post(request, *args, **kwargs)

    def form_valid(self, form):
        messages.add_message(self.request, messages.SUCCESS,
                             _('Note has been deleted'), fail_silently=True)
        return super(NoteDeleteView, self).form_valid(form)


class AttachmentProtectedDownloadView(NoteDetailView):
    """
    View to download protected note attachment

    TODO: Unbind POST method for sanity
    """
    def get(self, request, **kwargs):
        self.object = self.get_object()
        file_path = os.path.join(settings.PROJECT_PATH, self.object.file.path)
        return sendfile(request, file_path, attachment=True,
                        attachment_filename=os.path.basename(file_path))
emencia/emencia-django-bazar
bazar/views/note.py
Python
mit
6,422
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#


class Dimension(dict):

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        if name == 'Name':
            self._name = value
        elif name == 'Value':
            if self._name in self:
                self[self._name].append(value)
            else:
                self[self._name] = [value]
        else:
            setattr(self, name, value)
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/boto/ec2/cloudwatch/dimension.py
Python
agpl-3.0
1,532
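A short sketch of how this SAX-style handler accumulates repeated values; the endElement calls below imitate what boto's XML parser would drive, with None standing in for the connection argument:

d = Dimension()
# imitate the parser visiting <Name>InstanceId</Name><Value>i-1</Value><Value>i-2</Value>
d.endElement('Name', 'InstanceId', None)
d.endElement('Value', 'i-1', None)
d.endElement('Value', 'i-2', None)
print(d)  # {'InstanceId': ['i-1', 'i-2']}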
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''

from multiprocessing import Process
import threading
import functools
import time
import os
import sys
import signal

import simplejson as json
import zmq

from keylime import config
from keylime import crypto
from keylime import keylime_logging
from keylime import secure_mount

logger = keylime_logging.init_logging('revocation_notifier')
broker_proc = None


def start_broker():
    def worker():
        context = zmq.Context(1)
        frontend = context.socket(zmq.SUB)
        frontend.bind("ipc:///tmp/keylime.verifier.ipc")
        frontend.setsockopt(zmq.SUBSCRIBE, b'')

        # Socket facing services
        backend = context.socket(zmq.PUB)
        backend.bind(
            f"tcp://{config.get('cloud_verifier', 'revocation_notifier_ip')}:"
            f"{config.getint('cloud_verifier', 'revocation_notifier_port')}"
        )

        zmq.device(zmq.FORWARDER, frontend, backend)

    global broker_proc
    broker_proc = Process(target=worker)
    broker_proc.start()


def stop_broker():
    global broker_proc
    if broker_proc is not None:
        # Remove the socket file before we kill the process
        if os.path.exists("/tmp/keylime.verifier.ipc"):
            os.remove("/tmp/keylime.verifier.ipc")
        os.kill(broker_proc.pid, signal.SIGKILL)


def notify(tosend):
    def worker(tosend):
        context = zmq.Context()
        mysock = context.socket(zmq.PUB)
        mysock.connect("ipc:///tmp/keylime.verifier.ipc")
        # wait briefly (200 ms) for the connect to happen
        time.sleep(0.2)
        # now send it out via 0mq
        logger.info("Sending revocation event to listening nodes...")
        for i in range(config.getint('cloud_verifier', 'max_retries')):
            try:
                mysock.send_string(json.dumps(tosend))
                break
            except Exception as e:
                logger.debug("Unable to publish revocation message %d times, trying again in %f seconds: %s" % (
                    i, config.getfloat('cloud_verifier', 'retry_interval'), e))
                time.sleep(config.getfloat('cloud_verifier', 'retry_interval'))
        mysock.close()

    cb = functools.partial(worker, tosend)
    t = threading.Thread(target=cb)
    t.start()


cert_key = None


def await_notifications(callback, revocation_cert_path):
    global cert_key

    if revocation_cert_path is None:
        raise Exception("must specify revocation_cert_path")

    context = zmq.Context()
    mysock = context.socket(zmq.SUB)
    mysock.setsockopt(zmq.SUBSCRIBE, b'')
    mysock.connect(
        f"tcp://{config.get('general', 'receive_revocation_ip')}:"
        f"{config.getint('general', 'receive_revocation_port')}"
    )

    logger.info('Waiting for revocation messages on 0mq %s:%s' %
                (config.get('general', 'receive_revocation_ip'),
                 config.getint('general', 'receive_revocation_port')))

    while True:
        rawbody = mysock.recv()
        body = json.loads(rawbody)
        if cert_key is None:
            # load up the CV signing public key
            if revocation_cert_path is not None and os.path.exists(revocation_cert_path):
                logger.info("Lazy loading the revocation certificate from %s" % revocation_cert_path)
                with open(revocation_cert_path) as f:
                    certpem = f.read()
                cert_key = crypto.x509_import_pubkey(certpem)

        if cert_key is None:
            logger.warning("Unable to check signature of revocation message: %s not available" % revocation_cert_path)
        elif 'signature' not in body or body['signature'] == 'none':
            logger.warning("No signature on revocation message from server")
        elif not crypto.rsa_verify(cert_key, body['msg'].encode('utf-8'), body['signature'].encode('utf-8')):
            logger.error("Invalid revocation message signature %s" % body)
        else:
            message = json.loads(body['msg'])
            logger.debug("Revocation signature validated for revocation: %s" % message)
            callback(message)


def main():
    start_broker()

    def worker():
        def print_notification(revocation):
            logger.warning("Received revocation: %s" % revocation)

        keypath = '%s/unzipped/RevocationNotifier-cert.crt' % (secure_mount.mount())
        await_notifications(print_notification, revocation_cert_path=keypath)

    t = threading.Thread(target=worker)
    t.start()

    # time.sleep(0.5)
    json_body2 = {
        'v': 'vbaby',
        'agent_id': '2094aqrea3',
        'cloudagent_ip': 'ipaddy',
        'cloudagent_port': '39843',
        'tpm_policy': '{"ab":"1"}',
        'vtpm_policy': '{"ab":"1"}',
        'metadata': '{"cert_serial":"1"}',
        'allowlist': '{}',
        'ima_sign_verification_keys': '{}',
        'revocation_key': '',
        'revocation': '{"cert_serial":"1"}',
    }

    print("sending notification")
    notify(json_body2)

    time.sleep(2)
    print("shutting down")
    stop_broker()
    print("exiting...")
    sys.exit(0)
    print("done")


if __name__ == "__main__":
    main()
mit-ll/python-keylime
keylime/revocation_notifier.py
Python
bsd-2-clause
5,242
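For readers unfamiliar with the 0mq pattern used above, here is a minimal pub/sub round trip with pyzmq alone (the ipc path is a placeholder, not keylime's socket; the ipc transport assumes a Unix-like OS):

import json
import time
import zmq

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind("ipc:///tmp/example.notify.ipc")  # placeholder socket path

sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b'')
sub.connect("ipc:///tmp/example.notify.ipc")

time.sleep(0.2)  # let the subscription propagate, as notify() does
pub.send_string(json.dumps({'msg': 'revoked'}))
print(json.loads(sub.recv()))  # {'msg': 'revoked'}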
"""Util for Conversation.""" import re def create_matcher(utterance): """Create a regex that matches the utterance.""" # Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL # Pattern matches (GROUP|OPTIONAL): Change light to [the color] {name} parts = re.split(r"({\w+}|\[[\w\s]+\] *)", utterance) # Pattern to extract name from GROUP part. Matches {name} group_matcher = re.compile(r"{(\w+)}") # Pattern to extract text from OPTIONAL part. Matches [the color] optional_matcher = re.compile(r"\[([\w ]+)\] *") pattern = ["^"] for part in parts: group_match = group_matcher.match(part) optional_match = optional_matcher.match(part) # Normal part if group_match is None and optional_match is None: pattern.append(part) continue # Group part if group_match is not None: pattern.append(r"(?P<{}>[\w ]+?)\s*".format(group_match.groups()[0])) # Optional part elif optional_match is not None: pattern.append(r"(?:{} *)?".format(optional_match.groups()[0])) pattern.append("$") return re.compile("".join(pattern), re.I)
fbradyirl/home-assistant
homeassistant/components/conversation/util.py
Python
apache-2.0
1,195
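A quick sketch exercising create_matcher with the utterance from the comments above:

matcher = create_matcher("Change light to [the color] {name}")

m = matcher.match("Change light to blue")
print(m.group("name"))  # blue

m = matcher.match("change light to the color red")
print(m.group("name"))  # red (the optional part and the casing are both tolerated)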
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <codecell>

from pyndamics import Simulation

# <markdowncell>

# From http://wiki.scipy.org/Cookbook/CoupledSpringMassSystem

# <codecell>

sim = Simulation()
sim.add("x1'=y1", 0.5, plot=1)
sim.add("y1'=(-b1 * y1 - k1 * (x1 - L1) + k2 * (x2 - x1 - L2)) / m1", 0.0, plot=False)
sim.add("x2'=y2", 2.25, plot=1)
sim.add("y2'=(-b2 * y2 - k2 * (x2 - x1 - L2)) / m2", 0.0, plot=False)
sim.params(
    m1=1.0,   # masses
    m2=1.5,   # masses
    k1=8.0,   # Spring constants
    k2=40.0,
    L1=0.5,   # Natural lengths
    L2=1.0,
    b1=0.8,   # Friction coefficients
    b2=0.5,
)
sim.run(0, 10)

# <codecell>

sim = Simulation()
sim.add("x1''=(-b1 * x1' - k1 * (x1 - L1) + k2 * (x2 - x1 - L2)) / m1", [0.5, 0.0], plot=[1, 2])
sim.add("x2''=(-b2 * x2' - k2 * (x2 - x1 - L2)) / m2", [2.25, 0.0], plot=[1, 2])
sim.params(
    m1=1.0,   # masses
    m2=1.5,   # masses
    k1=8.0,   # Spring constants
    k2=40.0,
    L1=0.5,   # Natural lengths
    L2=1.0,
    b1=0.8,   # Friction coefficients
    b2=0.5,
)
sim.run(0, 10)

# <codecell>

system_equation = "x[i]''=(-b[i] * x[i]' - k[i] * (x[i] -x[i-1] -L[i]) + k[i+1] * (x[i+1] - x[i] - L[i+1])) / m[i]"
system_params = dict(k=[8.0, 40.0],
                     m=[1, 1.5],
                     L=[0.5, 1],
                     b=[0.8, 0.5],
                     )
system_bc = dict(x_1=0,
                 x_N=0,
                 k_N=0,
                 L_N=0,
                 )

N = 2
eqns = []
for i in range(N):
    eqn = system_equation
    eqn = eqn.replace('[i]', '%d' % i)

    if (i + 1) < N:
        eqn = eqn.replace('[i+1]', '%d' % (i + 1))
    else:
        eqn = eqn.replace('[i+1]', '_N')

    if (i - 1) >= 0:
        eqn = eqn.replace('[i-1]', '%d' % (i - 1))
    else:
        eqn = eqn.replace('[i-1]', '_%d' % abs((i - 1)))

    eqns.append(eqn)

print system_equation  # was `print system`, an undefined name
for eqn in eqns:
    print eqn
print

print "x1''=(-b1 * x1' - k1 * (x1 - L1) + k2 * (x2 - x1 - L2)) / m1".replace('1', '0').replace('2', '1')
print "x2''=(-b2 * x2' - k2 * (x2 - x1 - L2)) / m2".replace('1', '0').replace('2', '1')

# <codecell>

def add_system(sim, N, equations, initial_values, plot=True, **kwargs):
    if isinstance(equations, str):
        equations = [equations]

    plot_values = plot
    for system_equation in equations:
        for i in range(N):
            val = initial_values[i]
            try:
                plot = plot_values[i]
            except TypeError:
                plot = plot_values

            eqn = system_equation
            eqn = eqn.replace('[i]', '%d' % i)

            if (i + 1) < N:
                eqn = eqn.replace('[i+1]', '%d' % (i + 1))
            else:
                eqn = eqn.replace('[i+1]', '_N')

            if (i - 1) >= 0:
                eqn = eqn.replace('[i-1]', '%d' % (i - 1))
            else:
                eqn = eqn.replace('[i-1]', '_%d' % abs((i - 1)))

            sim.add(eqn, val, plot=plot)

def add_system_params(sim, N, **kwargs):
    params = {}
    for name in kwargs:
        if name.endswith('_1') or name.endswith('_N'):
            params[name] = kwargs[name]
        else:
            for i in range(N):
                newname = name + "%d" % i
                try:
                    params[newname] = kwargs[name][i]
                except TypeError:
                    params[newname] = kwargs[name]

    sim.params(**params)

N = 2
sim = Simulation()
add_system(sim, N,
           "x[i]''=(-b[i] * x[i]' - k[i] * (x[i] -x[i-1] -L[i]) + k[i+1] * (x[i+1] - x[i] - L[i+1])) / m[i]",
           [[0.5, 0.0], [2.25, 0.0]],  # initial conditions
           [[1, 2], [1, 2]],           # plot arguments
           )
add_system_params(sim, N,
                  k=[8.0, 40.0],
                  m=[1, 1.5],
                  L=[0.5, 1],
                  b=[0.8, 0.5],
                  x_1=0,
                  x_N=0,
                  k_N=0,
                  L_N=0,
                  )
print sim.equations()
sim.run(0, 10)

# <codecell>

N = 3
sim = Simulation()
add_system(sim, N,
           "x[i]''=(-b[i] * x[i]' - k[i] * (x[i] -x[i-1] -L[i]) + k[i+1] * (x[i+1] - x[i] - L[i+1])) / m[i]",
           [[0.5, 0.0], [2.25, 0.0], [4.0, 0.0]],  # initial conditions
           [[1, 2], [1, 2], [1, 2]],               # plot arguments
           )
add_system_params(sim, N,
                  k=[8.0, 40.0, 10.0],
                  m=[1, 1.5, 1.0],
                  L=[0.5, 1, 2.0],
                  b=[0.8, 0.5, .7],
                  x_1=0,
                  x_N=0,
                  k_N=0,
                  L_N=0,
                  )
print sim.equations()
sim.run(0, 10)

# <codecell>

from numpy import zeros  # needed when not running under pylab

A = zeros((3, 3))
b = [0.8, 0.5]
L = [0.5, 1]
m = [1, 1.5]
k = [8.0, 40.0]

# <codecell>

system_equation = "x[i]''=(-b[i] * x[i]' - k[i] * (x[i] -x[i-1] -L[i]) + k[i+1] * (x[i+1] - x[i] - L[i+1])) / m[i]"
system_params = dict(k=[8.0, 40.0],
                     m=[1, 1.5],
                     L=[0.5, 1],
                     b=[0.8, 0.5],
                     )
system_bc = dict(x_1=0,
                 x_N=0,
                 k_N=0,
                 L_N=0,
                 )

N = 2
eqns = []
for i in range(N):
    eqn = system_equation
    eqn = eqn.replace('[i]', '%d' % i)

    if (i + 1) < N:
        eqn = eqn.replace('[i+1]', '%d' % (i + 1))
    else:
        eqn = eqn.replace('[i+1]', '_N')

    if (i - 1) >= 0:
        eqn = eqn.replace('[i-1]', '%d' % (i - 1))
    else:
        eqn = eqn.replace('[i-1]', '_%d' % abs((i - 1)))

    eqns.append(eqn)

print system_equation  # was `print system`, an undefined name
for eqn in eqns:
    print eqn
print

print "x1''=(-b1 * x1' - k1 * (x1 - L1) + k2 * (x2 - x1 - L2)) / m1".replace('1', '0').replace('2', '1')
print "x2''=(-b2 * x2' - k2 * (x2 - x1 - L2)) / m2".replace('1', '0').replace('2', '1')
bblais/pyndamics
examples/Systems of Equations.py
Python
mit
5,694
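The heart of add_system above is plain string substitution on an equation template; a self-contained sketch of just that trick for one component:

system_equation = "x[i]''=(-b[i] * x[i]' - k[i] * (x[i] -x[i-1] -L[i]) + k[i+1] * (x[i+1] - x[i] - L[i+1])) / m[i]"

# expand the template for component i=0 of an N=2 chain, exactly as the loop does
i, N = 0, 2
eqn = system_equation.replace('[i]', '%d' % i)
eqn = eqn.replace('[i+1]', '%d' % (i + 1) if (i + 1) < N else '_N')
eqn = eqn.replace('[i-1]', '%d' % (i - 1) if (i - 1) >= 0 else '_%d' % abs(i - 1))
print(eqn)
# x0''=(-b0 * x0' - k0 * (x0 -x_1 -L0) + k1 * (x1 - x0 - L1)) / m0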
""" EINLEITUNG Willkommen in der Lernreihe fuer die Programmiersprache Python! Hier werden wir uns mit den Grundlagen, aber auch Fortgeschrittenen Themen Python's auseinandersetzen. Jeder kann in diesem Moment sein erstes Python Programm schreiben und ausfuehren, ohne ein Programm zu installieren. Dazu gibt es im Internet viele Python Compiler (Ein Compiler wandelt Python-Code in Maschinensprache um) Hier sind ein paar bekannte Websites "https://www.tutorialspoint.com/codingground.htm" (Professionell, nicht auf jedem Browser verfuegbar) "http://ideone.com/" (Simpler, Auf vielen Browsern verfuegbar) aber es gibt viele mehr. WARUM SOLLTE ICH PROGRAMMIEREN LERNEN? Oft versucht man seinen Computer anzuschreien, damit ein Programm das tut, was es soll. Leider klappt das sehr selten. Aus diesem Grund ist Programmieren lernen eine mögliche Lösung: Wenn es kein Programm für seinen Zweck gibt, baut man sich selbst eins. Programmieren kann man auch in der Freizeit; Wer wollte nicht schonmal zufaellige Sätze generieren oder sein eigenes Text-Adventure erschaffen? WAS IST PYTHON? Python ist eine Programmiersprache. Das heisst, sie ermoeglicht es einem, dem Computer zu sagen was er tun soll, ohne nur '1' und '0' zu schreiben. Programmiersprachen bestehen aus vielen einzelnen Befehlen, die auf Zeilen aufgeteilt werden. Diese Zeilen werden der Reihe nach von oben nach unten abgearbeitet. Es gibt auch Moeglichkeiten, bestimmte Befehle zu wiederholen oder nur unter bestimmten Bedingungen auszufuehren. Es gibt viele weitere Programmiersprachen, aber Python ist sowohl gut fuer Einsteiger als auch bekannt unter Profi-Entwicklern, was es zur idealen Sprache fuer Einsteiger macht. """
PlayLucky/PythonTutorials
Chapter1-BuildingACalculator/Lesson0-Hello.py
Python
mit
1,717
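Since this lesson file contains no code yet, here is the kind of one-line first program the text describes (an illustrative addition, not from the original file):

print("Hello, world!")  # your first Python program, runnable in any online compiler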
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time

import numpy as np

from six.moves import xrange  # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.compat import collections_abc


def GetShrunkInceptionShapes(shrink=10):
  """Iterator for smaller versions of convolution shapes in 2015 Inception.

  Relative to inception, each depth value is `depth // shrink`.

  Args:
    shrink: Factor to shrink each depth value by relative to Inception.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the
    convolution parameters of Inception layers.
""" input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248], [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216], [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96], [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288], [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256], [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192], [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64], [4, 147, 147, 24]] filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384], [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320], [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384], [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320], [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192], [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224], [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192], [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224], [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128], [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160], [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160], [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128], [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128], [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96], [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64], [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48], [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64], [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64], [1, 1, 24, 64]] out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320], [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320], [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192], [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224], [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192], [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96], [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48], [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64], [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64], [4, 147, 147, 64]] strides = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ] # Shrink sizes to make the test faster for i in input_sizes: i[3] //= shrink for f in filter_sizes: f[2] //= shrink f[3] //= shrink for o in out_sizes: o[3] //= shrink # pylint: disable=invalid-name VALID = "VALID" SAME = "SAME" # pylint: enable=invalid-name paddings = [ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, 
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, VALID, VALID, VALID
  ]
  for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
                           paddings):
    yield i, f, o, s, p


def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  test_configs = [("NHWC", False), ("NHWC", True)]
  if test.is_gpu_available(cuda_only=True):
    # "NCHW" format is only supported on CUDA.
    test_configs += [("NCHW", True)]
  return test_configs


class Conv2DTest(test.TestCase):

  def _DtypesToTest(self, use_gpu):
    # double datatype is currently not supported for convolution ops
    # on the ROCm platform
    optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
    if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():
      return [dtypes.float32] + optional_float64
    else:
      # It is important that float32 comes before float16 here,
      # as we will be using its gradients as reference for fp16 gradients.
      return [dtypes.float32, dtypes.float16] + optional_float64

  def _CreateNumpyTensor(self, shape):
    total_size = 1
    for s in shape:
      total_size *= s
    return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)

  def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
                            strides, padding, data_format, dtype, use_gpu):
    """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      dilations: Dilated rate: [col_dilation, row_dilation]
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
      use_gpu: True if the operations should be run on GPU

    Returns:
      Symbolic tensor value that can be used to execute the computation
    """
    x1 = self._CreateNumpyTensor(tensor_in_sizes)
    x2 = self._CreateNumpyTensor(filter_in_sizes)

    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      if isinstance(padding, (list, tuple)):
        padding = [(0, 0)] + padding + [(0, 0)]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW(padding)
      conv = nn_ops.conv2d(
          t1,
          t2,
          dilations=dilations,
          strides=strides,
          padding=padding,
          data_format=data_format)
      self.assertEqual(conv.dtype, dtype)
      if data_format == "NCHW":
        conv = test_util.NCHWToNHWC(conv)

      return conv

  def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
                        padding):
    """Verifies that CPU and GPU produce the same values.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution;
      padding: Padding type.
""" x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) def _SetupVal(data_format, use_gpu): with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d( t1, t2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) return conv tensors = [] for (data_format, use_gpu) in GetTestConfigs(): tensors.append(_SetupVal(data_format, use_gpu)) values = self.evaluate(tensors) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3) def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes, stride, dilation, padding, data_format, use_gpu): x1 = self._CreateNumpyTensor(tensor_in_sizes) x2 = self._CreateNumpyTensor(filter_in_sizes) with test_util.device(use_gpu): t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) if isinstance(stride, collections_abc.Iterable): strides = list(stride) else: strides = [stride, stride] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) full_strides = [1, 1] + strides full_dilation = [1, 1] + dilation else: full_strides = [1] + strides + [1] full_dilation = [1] + dilation + [1] expected = nn_ops.convolution( t1, t2, padding=padding, strides=strides, dilation_rate=dilation, data_format=data_format) computed = nn_ops.conv2d( t1, t2, strides=full_strides, dilations=full_dilation, padding=padding, data_format=data_format) if data_format == "NCHW": expected = test_util.NCHWToNHWC(expected) computed = test_util.NCHWToNHWC(computed) return expected, computed def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides, padding, dilations, rtol=1e-4): expected_results = [] computed_results = [] for data_format, use_gpu in GetTestConfigs(): expected, computed = self._ComputeReferenceDilatedConv( tensor_in_sizes, filter_in_sizes, strides, dilations, padding, data_format, use_gpu) expected_results.append(expected) computed_results.append(computed) tolerance = 1e-2 if use_gpu else 1e-5 expected_values = self.evaluate(expected_results) computed_values = self.evaluate(computed_results) for e_value, c_value in zip(expected_values, computed_values): tf_logging.debug("expected = %s", e_value) tf_logging.debug("actual = %s", c_value) self.assertAllClose( e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol) def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides, padding, expected, dilations=(1, 1), gpu_only=False, test_grappler_layout_optimizer=False, tol=1e-5, fp16_tol=1e-3): if gpu_only and not test.is_gpu_available(cuda_only=True): return tensors = [] dilations = list(dilations) for (data_format, use_gpu) in GetTestConfigs(): if gpu_only and not use_gpu: continue dtypes_to_test = self._DtypesToTest(use_gpu) if not test_grappler_layout_optimizer and data_format == "NHWC": dtypes_to_test.append(dtypes.int32) for dtype in dtypes_to_test: result = self._SetupValuesForDevice( tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu=use_gpu) if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu: # Grappler's layout optimizer will not optimize a fetch node, so # this identity allows Grappler to optimize the Conv2D node. 
          result = array_ops.identity(result)
        tensors.append(result)
      values = self.evaluate(tensors)
      for i in range(len(tensors)):
        conv = tensors[i]
        value = values[i]
        tf_logging.debug("expected = %s", expected)
        tf_logging.debug("actual = %s", value)
        tol_to_use = fp16_tol if value.dtype == np.float16 else tol
        if np.issubdtype(value.dtype, np.integer):
          self.assertAllEqual(np.rint(expected), np.ravel(value))
        else:
          self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,
                              rtol=tol_to_use)
        self.assertShapeEqual(value, conv)
        self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)

  def _VerifyExplicitPaddings(self,
                              tensor_in_sizes,
                              filter_in_sizes,
                              strides,
                              padding,
                              dilations=(1, 1),
                              test_grappler_layout_optimizer=False,
                              tol=1e-5,
                              fp16_tol=1e-3):
    """Verifies Conv2D with explicit padding generates correct values.

    It does this by comparing with Conv2D without explicit padding. This
    function assumes Conv2D without explicit padding works correctly.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      strides: [row_stride, col_stride] for the convolution;
      padding: Explicit padding amounts.
      dilations: Dilation values
      test_grappler_layout_optimizer: If True, allow the Grappler layout
        optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.
      tol: The absolute and relative tolerance for non-fp16 dtypes.
      fp16_tol: The absolute and relative tolerance for fp16.
    """
    input_tensor = self._CreateNumpyTensor(tensor_in_sizes)
    filter_tensor = self._CreateNumpyTensor(filter_in_sizes)
    input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])
    dilations = list(dilations)
    conv2d_result = nn_ops.conv2d(
        input_tensor,
        filter_tensor, [1] + list(strides) + [1],
        "VALID",
        dilations=[1] + dilations + [1])
    expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))
    self._VerifyValues(
        tensor_in_sizes,
        filter_in_sizes,
        strides,
        padding,
        expected,
        dilations,
        test_grappler_layout_optimizer=test_grappler_layout_optimizer,
        tol=tol,
        fp16_tol=fp16_tol)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x1Filter(self):
    expected_output = [
        30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
        204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DExpandedBatch(self):
    tensor_in_sizes_batch = [10, 2, 3, 3]
    tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
    filter_in_sizes = [1, 1, 3, 3]
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
    x2 = x1.reshape(tensor_in_sizes_expanded_batch)
    conv1 = nn_ops.conv2d(
        x1, filter_in, strides=[1, 1], padding="VALID")
    conv2 = nn_ops.conv2d(
        x2, filter_in, strides=[1, 1], padding="VALID")
    self.assertEqual(conv1.shape, tensor_in_sizes_batch)
    self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
    self.assertAllEqual(
        conv1,
        self.evaluate(conv2).reshape(conv1.shape))

  @test_util.run_in_graph_and_eager_modes
  def testConvolutionClass2DExpandedBatch(self):
    tensor_in_sizes_batch = [10, 2, 3, 3]
    tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
    filter_in_sizes = [1, 1, 3, 3]
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
    x2 = x1.reshape(tensor_in_sizes_expanded_batch)
    convolver1 = nn_ops.Convolution(
        input_shape=x1.shape,
        filter_shape=filter_in.shape,
        strides=[1, 1],
        padding="VALID")
    self.assertEqual(convolver1.num_batch_dims, 1)
    convolver2 = nn_ops.Convolution(
        input_shape=x2.shape,
        filter_shape=filter_in.shape,
        strides=[1, 1],
        padding="VALID")
    self.assertEqual(convolver2.num_batch_dims, 2)
    conv1 = convolver1(x1, filter_in)
    conv2 = convolver2(x2, filter_in)
    self.assertEqual(conv1.shape, tensor_in_sizes_batch)
    self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
    self.assertAllEqual(
        conv1,
        self.evaluate(conv2).reshape(conv1.shape))

  @test_util.run_in_graph_and_eager_modes
  def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):
    tensor_in_sizes_batch = [10, 2, 3, 3]
    tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
    filter_in_sizes = [1, 1, 3, 3]
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
    x2 = x1.reshape(tensor_in_sizes_expanded_batch)
    conv1 = nn_ops.convolution(
        x1, filter_in, strides=[1, 1], padding="VALID")
    conv2 = nn_ops.convolution(
        x2, filter_in, strides=[1, 1], padding="VALID")
    self.assertEqual(conv1.shape, tensor_in_sizes_batch)
    self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
    self.assertAllEqual(
        conv1,
        self.evaluate(conv2).reshape(conv1.shape))

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Filter2x1Dilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 4, 4, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmpty(self):
    expected_output = []
    self._VerifyValues(
        tensor_in_sizes=[0, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmptyDilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[0, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Filter(self):
    # The outputs are computed using third_party/py/IPython/notebook.
    expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterDilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x2Filter(self):
    # The outputs are computed using third_party/py/IPython/notebook.
    expected_output = [
        231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
        936.0, 1029.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x2FilterDilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride2(self):
    expected_output = [2271.0, 2367.0, 2463.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride2Same(self):
    expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="SAME",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride1x2(self):
    expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 6, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[1, 2],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSmallerThanStrideValid(self):
    expected_output = [65, 95, 275, 305]
    self._VerifyValues(
        tensor_in_sizes=[1, 7, 7, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[3, 3],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSmallerThanStrideSame(self):
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 3, 1],
        filter_in_sizes=[1, 1, 1, 1],
        strides=[2, 2],
        padding="SAME",
        expected=[1, 3, 7, 9])

    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 1],
        filter_in_sizes=[1, 1, 1, 1],
        strides=[2, 2],
        padding="SAME",
        expected=[1, 3, 9, 11])

    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[3, 3],
        padding="SAME",
        expected=[44, 28, 41, 16])

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSizeMatchesInputSize(self):
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 2, 1],
        filter_in_sizes=[2, 2, 1, 2],
        strides=[1, 1],
        padding="VALID",
        expected=[50, 60])

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSizeMatchesInputSizeDilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 3, 3, 1],
        filter_in_sizes=[2, 2, 1, 2],
        strides=[1, 1],
        dilations=[2, 2],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D0x0Padding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding=[[0, 0], [0, 0]])

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[3, 4, 3, 2],
        filter_in_sizes=[1, 1, 2, 1],
        strides=[2, 2],
        padding=[[0, 0], [0, 0]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D1x1Padding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        strides=[1, 1],
        padding=[[1, 1], [1, 1]])

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 2, 1],
        filter_in_sizes=[1, 1, 1, 2],
        strides=[1, 1],
        padding=[[1, 1], [1, 1]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Padding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 1, 2],
        filter_in_sizes=[2, 1, 2, 1],
        strides=[1, 1],
        padding=[[2, 2], [2, 2]])

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 1, 2],
        filter_in_sizes=[1, 1, 2, 1],
        strides=[2, 1],
        padding=[[2, 2], [2, 2]])
  @test_util.run_in_graph_and_eager_modes()
  def testConv2DOnlyBottomPadding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 2],
        strides=[1, 1],
        padding=[[0, 3], [0, 0]],
        tol=2e-5)

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[2, 2, 4, 3],
        filter_in_sizes=[1, 2, 3, 2],
        strides=[2, 2],
        padding=[[0, 3], [0, 0]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2DOnlyTopRightPadding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 2],
        strides=[1, 1],
        padding=[[1, 0], [0, 2]],
        tol=5e-5)

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 4, 2],
        filter_in_sizes=[2, 2, 2, 2],
        strides=[1, 3],
        padding=[[1, 0], [0, 2]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2DLotsPadding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 1, 1, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding=[[3, 4], [4, 2]])

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 1, 1],
        filter_in_sizes=[2, 2, 1, 3],
        strides=[2, 1],
        padding=[[3, 4], [4, 2]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2DExplicitPaddingWithDilations(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 3, 2, 1],
        filter_in_sizes=[1, 2, 1, 2],
        strides=[1, 1],
        padding=[[1, 0], [0, 1]],
        dilations=[2, 1])

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[3, 2, 2, 1],
        strides=[1, 1],
        padding=[[2, 1], [1, 2]],
        dilations=[2, 3])

  def testConv2DExplicitPaddingWithLayoutOptimizer(self):
    # Test with Grappler's layout optimizer, to ensure the layout optimizer
    # handles explicit padding correctly.
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 3, 2, 1],
        filter_in_sizes=[1, 2, 1, 2],
        strides=[1, 1],
        padding=[[1, 0], [0, 1]],
        dilations=[2, 1],
        test_grappler_layout_optimizer=True)

    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[3, 2, 2, 1],
        strides=[1, 1],
        padding=[[2, 1], [1, 2]],
        dilations=[2, 3],
        test_grappler_layout_optimizer=True)

  def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,
                          strides, padding, data_format, dtype):
    """Verify the output of group convolution is equal to a for-loop
    implementation.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      dilations: Dilated rate: [col_dilation, row_dilation]
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
""" tensor_in = self._CreateNumpyTensor(tensor_in_sizes) filter_in = self._CreateNumpyTensor(filter_in_sizes) num_groups = tensor_in_sizes[3] // filter_in_sizes[2] assert num_groups > 1 and \ filter_in_sizes[2] * num_groups == tensor_in_sizes[3] with test_util.device(True): t1 = constant_op.constant(tensor_in, dtype=dtype) t2 = constant_op.constant(filter_in, dtype=dtype) strides = [1] + strides + [1] dilations = [1] + dilations + [1] if data_format == "NCHW": t1 = test_util.NHWCToNCHW(t1) strides = test_util.NHWCToNCHW(strides) dilations = test_util.NHWCToNCHW(dilations) t1_splits = array_ops.split(t1, num_groups, axis=1) else: t1_splits = array_ops.split(t1, num_groups, axis=3) t2_splits = array_ops.split(t2, num_groups, axis=3) def MakeConv2d(inputs, filters): return nn_ops.conv2d( inputs, filters, strides, padding, dilations=dilations, data_format=data_format) group_conv = MakeConv2d(t1, t2) group_conv_loop = array_ops.concat( [MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)], axis=1 if data_format == "NCHW" else 3) results = self.evaluate([group_conv, group_conv_loop]) tol_to_use = 1e-5 self.assertAllClose( results[0], results[1], atol=tol_to_use, rtol=tol_to_use) @test_util.run_in_graph_and_eager_modes def testConv2DGroupConvFwd(self): if test.is_gpu_available(cuda_only=True): data_formats = ["NHWC", "NCHW"] else: data_formats = ["NHWC"] for data_format in data_formats: for dilation in [1, 2]: for stride in [1, 2]: for filter_dims in [[3, 3, 4, 8], [1, 1, 2, 16]]: self._VerifyGroupConvFwd([10, 32, 32, 16], filter_dims, dilations=[dilation, dilation], strides=[stride, stride], padding="SAME", data_format=data_format, dtype=dtypes.float32) @test_util.deprecated_graph_mode_only @test_util.run_cuda_only def testInputGradientGroupConv(self): for data_format in ["NCHW", "NHWC"]: for test_input in [True, False]: self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, num_groups=2, padding="VALID", in_depth=4, out_depth=6, stride_rows=1, stride_cols=1, test_input=test_input, data_format=data_format, use_gpu=True, max_err=0.005) @test_util.deprecated_graph_mode_only @test_util.run_cuda_only def testFilterGradientGroupConv(self): for data_format in ["NCHW", "NHWC"]: for test_input in [True, False]: self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, num_groups=2, padding="VALID", in_depth=4, out_depth=6, stride_rows=1, stride_cols=1, test_input=test_input, data_format=data_format, use_gpu=True, max_err=0.005) # TODO(yzhwang): this currently fails. 
  # self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
  #                    filter_in_sizes=[2, 2, 1, 1],
  #                    strides=[4, 4], padding="SAME",
  #                    expected=[72, 112, 392, 432])

  # Testing for backprops
  def _RunAndVerifyBackpropInput(self,
                                 input_sizes,
                                 filter_sizes,
                                 output_sizes,
                                 strides,
                                 padding,
                                 expected,
                                 data_format,
                                 use_gpu,
                                 err,
                                 dilations=(1, 1)):
    if use_gpu and not test.is_gpu_available(cuda_only=True):
      return
    x1 = self._CreateNumpyTensor(filter_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    with test_util.device(use_gpu):
      if len(input_sizes) == 4:
        if data_format == "NCHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
      t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
      t1 = constant_op.constant(x1, shape=filter_sizes)
      t2 = constant_op.constant(x2, shape=output_sizes)
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      if isinstance(padding, (list, tuple)):
        padding = [(0, 0)] + padding + [(0, 0)]
      if data_format == "NCHW":
        t2 = test_util.NHWCToNCHW(t2)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW((padding))
      conv = nn_ops.conv2d_backprop_input(
          t0,
          t1,
          t2,
          strides=strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations)
      if data_format == "NCHW":
        conv = test_util.NCHWToNHWC(conv)
      # "values" consists of two tensors for two backprops
      value = self.evaluate(conv)
      self.assertShapeEqual(value, conv)
    tf_logging.debug("expected = %s", expected)
    tf_logging.debug("actual = %s", value)
    self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5)

  def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                            conv_strides, padding):
    x1 = np.random.rand(*filter_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)

    def _GetVal(data_format, use_gpu):
      with test_util.device(use_gpu):
        if data_format == "NCHW":
          new_input_sizes = test_util.NHWCToNCHW(input_sizes)
        else:
          new_input_sizes = input_sizes
        t0 = constant_op.constant(new_input_sizes,
                                  shape=[len(new_input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t2 = test_util.NHWCToNCHW(t2)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d_backprop_input(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        ret = self.evaluate(conv)
        self.assertShapeEqual(ret, conv)
        return ret

    values = []
    for (data_format, use_gpu) in GetTestConfigs():
      values.append(_GetVal(data_format, use_gpu))

    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Depth1ValidBackpropInput(self):
    expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 1, 2, 1],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmptyBackpropInput(self):
    expected_output = []
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[0, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[0, 1, 2, 1],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)
@test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropInput(self): expected_output = [ 14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0, 140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0 ] for (data_format, use_gpu) in GetTestConfigs(): # The GPU version of this test is not very stable. So adjusting the # error threshold to 1e-4. self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-4) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropInputStride1x2(self): expected_output = [ 1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0, 16.0, 15.0, 20.0, 18.0, 24.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DStrideTwoFilterOneSameBackpropInput(self): expected_output = [ 1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 4, 4, 1], filter_sizes=[1, 1, 1, 1], output_sizes=[1, 2, 2, 1], strides=[2, 2], padding="SAME", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes def testConv2DKernelSizeMatchesInputSizeBackpropInput(self): expected_output = [5.0, 11.0, 17.0, 23.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[1, 2, 2, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.run_in_graph_and_eager_modes @test_util.disable_xla("XLA requires input_sizes to be a 4D shape.") def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self): expected_output = [5.0, 11.0, 17.0, 23.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInput( input_sizes=[2, 2], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], padding="VALID", expected=expected_output, data_format=data_format, use_gpu=use_gpu, err=1e-5) # Testing for backprops def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu, dilations=(1, 1), err=1e-5): x0 = self._CreateNumpyTensor(input_sizes) x2 = self._CreateNumpyTensor(output_sizes) dilations = list(dilations) explicit_strides = [1] + strides + [1] new_padding = padding new_dilations = [1] + dilations + [1] if isinstance(new_padding, (list, tuple)): new_padding = [(0, 0)] + new_padding + [(0, 0)] if data_format == "NCHW": explicit_strides = test_util.NHWCToNCHW(explicit_strides) new_dilations = test_util.NHWCToNCHW(new_dilations) if isinstance(padding, (list, tuple)): new_padding = test_util.NHWCToNCHW(new_padding) for dtype in self._DtypesToTest(use_gpu=use_gpu): with test_util.device(use_gpu): t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype) t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)]) t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype) if data_format == "NCHW": t0 = test_util.NHWCToNCHW(t0) t2 = test_util.NHWCToNCHW(t2) conv = 
nn_ops.conv2d_backprop_filter( t0, t1, t2, strides=explicit_strides, padding=new_padding, dilations=new_dilations, data_format=data_format) value = self.evaluate(conv) self.assertShapeEqual(value, conv) tf_logging.debug("expected = %s", expected) tf_logging.debug("actual = %s", value) self.assertArrayNear(expected, value.flatten(), err) def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes, conv_strides, padding): x0 = np.random.rand(*input_sizes).astype(np.float32) x2 = np.random.rand(*output_sizes).astype(np.float32) def _GetVal(data_format, use_gpu): with test_util.device(use_gpu): t0 = constant_op.constant(x0, shape=input_sizes) t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)]) t2 = constant_op.constant(x2, shape=output_sizes) strides = [1] + conv_strides + [1] if data_format == "NCHW": t0 = test_util.NHWCToNCHW(t0) t2 = test_util.NHWCToNCHW(t2) strides = test_util.NHWCToNCHW(strides) conv = nn_ops.conv2d_backprop_filter( t0, t1, t2, strides=strides, padding=padding, data_format=data_format) ret = self.evaluate(conv) self.assertShapeEqual(ret, conv) return ret values = [] for (data_format, use_gpu) in GetTestConfigs(): values.append(_GetVal(data_format, use_gpu)) for i in range(1, len(values)): self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth1ValidBackpropFilter(self): expected = [5.0, 8.0, 14.0, 17.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DEmptyBackpropFilter(self): expected = [] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 0], output_sizes=[1, 1, 2, 0], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DBackpropFilterWithEmptyInput(self): expected = [0, 0, 0, 0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropFilter(self): expected = [ 17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0, 37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0, 117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0, 120.0, 153.0 ] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self): expected = [161.0, 182.0, 287.0, 308.0] for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilter( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID", expected=expected, data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes def testConv2DStrideTwoFilterOneSameBackpropFilter(self): expected_output = [78.] 
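    # Worked check of the single expected value: with a 1x1 filter and stride
    # 2 under SAME padding, the sampled inputs are 1, 3, 9, 11 (from the 4x4
    # grid of 1..16) and the output gradient is 1, 2, 3, 4, so the filter
    # gradient is 1*1 + 3*2 + 9*3 + 11*4 = 78.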
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 4, 4, 1],
          filter_sizes=[1, 1, 1, 1],
          output_sizes=[1, 2, 2, 1],
          strides=[2, 2],
          padding="SAME",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
    expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 2, 2, 1],
          filter_sizes=[2, 2, 1, 2],
          output_sizes=[1, 1, 1, 2],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu)

  # Testing for backprops
  def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
                                         output_sizes, strides, dilations,
                                         padding, data_format, use_gpu, err):
    x1 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(filter_sizes)
    default_dilations = (dilations[0] == 1 and dilations[1] == 1)
    if default_dilations or use_gpu:
      with self.cached_session(use_gpu=use_gpu) as sess:
        if data_format == "NCHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
        t1 = constant_op.constant(x1, shape=input_sizes)
        t2 = constant_op.constant(x2, shape=filter_sizes)
        full_strides = [1] + strides + [1]
        full_dilations = [1] + dilations + [1]
        if data_format == "NCHW":
          full_strides = test_util.NHWCToNCHW(full_strides)
          full_dilations = test_util.NHWCToNCHW(full_dilations)
        conv_forward = nn_ops.conv2d(
            t1,
            t2,
            strides=full_strides,
            dilations=full_dilations,
            padding=padding,
            data_format=data_format)
        conv_forward_2 = nn_ops.convolution(
            t1,
            t2,
            padding=padding,
            strides=strides,
            dilation_rate=dilations,
            data_format=data_format)
        if data_format == "NCHW":
          conv_forward = test_util.NCHWToNHWC(conv_forward)
          conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
        conv = gradients_impl.gradients(conv_forward, t1)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
        # "values" consists of two tensors for two backprops
        value = self.evaluate(conv)
        value_2 = self.evaluate(conv_2)
        self.assertShapeEqual(value, conv)
        self.assertShapeEqual(value_2, conv_2)
      tf_logging.debug("expected = %s", value_2)
      tf_logging.debug("actual = %s", value)
      self.assertArrayNear(value_2.flatten(), value.flatten(), err)

  # Testing for backprops
  def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
                                          output_sizes, strides, dilations,
                                          padding, data_format, use_gpu, err):
    x1 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(filter_sizes)
    default_dilations = (dilations[0] == 1 and dilations[1] == 1)
    if default_dilations or use_gpu:
      with self.cached_session(use_gpu=use_gpu) as sess:
        if data_format == "NCHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
        t1 = constant_op.constant(x1, shape=input_sizes)
        t2 = constant_op.constant(x2, shape=filter_sizes)
        full_strides = [1] + strides + [1]
        full_dilations = [1] + dilations + [1]
        if data_format == "NCHW":
          full_strides = test_util.NHWCToNCHW(full_strides)
          full_dilations = test_util.NHWCToNCHW(full_dilations)
        conv_forward = nn_ops.conv2d(
            t1,
            t2,
            strides=full_strides,
            dilations=full_dilations,
            padding=padding,
            data_format=data_format)
        conv_forward_2 = nn_ops.convolution(
            t1,
            t2,
            padding=padding,
            strides=strides,
            dilation_rate=dilations,
            data_format=data_format)
        if data_format == "NCHW":
          conv_forward = test_util.NCHWToNHWC(conv_forward)
          conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
        conv = gradients_impl.gradients(conv_forward, t2)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2,
t2)[0] value = self.evaluate(conv) value_2 = self.evaluate(conv_2) self.assertShapeEqual(value, conv) self.assertShapeEqual(value_2, conv_2) tf_logging.debug("expected = %s", value_2) tf_logging.debug("actual = %s", value) self.assertArrayNear(value_2.flatten(), value.flatten(), err) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 5, 1], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2DEmptyBackpropFilterDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 0], output_sizes=[1, 1, 2, 0], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 4, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterDilation( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 5, 1], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2DEmptyBackpropInputDilation1x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): 
for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[0, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[0, 1, 2, 1], strides=[1, 1], dilations=[1, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) @test_util.deprecated_graph_mode_only def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): # The GPU version of this test is not very stable. So adjusting the # error threshold to 1e-4. self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 2, 3], filter_sizes=[2, 2, 3, 3], output_sizes=[1, 1, 2, 3], strides=[1, 1], dilations=[2, 1], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-4) @test_util.deprecated_graph_mode_only def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self): if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled(): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputDilation( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 1, 1, 2], strides=[1, 1], dilations=[2, 2], padding="VALID", data_format=data_format, use_gpu=use_gpu, err=1e-5) def _RunAndVerifyBackpropInputExplicitPadding(self, input_sizes, filter_sizes, output_sizes, strides, padding, data_format, use_gpu, dilations=(1, 1), err=2e-5): if use_gpu and not test.is_gpu_available(cuda_only=True): return if not use_gpu and dilations != (1, 1): return # Non-default dilations is currently not supported on the CPU. x1 = self._CreateNumpyTensor(filter_sizes) x2 = self._CreateNumpyTensor(output_sizes) dilations = list(dilations) padded_input_sizes = input_sizes[:] padded_input_sizes[1] += padding[0][0] + padding[0][1] padded_input_sizes[2] += padding[1][0] + padding[1][1] c = nn_ops.conv2d_backprop_input( padded_input_sizes, x1, x2, strides=[1] + strides + [1], padding="VALID", dilations=[1] + dilations + [1]) c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:( c.shape[2] - padding[1][1]), :] expected = list(self.evaluate(array_ops.reshape(c, [-1]))) self._RunAndVerifyBackpropInput( input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu=use_gpu, err=err, dilations=dilations) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding0x0BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 3, 4, 2], filter_sizes=[2, 2, 2, 3], output_sizes=[1, 1, 2, 3], strides=[2, 2], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding1x1BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 3, 4, 2], strides=[1, 1], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu, err=1e-4) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 2], filter_sizes=[1, 1, 2, 1], output_sizes=[1, 4, 3, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 4, 3, 1], 
filter_sizes=[2, 2, 1, 1], output_sizes=[1, 4, 2, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], data_format=data_format, dilations=[2, 2], use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding2x2BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[2, 3, 1, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[2, 2, 5, 1], strides=[3, 1], padding=[[2, 2], [2, 2]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 3, 6, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 3, 4, 1], strides=[1, 2], padding=[[2, 2], [2, 2]], data_format=data_format, dilations=[2, 3], use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 10, 8, 1], strides=[1, 1], padding=[[1, 8], [4, 2]], data_format=data_format, use_gpu=use_gpu, err=5e-5) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 5, 3, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 4, 8, 1], strides=[3, 1], padding=[[1, 8], [4, 2]], data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[1, 7, 7, 1], strides=[1, 1], padding=[[5, 0], [2, 2]], data_format=data_format, err=5e-5, use_gpu=use_gpu) self._RunAndVerifyBackpropInputExplicitPadding( input_sizes=[1, 4, 2, 1], filter_sizes=[3, 3, 1, 1], output_sizes=[1, 5, 2, 1], strides=[1, 2], padding=[[5, 0], [2, 2]], data_format=data_format, dilations=[2, 1], use_gpu=use_gpu) def _RunAndVerifyBackpropFilterExplicitPadding(self, input_sizes, filter_sizes, output_sizes, strides, padding, data_format, use_gpu, dilations=(1, 1), err=1e-5): if use_gpu and not test.is_gpu_available(cuda_only=True): return if not use_gpu and dilations != (1, 1): return # Non-default dilations is currently not supported on the CPU. 
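    # Strategy: manually pad the input with np.pad, compute the filter
    # gradient of the padded input under VALID padding as the reference, and
    # then require the explicit-padding path to reproduce it.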
x0 = self._CreateNumpyTensor(input_sizes) x2 = self._CreateNumpyTensor(output_sizes) dilations = list(dilations) x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant") c = nn_ops.conv2d_backprop_filter( x0, filter_sizes, x2, strides=[1] + strides + [1], padding="VALID", dilations=[1] + dilations + [1]) expected = list(self.evaluate(array_ops.reshape(c, [-1]))) self._RunAndVerifyBackpropFilter( input_sizes, filter_sizes, output_sizes, strides, padding, expected, data_format, use_gpu=use_gpu, dilations=dilations, err=err) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding0x0BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 1, 2, 1], strides=[1, 1], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 3, 4, 2], filter_sizes=[2, 2, 2, 3], output_sizes=[1, 1, 2, 3], strides=[2, 2], padding=[[0, 0], [0, 0]], data_format=data_format, use_gpu=use_gpu) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding1x1BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 2], output_sizes=[1, 3, 4, 2], strides=[1, 1], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu, err=5e-5) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 2], filter_sizes=[1, 1, 2, 1], output_sizes=[1, 4, 3, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], use_gpu=use_gpu, data_format=data_format) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 4, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 4, 2, 1], strides=[1, 2], padding=[[1, 1], [1, 1]], data_format=data_format, use_gpu=use_gpu, dilations=[2, 2]) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding2x2BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[2, 3, 1, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[2, 2, 5, 1], strides=[3, 1], padding=[[2, 2], [2, 2]], data_format=data_format, use_gpu=use_gpu) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 3, 6, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 3, 4, 1], strides=[1, 2], padding=[[2, 2], [2, 2]], data_format=data_format, use_gpu=use_gpu, dilations=[2, 3]) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1], output_sizes=[1, 10, 8, 1], strides=[1, 1], padding=[[1, 8], [4, 2]], data_format=data_format, use_gpu=use_gpu, err=1e-4) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 5, 3, 1], filter_sizes=[3, 2, 1, 1], output_sizes=[1, 4, 8, 1], strides=[3, 1], padding=[[1, 8], [4, 2]], use_gpu=use_gpu, data_format=data_format) @test_util.run_in_graph_and_eager_modes() def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self): for (data_format, use_gpu) in GetTestConfigs(): self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 3, 3, 1], filter_sizes=[2, 1, 1, 1], output_sizes=[1, 7, 7, 1], strides=[1, 1], padding=[[5, 0], [2, 2]], data_format=data_format, use_gpu=use_gpu, err=1e-4) self._RunAndVerifyBackpropFilterExplicitPadding( input_sizes=[1, 4, 2, 1], 
filter_sizes=[3, 3, 1, 1], output_sizes=[1, 5, 2, 1], strides=[1, 2], padding=[[5, 0], [2, 2]], data_format=data_format, use_gpu=use_gpu, dilations=[2, 1]) # Gradient checkers def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows, filter_cols, in_depth, out_depth, stride_rows, stride_cols, padding, test_input, data_format, use_gpu, num_groups=1, max_err=0.003): assert in_depth % num_groups == 0 and out_depth % num_groups == 0 input_shape = [batch, input_rows, input_cols, in_depth] filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth] # TODO(yangke): re-factor the computation of output shape. if padding == "VALID": output_rows = (input_rows - filter_rows + stride_rows) // stride_rows output_cols = (input_cols - filter_cols + stride_cols) // stride_cols elif padding == "SAME": output_rows = (input_rows + stride_rows - 1) // stride_rows output_cols = (input_cols + stride_cols - 1) // stride_cols else: self.assertIsInstance(padding, (list, tuple)) output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows + stride_rows) // stride_rows output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols + stride_cols) // stride_cols output_shape = [batch, output_rows, output_cols, out_depth] input_size = 1 for x in input_shape: input_size *= x filter_size = 1 for x in filter_shape: filter_size *= x input_data = [x * 1.0 / input_size for x in range(0, input_size)] filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)] # Conv2DGrad functions are not compiled for double due to # a problem in the way Eigen's Conv2DGrad works for double. # So we disable the DOUBLE path. We should re-enable this # when double support returns for CPU and/or GPU. for dtype in self._DtypesToTest(use_gpu=use_gpu): with self.cached_session(use_gpu=use_gpu): input_tensor = constant_op.constant( input_data, shape=input_shape, dtype=dtype, name="input") filter_tensor = constant_op.constant( filter_data, shape=filter_shape, dtype=dtype, name="filter") strides = [1, stride_rows, stride_cols, 1] new_padding = padding if data_format == "NCHW": new_input_tensor = test_util.NHWCToNCHW(input_tensor) strides = test_util.NHWCToNCHW(strides) if isinstance(padding, (list, tuple)): new_padding = test_util.NHWCToNCHW(padding) else: new_input_tensor = input_tensor conv = nn_ops.conv2d( new_input_tensor, filter_tensor, strides, new_padding, data_format=data_format, name="conv") if data_format == "NCHW": conv = test_util.NCHWToNHWC(conv) self.assertEqual(output_shape, conv.get_shape()) if test_input: jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor, input_shape, conv, output_shape) else: jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor, filter_shape, conv, output_shape) if dtype == dtypes.float32: reference_jacob_t = jacob_t err = np.fabs(jacob_t - jacob_n).max() else: # Compare fp16 theoretical gradients to fp32 theoretical gradients, # since fp16 numerical gradients are too imprecise. 
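          # (This assumes _DtypesToTest yields float32 first, so that
          # reference_jacob_t is already set on any float16 iteration.)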
err = np.fabs(jacob_t - reference_jacob_t).max() tf_logging.debug("conv_2d gradient error = %s", err) self.assertLess(err, max_err) @test_util.deprecated_graph_mode_only def testInputGradientValidPaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientValidPaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientValidPaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=5, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientValidPaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientValidPaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=4, out_depth=5, stride_rows=3, stride_cols=3, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientValidPaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=3, stride_cols=3, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientSamePaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientSamePaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=3, out_depth=3, stride_rows=2, stride_cols=2, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStrideTwo(self): for (data_format, use_gpu) in 
GetTestConfigs(): self.ConstructAndTestGradient( batch=4, input_rows=6, input_cols=5, filter_rows=2, filter_cols=2, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientSamePaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=7, input_cols=6, filter_rows=3, filter_cols=3, in_depth=4, out_depth=5, stride_rows=3, stride_cols=3, padding="SAME", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStrideThree(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=3, stride_cols=3, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientSamePaddingStride2x1(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=7, filter_rows=4, filter_cols=4, in_depth=2, out_depth=3, stride_rows=2, stride_cols=1, padding="SAME", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradientKernelSizeMatchesInputSize(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=3, filter_rows=4, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradientKernelSizeMatchesInputSize(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=3, filter_rows=4, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding="VALID", test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient1x1PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu, max_err=0.0025) @test_util.deprecated_graph_mode_only def testFilterGradient1x1PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient1x1PaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=5, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=2, stride_cols=2, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient1x1PaddingStrideTwo(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=4, input_cols=5, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=2, 
stride_cols=2, padding=[[0, 0], [1, 1], [1, 1], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient2x2PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [2, 2], [2, 2], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu, max_err=0.003) @test_util.deprecated_graph_mode_only def testFilterGradient2x2PaddingStrideOne(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=5, input_cols=4, filter_rows=3, filter_cols=3, in_depth=2, out_depth=3, stride_rows=1, stride_cols=1, padding=[[0, 0], [2, 2], [2, 2], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu, max_err=0.003) @test_util.deprecated_graph_mode_only def testInputGradient1_2_3_4PaddingStride3x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=5, filter_rows=4, filter_cols=2, in_depth=3, out_depth=2, stride_rows=3, stride_cols=2, padding=[[0, 0], [1, 2], [3, 4], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient1_2_3_4PaddingStride3x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=8, input_cols=5, filter_rows=4, filter_cols=2, in_depth=3, out_depth=2, stride_rows=3, stride_cols=2, padding=[[0, 0], [1, 2], [3, 4], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient4_3_2_1PaddingStride2x1(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=3, input_rows=5, input_cols=7, filter_rows=3, filter_cols=2, in_depth=1, out_depth=2, stride_rows=2, stride_cols=1, padding=[[0, 0], [4, 3], [2, 1], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient4_3_2_1PaddingStride2x1(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=3, input_rows=5, input_cols=7, filter_rows=3, filter_cols=2, in_depth=1, out_depth=2, stride_rows=2, stride_cols=1, padding=[[0, 0], [4, 3], [2, 1], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testInputGradient0_0_0_5PaddingStride1x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=6, input_cols=7, filter_rows=3, filter_cols=4, in_depth=3, out_depth=2, stride_rows=1, stride_cols=2, padding=[[0, 0], [0, 0], [0, 5], [0, 0]], test_input=True, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testFilterGradient0_0_0_5PaddingStride1x2(self): for (data_format, use_gpu) in GetTestConfigs(): self.ConstructAndTestGradient( batch=2, input_rows=6, input_cols=7, filter_rows=3, filter_cols=4, in_depth=3, out_depth=2, stride_rows=1, stride_cols=2, padding=[[0, 0], [0, 0], [0, 5], [0, 0]], test_input=False, data_format=data_format, use_gpu=use_gpu) @test_util.deprecated_graph_mode_only def testShapeFunctionEdgeCases(self): # All shapes unknown. 
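    # Shape inference should still succeed and report a rank-4 output with
    # every dimension unknown.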
c1 = nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding="SAME") self.assertEqual([None, None, None, None], c1.get_shape().as_list()) # Incorrect input shape. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[1, 3]), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding="SAME") # Incorrect filter shape. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder( dtypes.float32, shape=[1, 3]), strides=[1, 1, 1, 1], padding="SAME") # Depth mismatch. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]), array_ops.placeholder( dtypes.float32, shape=[4, 4, 2, 2]), strides=[1, 1, 1, 1], padding="SAME") # Input depth divisible by filter depth (group convolution). # No exceptions should appear. nn_ops.conv2d( array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]), array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]), strides=[1, 1, 1, 1], padding="SAME") # Negative padding. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0, 0], [0, -1], [1, 2], [0, 0]]) # Nonzero padding in nonspatial dimension. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[1, 0], [0, 0], [0, 0], [0, 0]]) # Nonzero NCHW padding in nonspatial dimension. with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0, 0], [0, 1], [0, 0], [0, 0]], data_format="NCHW") # Wrong amount of padding with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0, 0], [0, 0], [0, 0]]) # Only specify one padding amount per dimension with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[[0], [0], [0], [0]]) # Explicit padding elements are not lists with self.assertRaises(ValueError): nn_ops.conv2d( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.float32), strides=[1, 1, 1, 1], padding=[0, 0, 0, 0]) @test_util.deprecated_graph_mode_only def testOpEdgeCases(self): with self.cached_session() as sess: # Illegal strides. with self.assertRaisesRegex(errors_impl.UnimplementedError, "strides in the batch and depth"): input_placeholder = array_ops.placeholder(dtypes.float32) input_val = np.ones([10, 10]) filter_placeholder = array_ops.placeholder(dtypes.float32) filter_val = np.ones([10, 10]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[2, 1, 1, 1], padding="SAME"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) with self.assertRaisesRegex(errors_impl.UnimplementedError, "strides in the batch and depth"): input_placeholder = array_ops.placeholder(dtypes.float32) filter_placeholder = array_ops.placeholder(dtypes.float32) input_val = np.ones([10, 10]) filter_val = np.ones([10, 10]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 2], padding="SAME"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) # Filter larger than input. 
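      # Under VALID padding the filter extent may not exceed the input
      # extent, so shape inference rejects these ops at graph-build time.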
with self.assertRaisesRegex(ValueError, "Negative dimension size"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[20, 21, 3, 2]) filter_val = np.ones([20, 21, 3, 2]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 1], padding="VALID"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) with self.assertRaisesRegex(ValueError, "Negative dimension size"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[21, 20, 3, 2]) filter_val = np.ones([21, 20, 3, 2]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 1], padding="VALID"), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) # Filter larger than input + padding. with self.assertRaisesRegex(ValueError, "Negative dimension size"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[24, 25, 3, 2]) filter_val = np.ones([24, 25, 3, 2]) sess.run( nn_ops.conv2d( input_placeholder, filter_placeholder, strides=[1, 1, 1, 1], padding=[[0, 0], [2, 2], [2, 2], [0, 0]]), feed_dict={ input_placeholder: input_val, filter_placeholder: filter_val }) # Negative padding during backprop. with self.assertRaisesRegex( errors_impl.InvalidArgumentError, "All elements of explicit_paddings must be nonnegative"): filter_placeholder = array_ops.placeholder( dtypes.float32, shape=[18, 18, 3, 2]) filter_val = np.ones([18, 18, 3, 2]) out_backprop = array_ops.placeholder( dtypes.float32, shape=[32, 3, 2, 2]) out_backprop_val = np.ones([32, 3, 2, 2]) sess.run( nn_ops.conv2d_backprop_input([32, 20, 20, 3], filter_placeholder, out_backprop, strides=[1, 1, 1, 1], padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]), feed_dict={ filter_placeholder: filter_val, out_backprop: out_backprop_val }) with self.assertRaisesRegex( errors_impl.InvalidArgumentError, "All elements of explicit_paddings must be nonnegative"): input_placeholder = array_ops.placeholder( dtypes.float32, shape=[32, 20, 20, 3]) input_val = np.ones([32, 20, 20, 3]) out_backprop = array_ops.placeholder( dtypes.float32, shape=[32, 3, 2, 2]) out_backprop_val = np.ones([32, 3, 2, 2]) sess.run( nn_ops.conv2d_backprop_filter( input_placeholder, [18, 18, 3, 2], out_backprop, strides=[1, 1, 1, 1], padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]), feed_dict={ input_placeholder: input_val, out_backprop: out_backprop_val }) class DepthwiseConv2DTest(test.TestCase): def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected): """Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols, input_depth, depth_multiplier]. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. """ total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s # Initializes the input tensor with array containing incrementing # numbers from 1. 
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)] x2 = [f * 1.0 for f in range(1, total_size_2 + 1)] with self.cached_session() as sess: t1 = constant_op.constant(x1, shape=tensor_in_sizes) t1.set_shape(tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) conv = nn_impl.depthwise_conv2d( t1, t2, strides=[1, stride, stride, 1], padding=padding) value = self.evaluate(conv) tf_logging.debug("value = %s", value) self.assertArrayNear(expected, np.ravel(value), 1e-5) self.assertShapeEqual(value, conv) def testConv2D2x2Filter(self): # The inputs look like this (it's a 3 x 2 matrix, each of depth 2): # # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ] # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ] # We can view this as two inputs # # input depth 0: # # [ 1.0, 3.0, 5.0 ] # [ 7.0, 9.0, 11.0 ] # # input depth 1: # # [ 2.0, 4.0, 6.0 ] # [ 8.0, 10.0, 12.0 ] # # The filter looks like this (it has two 2 x 2 patches, each generating 2 # depths): # # filter #0: # # [ (1.0, 3.0), ( 5.0, 7.0)] # [ (9.0, 11.0), (13.0, 15.0)] # # filter #1: # # [ ( 2.0, 4.0), ( 6.0, 8.0)] # [ (10.0, 12.0), (14.0, 16.0)] # # So the outputs are: # # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0) # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196 # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1) # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216 # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0) # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272 # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1) # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296 # # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0) # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252 # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1) # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280 # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0) # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344 # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1) # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376 expected_output = [196, 216, 272, 296, 252, 280, 344, 376] self._VerifyValues( tensor_in_sizes=[1, 2, 3, 2], filter_in_sizes=[2, 2, 2, 2], stride=1, padding="VALID", expected=expected_output) class SeparableConv2DTest(test.TestCase): def _InitValues(self, sizes): """Initializes values for input tensors. Args: sizes: Tensor dimensions. Returns: Tensor initialized to values. """ total_size = 1 for s in sizes: total_size *= s x = [f * 0.5 for f in range(1, total_size + 1)] return constant_op.constant(x, shape=sizes) def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes, pointwise_filter_in_sizes, stride, padding, expected, data_format="NHWC"): """Verifies the output values of the separable convolution function. Args: tensor_in_sizes: Input tensor dimensions. depthwise_filter_in_sizes: Depthwise filter tensor dimensions. pointwise_filter_in_sizes: Pointwise filter tensor dimensions. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. data_format: string data format for input tensor. 
""" with self.cached_session(): t1 = self._InitValues(tensor_in_sizes) f1 = self._InitValues(depthwise_filter_in_sizes) f1.set_shape(depthwise_filter_in_sizes) f2 = self._InitValues(pointwise_filter_in_sizes) real_t1 = t1 strides = [1, stride, stride, 1] if data_format == "NCHW": real_t1 = array_ops.transpose(t1, [0, 3, 1, 2]) strides = [1, 1, stride, stride] if isinstance(padding, list): padding = [padding[0], padding[3], padding[1], padding[2]] conv = nn_impl.separable_conv2d( real_t1, f1, f2, strides=strides, padding=padding, data_format=data_format) if data_format == "NCHW": conv = array_ops.transpose(conv, [0, 2, 3, 1]) value = self.evaluate(conv) tf_logging.debug("value = %s", value) self.assertArrayNear(expected, np.ravel(value), 2e-3) self.assertShapeEqual(value, conv) def _testSeparableConv2D(self, data_format): # The output is the result of two convolutions: # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3]. # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7]. # Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2). expected_output = [ 6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5, 8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5, 11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5, 4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5, 15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5, 18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5, 6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5, 19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5, 22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5, 24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5, 10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75, 7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25, 7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75, 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75 ] self._VerifyValues( tensor_in_sizes=[1, 4, 4, 2], depthwise_filter_in_sizes=[2, 2, 2, 3], pointwise_filter_in_sizes=[1, 1, 6, 7], stride=1, padding="SAME", expected=expected_output, data_format=data_format) def testSeparableConv2D(self): self._testSeparableConv2D("NHWC") def disabledtestSeparableConv2DNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2D("NCHW") def _testSeparableConv2DEqualInputOutputDepth(self, data_format): # The output is the result of two convolutions: # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3]. # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6]. # Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2). 
expected_output = [ 5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0, 8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0, 10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0, 11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0, 14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0, 17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0, 17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0, 20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0, 24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5, 5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0, 6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5, 1923.75, 2007.0, 2090.25, 2173.5 ] self._VerifyValues( tensor_in_sizes=[1, 4, 4, 2], depthwise_filter_in_sizes=[2, 2, 2, 3], pointwise_filter_in_sizes=[1, 1, 6, 6], stride=1, padding="SAME", expected=expected_output, data_format=data_format) @test_util.deprecated_graph_mode_only def testSeparableConv2DEqualInputOutputDepth(self): self._testSeparableConv2DEqualInputOutputDepth("NHWC") def testSeparableConv2DEqualInputOutputDepthNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2DEqualInputOutputDepth("NCHW") def _testSeparableConv2dExplicitPadding(self, data_format): tensor_in_sizes = [1, 4, 4, 2] depthwise_filter_in_sizes = [2, 2, 2, 3] pointwise_filter_in_sizes = [1, 1, 6, 7] padding = [[0, 0], [1, 2], [3, 4], [0, 0]] with self.cached_session(): # Compute the 'expected' values by manually padding before calling # separable_conv2d t1 = self._InitValues(tensor_in_sizes) t1 = array_ops.pad(t1, padding) f1 = self._InitValues(depthwise_filter_in_sizes) f1.set_shape(depthwise_filter_in_sizes) f2 = self._InitValues(pointwise_filter_in_sizes) conv = nn_impl.separable_conv2d( t1, f1, f2, strides=[1, 1, 1, 1], padding="VALID", data_format="NHWC") expected = self.evaluate(conv) expected = np.ravel(expected) self._VerifyValues( tensor_in_sizes=tensor_in_sizes, depthwise_filter_in_sizes=depthwise_filter_in_sizes, pointwise_filter_in_sizes=pointwise_filter_in_sizes, stride=1, padding=padding, expected=expected, data_format=data_format) def testSeparableConv2dExplicitPadding(self): self._testSeparableConv2dExplicitPadding("NHWC") def testSeparableConv2dExplicitPaddingNCHW(self): if not test.is_gpu_available(): return self._testSeparableConv2dExplicitPadding("NCHW") class DeepConv2DTest(test.TestCase): def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding): """Verifies that DeepConv2D and Conv2D produce the same values. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. conv_strides: [row_stride, col_stride] for the convolution; padding: Padding type. 
""" x1 = np.random.rand(*tensor_in_sizes).astype(np.float32) x2 = np.random.rand(*filter_in_sizes).astype(np.float32) with self.cached_session(use_gpu=False) as sess: t1 = constant_op.constant(x1, shape=tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) strides = [1] + conv_strides + [1] conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding) os.environ["TF_USE_DEEP_CONV2D"] = "0" values_expect = self.evaluate([conv]) os.environ["TF_USE_DEEP_CONV2D"] = "1" values_test = self.evaluate([conv]) self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5) def _RunTestCases(self, conv_strides, padding): input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288], [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]] filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384], [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]] for input_shape, filter_shape in zip(input_sizes, filter_sizes): self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding) def testConv2D3x3FilterStride1x1Valid(self): self._RunTestCases([1, 1], "VALID") def testConv2D3x3FilterStride1x1Same(self): self._RunTestCases([1, 1], "SAME") class Conv2DBenchmark(test.Benchmark): def benchmarkGPUConvStackFirst(self): # Benchmark the first iteration of a conv-net with many identical conv # operations. if not test.is_gpu_available(): return with ops.Graph().as_default(), session_lib.Session() as session: batch_size = 1 timesteps = 600 features = 1 inputs = random_ops.random_uniform( [batch_size, 1, timesteps, features], seed=1234) num_outputs_list = [512] * 40 + [1] kernel_w = 3 x = inputs for num_outputs in num_outputs_list: x = convolutional.conv2d(x, num_outputs, [1, kernel_w]) outputs = x self.evaluate(variables.global_variables_initializer()) num_iterations = 4 for iter_index in xrange(num_iterations): start = time.time() session.run(outputs) wall_time = time.time() - start self.report_benchmark( name="conv_stack_iter_%d" % iter_index, wall_time=wall_time) tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time)) def _bench_op(self, name, op, burn_iters, num_iters): config = config_pb2.ConfigProto() # Prevent Grappler from optimizing away the entire graph. config.graph_options.rewrite_options.dependency_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) with session_lib.Session(config=config) as session: self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( session, op, burn_iters=burn_iters, min_iters=num_iters, name=name) def benchmarkExplicitVsManualPadding(self): """Compare performance of EXPLICIT padding and calling tf.pad. A Conv2D op with EXPLICIT padding is benchmarked, and a tf.pad with the same padding followed by an equivalent Conv2D op is benchmarked. """ if not test.is_gpu_available(): return with ops.Graph().as_default(): burn_iters = 15 num_iters = 300 batch_size = 64 # The input and filter correspond to the first layer of Resnet50. input = variables.Variable( # pylint: disable=redefined-builtin random_ops.random_uniform([ batch_size, 3, 224, 224 ])) filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64])) # pylint: disable=redefined-builtin strides = [1, 1, 2, 2] padding = [(0, 0), (0, 0), (3, 3), (3, 3)] output_explicit_pad = nn_ops.conv2d( input, filter, strides, padding=padding, data_format="NCHW") input_padded = array_ops.pad(input, padding) output_manual_pad = nn_ops.conv2d( input_padded, filter, strides, padding="VALID", data_format="NCHW") # Benchmark just the forward pass. 
self._bench_op("explicit_pad_forward", output_explicit_pad.op, burn_iters, num_iters) self._bench_op("manual_pad_forward", output_manual_pad.op, burn_iters, num_iters) # Benchmark both the forward and backwards passes. input_grad_explicit_pad, filter_grad_explicit_pad = ( gradients_impl.gradients(output_explicit_pad, [input, filter])) self._bench_op( "explicit_pad_backward", control_flow_ops.group(input_grad_explicit_pad, filter_grad_explicit_pad), burn_iters, num_iters) input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients( output_manual_pad, [input, filter]) self._bench_op( "manual_pad_backward", control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad), burn_iters, num_iters) def benchmarkExplicitVsSamePaddingGraph(self): """Compare performance of EXPLICIT and SAME padding in graph mode. A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op with explicit padding is benchmarked, where the padding is the same as in the SAME case. The purpose is to ensure EXPLICIT padding is just as efficient as the SAME case """ if not test.is_gpu_available(): return with ops.Graph().as_default(): burn_iters = 15 num_convs = 20 num_iters = 50 batch_size = 64 # The input and filter correspond to a middle layer of Resnet50. input = variables.Variable( # pylint: disable=redefined-builtin random_ops.random_uniform([ batch_size, 256, 14, 14 ])) filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin strides = [1, 1, 1, 1] padding = [(0, 0), (0, 0), (1, 1), (1, 1)] output_explicit_pad = input output_same_pad = input for _ in range(num_convs): output_explicit_pad = nn_ops.conv2d( output_explicit_pad, filter, strides, padding=padding, data_format="NCHW") output_same_pad = nn_ops.conv2d( output_same_pad, filter, strides, padding="SAME", data_format="NCHW") grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter) grad_same_pad, = gradients_impl.gradients(output_same_pad, filter) self._bench_op("graph_explicit_pad", grad_explicit_pad.op, burn_iters, num_iters) self._bench_op("graph_same_pad", grad_same_pad.op, burn_iters, num_iters) def benchmarkExplicitVsSamePaddingEager(self): """Compare performance of EXPLICIT and SAME padding in eager mode. A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op with explicit padding is benchmarked, where the padding is the same as in the SAME case. Currently, EXPLICIT padding is slightly slower, due to the fact the Python padding list must be checked and processed before the Conv2D op can run. """ # TODO(reedwm): Make EXPLICIT padding as fast as SAME padding. if not test.is_gpu_available(): return with context.eager_mode(): burn_iters = 15 num_convs = 20 num_iters = 50 batch_size = 64 # The input and filter correspond to a middle layer of Resnet50. 
input = variables.Variable( # pylint: disable=redefined-builtin random_ops.random_uniform([ batch_size, 256, 14, 14 ])) filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin strides = [1, 1, 1, 1] padding = [(0, 0), (0, 0), (1, 1), (1, 1)] output_explicit_pad = input output_same_pad = input for _ in range(burn_iters): output_explicit_pad = nn_ops.conv2d( output_explicit_pad, filter, strides, padding=padding, data_format="NCHW") output_same_pad = nn_ops.conv2d( output_same_pad, filter, strides, padding="SAME", data_format="NCHW") start = time.time() for _ in range(num_iters): with backprop.GradientTape() as tape: for _ in range(num_convs): output_explicit_pad = nn_ops.conv2d( output_explicit_pad, filter, strides, padding=padding, data_format="NCHW") tape.gradient(output_explicit_pad, filter) end = time.time() self.report_benchmark( name="eager_explicit_pad", wall_time=(end - start) / num_iters, iters=num_iters) start = time.time() for _ in range(num_iters): with backprop.GradientTape() as tape: for _ in range(num_convs): output_same_pad = nn_ops.conv2d( output_same_pad, filter, strides, padding="SAME", data_format="NCHW") tape.gradient(output_same_pad, filter) end = time.time() self.report_benchmark( name="eager_same_pad", wall_time=(end - start) / num_iters, iters=num_iters) def GetInceptionFwdTest(input_size, filter_size, stride, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size, stride, padding)) return tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride, padding)) self._CompareFwdValues(input_size, filter_size, [stride, stride], padding) return Test def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding): def Test(self): if stride == 1: tf_logging.info("Testing InceptionFwd with dilations %s", (input_size, filter_size, stride, padding)) self._VerifyDilatedConvValues( tensor_in_sizes=input_size, filter_in_sizes=filter_size, strides=[stride, stride], dilations=[2, 2], padding=padding, rtol=5e-4) return Test def GetInceptionBackInputTest(input_size, filter_size, output_size, stride, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionBackInput %s", (input_size, filter_size, output_size, stride, padding)) return tf_logging.info("Testing InceptionBackInput %s", (input_size, filter_size, output_size, stride, padding)) self._CompareBackpropInput(input_size, filter_size, output_size, [stride, stride], padding) return Test def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides, padding, gpu_only=False): def Test(self): if gpu_only and not test.is_gpu_available(): tf_logging.info("Skipping InceptionBackFilter %s", (input_size, filter_size, output_size, strides, padding)) return tf_logging.info("Testing InceptionBackFilter %s", (input_size, filter_size, output_size, strides, padding)) self._CompareBackFilter(input_size, filter_size, output_size, strides, padding) return Test class FusedConv2DTest(test.TestCase): def _CreateNumpyTensor(self, shape): total_size = np.prod(shape) return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape) def _CreateConv2D(self, input_values, filters, strides=[1, 1], padding="SAME"): return nn_ops.convolution( input_values, filters, strides=strides, padding=padding) # Tests tensor forwarding of a fused Conv2D+BiasAdd+Add op when the input to # Add has refcount 1. 
@test_util.run_in_graph_and_eager_modes(use_gpu=False) def testAddWithRefCountOne(self): expected_output = [ 113377, 125570, 77305, 86738, 19433, 22226, 60681, 70722, 36291, 43718, 7143, 9206, 9785, 12098, 4783, 6366, 779, 1134 ] tensor_in_sizes = [1, 3, 3, 2] filter_in_sizes = [2, 2, 2, 2] bias_in_sizes = [2] x = self._CreateNumpyTensor(tensor_in_sizes) filter_in = self._CreateNumpyTensor(filter_in_sizes) bias_in = self._CreateNumpyTensor(bias_in_sizes) # To get different weights for filter offset = 1 conv1 = self._CreateConv2D(x, filter_in) conv2 = self._CreateConv2D(conv1, filter_in + offset) conv = self._CreateConv2D(conv1, filter_in - offset) bias_add = nn_ops.bias_add(conv, bias_in) add = math_ops.add_n([bias_add, conv2]) self.assertAllEqual( np.rint(expected_output), self.evaluate(add).reshape(-1)) # Tests tensor forwarding of a fused Conv2D+BiasAdd+Add op when the input to # Add has a total refcount of 2, and Add is its last consumer. @test_util.run_in_graph_and_eager_modes(use_gpu=False) def testAddWithRefCountTwoAndRunAddLast(self): expected_output = [ 1.907175e+06, 2.253505e+06, 7.809210e+05, 9.537180e+05, 1.184170e+05, 1.523070e+05, 5.367010e+05, 6.803700e+05, 1.867090e+05, 2.529460e+05, 2.362300e+04, 3.522600e+04, 5.121700e+04, 7.168300e+04, 1.494300e+04, 2.347400e+04, 1.558000e+03, 2.903000e+03 ] tensor_in_sizes = [1, 3, 3, 2] filter_in_sizes = [2, 2, 2, 2] bias_in_sizes = [2] x = self._CreateNumpyTensor(tensor_in_sizes) filter_in = self._CreateNumpyTensor(filter_in_sizes) bias_in = self._CreateNumpyTensor(bias_in_sizes) # To get different weights for filter offset = 1 conv1 = self._CreateConv2D(x, filter_in) conv2 = self._CreateConv2D(conv1, filter_in + offset) conv = self._CreateConv2D(conv2, filter_in - offset) bias_add = nn_ops.bias_add(conv, bias_in) add = math_ops.add_n([bias_add, conv1]) self.assertAllEqual( np.rint(expected_output), self.evaluate(add).reshape(-1)) # Tests tensor forwarding of a fused Conv2D+BiasAdd+Add op when the input to # Add has refcount 2 and Add (in the fused Conv2D op) is its first consumer. @test_util.run_in_graph_and_eager_modes(use_gpu=False) def testAddWithRefCountTwoAndRunAddFirst(self): expected_output = [ 176161, 194450, 120673, 134822, 30545, 34734, 96041, 111102, 58149, 69289, 11745, 14839, 15833, 19302, 7965, 10339, 1345, 1877 ] tensor_in_sizes = [1, 3, 3, 2] filter_in_sizes = [2, 2, 2, 2] bias_in_sizes = [2] x = self._CreateNumpyTensor(tensor_in_sizes) filter_in = self._CreateNumpyTensor(filter_in_sizes) bias_in = self._CreateNumpyTensor(bias_in_sizes) # To get different weights for filter offset = 1 conv1 = self._CreateConv2D(x, filter_in) conv2 = self._CreateConv2D(conv1, filter_in + offset) conv = self._CreateConv2D(conv1, filter_in - offset) bias_add = nn_ops.bias_add(conv, bias_in) add = math_ops.add_n([bias_add, conv2]) relu = nn_ops.relu(add) output = math_ops.add_n([relu, conv2]) self.assertAllEqual( np.rint(expected_output), self.evaluate(output).reshape(-1)) # Tests tensor forwarding of a fused Conv2D+BiasAdd+Add op when the input to # Add has refcount 2, and there is no dependency between its two consumers. 
@test_util.run_in_graph_and_eager_modes(use_gpu=False) def testAddWithRefCountTwoAndNoDependence(self): expected_output = [ 176161, 194450, 120673, 134822, 30545, 34734, 96041, 111102, 58149, 69289, 11745, 14839, 15833, 19302, 7965, 10339, 1345, 1877 ] tensor_in_sizes = [1, 3, 3, 2] filter_in_sizes = [2, 2, 2, 2] bias_in_sizes = [2] x = self._CreateNumpyTensor(tensor_in_sizes) filter_in = self._CreateNumpyTensor(filter_in_sizes) bias_in = self._CreateNumpyTensor(bias_in_sizes) # To get different weights for filter offset = 1 conv1 = self._CreateConv2D(x, filter_in) conv2 = self._CreateConv2D(conv1, filter_in + offset) conv = self._CreateConv2D(conv1, filter_in - offset) bias_add = nn_ops.bias_add(conv, bias_in) add = math_ops.add_n([bias_add, conv2]) relu1 = nn_ops.relu(add) relu2 = nn_ops.relu(conv2) output = math_ops.add_n([relu1, relu2]) self.assertAllEqual( np.rint(expected_output), self.evaluate(output).reshape(-1)) # Tests tensor forwarding of a fused Conv2D+BiasAdd+Add op when the input to # Add is the same as the input to the fused Conv2D op and needs a tensor # buffer. @test_util.run_in_graph_and_eager_modes(use_gpu=False) def testAddWithSameSrcAndAddTensorBuffer(self): expected_output = [ 57157, 63298, 39249, 44026, 9971, 11402, 31193, 36306, 19126, 22948, 3970, 5060, 5135, 6350, 2666, 3524, 461, 674 ] tensor_in_sizes = [1, 3, 3, 2] filter_in_sizes = [2, 2, 2, 2] bias_in_sizes = [2] x = self._CreateNumpyTensor(tensor_in_sizes) filter_in = self._CreateNumpyTensor(filter_in_sizes) bias_in = self._CreateNumpyTensor(bias_in_sizes) conv1 = self._CreateConv2D(x, filter_in) conv = self._CreateConv2D(conv1, filter_in) bias_add = nn_ops.bias_add(conv, bias_in) add = math_ops.add_n([bias_add, conv1]) self.assertAllEqual( np.rint(expected_output), self.evaluate(add).reshape(-1)) if __name__ == "__main__": for index, (input_size_, filter_size_, output_size_, stride_, padding_) in enumerate(GetShrunkInceptionShapes()): setattr(Conv2DTest, "testInceptionFwd_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))) setattr( Conv2DTest, "testInceptionFwdDilatedConv_" + str(index), test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest( input_size_, filter_size_, stride_, padding_))) setattr(Conv2DTest, "testInceptionBackInput_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionBackInputTest(input_size_, filter_size_, output_size_, stride_, padding_))) setattr(Conv2DTest, "testInceptionBackFilter_" + str(index), test_util.run_in_graph_and_eager_modes( GetInceptionBackFilterTest(input_size_, filter_size_, output_size_, [stride_, stride_], padding_))) # TODO(b/35359731) # Fwd, BckInput, and BackFilter to test that for certain input parameter # set, winograd nonfused algorithm will be excluded from conv autotune. If # in such case, winograd nonfused algorithm is added as one option of the # conv autotune, and cuDNN version is smaller than 7, the following tests # will fail. 
ishape = [1, 400, 400, 1] fshape = [1, 1, 1, 256] oshape = [1, 400, 400, 256] setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True))) setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME"))) setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME", gpu_only=True))) setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused", test_util.run_in_graph_and_eager_modes( GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME", gpu_only=True))) test.main()
petewarden/tensorflow
tensorflow/python/kernel_tests/conv_ops_test.py
Python
apache-2.0
128,733
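The benchmarks above hinge on EXPLICIT conv padding being numerically equivalent to manually padding the input and running a VALID convolution. A minimal sketch of that equivalence, assuming TensorFlow 2.x in eager mode (the shapes are illustrative, not taken from the benchmark):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 8, 8, 3), dtype=tf.float32)   # NHWC input
w = tf.constant(np.random.rand(3, 3, 3, 4), dtype=tf.float32)   # HWIO filter

# EXPLICIT padding: one (low, high) pair per dimension, in NHWC order.
pad = [[0, 0], [1, 1], [1, 1], [0, 0]]
y_explicit = tf.nn.conv2d(x, w, strides=1, padding=pad)

# The same result via tf.pad followed by a VALID convolution.
y_manual = tf.nn.conv2d(tf.pad(x, pad), w, strides=1, padding="VALID")

np.testing.assert_allclose(y_explicit.numpy(), y_manual.numpy(), rtol=1e-5)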
#!/usr/bin/python


def averagepitch_calc(y):
    """Return the average pitch over the voiced regions of y."""
    s = 0
    # Mark the unvoiced regions (negative pitch values) as '0'
    length = len(y)
    for i in range(0, length):
        val = y[i]
        if val[0] == '-':
            y[i] = '0'
    count = 0
    # Average only over the voiced regions
    for i in range(0, length):
        if y[i] != '0':
            s += float(y[i])
            count += 1
    average_pitch = float(s) / count
    print average_pitch
    return average_pitch


def deviation_calc(array, mean):
    """Return runs of indices whose pitch deviates above the mean."""
    darray = []
    length = len(array)
    for i in range(0, length):
        val = float(array[i])
        if val > 1.15 * mean and val < 2 * mean:
            darray.append(i)
    return get_sub_list(darray)


def split_list(n):
    """Return the positions where a run of consecutive values breaks."""
    return [(x + 1) for x, y in zip(n, n[1:]) if y - x != 1]


def get_sub_list(my_list):
    """Split the list into runs of consecutive values, based on split_list."""
    my_index = split_list(my_list)
    output = list()
    prev = 0
    for index in my_index:
        new_list = [x for x in my_list[prev:] if x < index]
        output.append(new_list)
        prev += len(new_list)
    output.append([x for x in my_list[prev:]])
    return output


def time_deviation_single(f, array):
    # Used when f is a flat array of indices (not an array of runs)
    tarray = []
    for i in f:
        tarray.append(array[i])
    return tarray


def time_deviation(f, array):
    # f is an array of runs; keep the start and end index of each run
    tarray = []
    for run in f:
        if len(run) > 1:
            tarray.append(run[0])
            tarray.append(run[-1])
    time_stamp = []
    for i in range(0, len(tarray)):
        temp = tarray[i]
        time_stamp.append(array[temp])
    return time_stamp


def main():
    f = open('example2.txt')
    time_array = []
    pitch_array = []
    for line in f:
        r = line.split()
        time_array.append(r[0])   # Load the times into an array
        pitch_array.append(r[1])  # Load the pitch values into an array
    f.close()
    avg = averagepitch_calc(pitch_array)          # Average pitch over the voiced regions
    deviation = deviation_calc(pitch_array, avg)  # Runs of frames deviating from the average pitch
    # spatial_time = time_deviation_single(deviation, time_array)  # flat-index variant
    spatial_time = time_deviation(deviation, time_array)
    print 'spatial_time is'
    print spatial_time
    f = open('timestamp.txt', 'w')
    # spatial_time holds flattened (start, end) pairs; write one pair per line
    for i in range(0, len(spatial_time) - 1, 2):
        f.write(str(spatial_time[i]) + ' ' + str(spatial_time[i + 1]) + '\n')
    f.close()


if __name__ == '__main__':
    main()
saikrishnar/AudioRenderingofSTEM
systems/technique4/code/pitchChangeDetector.py
Python
apache-2.0
3,129
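The split_list/get_sub_list pair in the file above groups deviant frame indices into runs of consecutive values. For comparison, a compact standard-library equivalent of that grouping step (a sketch, not the author's code):

from itertools import groupby

def consecutive_runs(indices):
    # Indices in one run share a constant value-minus-position offset,
    # so grouping on that key yields maximal runs of consecutive integers.
    runs = []
    for _, group in groupby(enumerate(indices), key=lambda p: p[1] - p[0]):
        runs.append([value for _, value in group])
    return runs

print(consecutive_runs([3, 4, 5, 9, 10, 14]))  # [[3, 4, 5], [9, 10], [14]]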
# This file is part of the myhdl library, a Python package for using # Python as a Hardware Description Language. # # Copyright (C) 2003-2008 Jan Decaluwe # # The myhdl library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of the # License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ Run the unit tests for traceSignals """ from __future__ import absolute_import import os import random import pytest from myhdl import block, Signal, Simulation, _simulator, delay, instance, intbv from myhdl._traceSignals import TraceSignalsError, _error, traceSignals from helpers import raises_kind random.seed(1) # random, but deterministic path = os.path QUIET=1 @block def gen(clk): @instance def logic(): while 1: yield delay(10) clk.next = not clk return logic @block def fun(): clk = Signal(bool(0)) inst = gen(clk) return inst @block def dummy(): clk = Signal(bool(0)) inst = gen(clk) return 1 @block def top(): inst = traceSignals(fun()) return inst @block def top2(): inst = [{} for i in range(4)] j = 3 inst[j-2]['key'] = traceSignals(fun()) return inst @block def top3(): inst_1 = traceSignals(fun()) inst_2 = traceSignals(fun()) return inst_1, inst_2 @block def genTristate(clk, x, y, z): xd = x.driver() yd = y.driver() zd = z.driver() @instance def ckgen(): while 1: yield delay(10) clk.next = not clk @instance def logic(): for v in [True, False, None, 0, True, None, None, 1]: yield clk.posedge xd.next = v if v is None: yd.next = zd.next = None elif v: yd.next = zd.next = 11 else: yd.next = zd.next = 0 return ckgen,logic @block def tristate(): from myhdl import TristateSignal clk = Signal(bool(0)) x = TristateSignal(True) # single bit y = TristateSignal(intbv(0)) # intbv with undefined width z = TristateSignal(intbv(0)[8:]) # intbv with fixed width inst = genTristate(clk, x, y, z) return inst @block def topTristate(): inst = traceSignals(tristate()) return inst @pytest.yield_fixture def vcd_dir(tmpdir): with tmpdir.as_cwd(): yield tmpdir if _simulator._tracing: _simulator._tf.close() _simulator._tracing = 0 class TestTraceSigs: # TODO: multiple trace handling is different now has the # calls go bottom-up. To be revisited. 
# def testMultipleTraces(self, vcd_dir): # with raises_kind(TraceSignalsError, _error.MultipleTraces): # dut = top3() def testArgType1(self, vcd_dir): with raises_kind(TraceSignalsError, _error.ArgType): dut = traceSignals([1, 2]) # this test is no longer relevant # def testReturnVal(self, vcd_dir): # from myhdl import ExtractHierarchyError # from myhdl._extractHierarchy import _error # kind = _error.InconsistentToplevel % (2, "dummy") # with raises_kind(ExtractHierarchyError, kind): # dut = traceSignals(dummy()) def testHierarchicalTrace1(self, vcd_dir): p = "%s.vcd" % fun.__name__ top() assert path.exists(p) def testHierarchicalTrace2(self, vcd_dir): pdut = "%s.vcd" % top.__name__ psub = "%s.vcd" % fun.__name__ dut = traceSignals(top()) assert path.exists(pdut) assert not path.exists(psub) def testTristateTrace(self, vcd_dir): sim = Simulation(topTristate()) sim.run(100, quiet=QUIET) sim.quit() def testBackupOutputFile(self, vcd_dir): p = "%s.vcd" % fun.__name__ dut = traceSignals(fun()) sim = Simulation(dut) sim.run(1000, quiet=QUIET) sim.quit() _simulator._tf.close() _simulator._tracing = 0 size = path.getsize(p) pbak = p[:-4] + '.' + str(path.getmtime(p)) + '.vcd' assert not path.exists(pbak) dut = traceSignals(fun()) _simulator._tf.close() _simulator._tracing = 0 assert path.exists(p) assert path.exists(pbak) assert path.getsize(pbak) == size assert path.getsize(p) < size def testSetDirectory(self, vcd_dir): traceSignals.directory = 'some_vcd_dir' os.mkdir(path.join(str(vcd_dir), traceSignals.directory)) pdut = "%s.vcd" % top.__name__ psub = "%s.vcd" % fun.__name__ pdutd = path.join(traceSignals.directory, "%s.vcd" % top.__name__) psubd = path.join(traceSignals.directory, "%s.vcd" % fun.__name__) dut = traceSignals(top()) _simulator._tf.close() _simulator._tracing = 0 traceSignals.directory = None assert not path.exists(pdut) assert not path.exists(psub) assert path.exists(pdutd) assert not path.exists(psubd)
hgomersall/myhdl
myhdl/test/core/test_traceSignals.py
Python
lgpl-2.1
5,575
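The tests above all reduce to one usage pattern: wrap a block instance in traceSignals and simulate it, which should write a <blockname>.vcd in the working directory (or in traceSignals.directory when set). A minimal sketch using only the API exercised by the test module:

from myhdl import block, Signal, Simulation, delay, instance, traceSignals

@block
def blinker(clk):
    @instance
    def toggle():
        while True:
            yield delay(10)
            clk.next = not clk
    return toggle

@block
def bench():
    clk = Signal(bool(0))
    return blinker(clk)

dut = traceSignals(bench())  # expected to produce bench.vcd
Simulation(dut).run(200)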
from Screen import Screen from Components.ActionMap import ActionMap from Components.Harddisk import harddiskmanager from Components.MenuList import MenuList from Components.Label import Label from Components.Pixmap import Pixmap from Screens.MessageBox import MessageBox import Components.Task # [iq from Components.config import config from enigma import eTimer class HarddiskWait(Screen): def doInit(self): self.timer.stop() for i in range(0,5): result = self.hdd.initialize() if result != -2: break; self.close(result) def doCheck(self): self.timer.stop() result = self.hdd.check() self.close(result) def doConvert(self): self.timer.stop() result = self.hdd.convertExt3ToExt4() self.close(result) def __init__(self, session, hdd, type): Screen.__init__(self, session) self.hdd = hdd self.timer = eTimer() if type == HarddiskSetup.HARDDISK_INITIALIZE: text = _("Formatting in progress, please wait.") self.timer.callback.append(self.doInit) elif type == HarddiskSetup.HARDDISK_CHECK: text = _("Checking Filesystem, please wait.") self.timer.callback.append(self.doCheck) else: text = _("Changing Filesystem ext3 to ext4, please wait.") self.timer.callback.append(self.doConvert) self["wait"] = Label(text) self.timer.start(100) # iq] class HarddiskSetup(Screen): HARDDISK_INITIALIZE = 1 HARDDISK_CHECK = 2 HARDDISK_CHANGE_FILESYSTEM = 3 # [iq] # def __init__(self, session, hdd, action, text, question): def __init__(self, session, hdd, action = None, text = None, question = None, type = None): # [iq] Screen.__init__(self, session) self.hdd = hdd # [iq] self.action = action self.question = question # [iq if type not in (self.HARDDISK_INITIALIZE, self.HARDDISK_CHECK, self.HARDDISK_CHANGE_FILESYSTEM): self.type = self.HARDDISK_INITIALIZE else: self.type = type # iq] self["model"] = Label(_("Model: ") + hdd.model()) self["capacity"] = Label(_("Capacity: ") + hdd.capacity()) self["bus"] = Label(_("Bus: ") + hdd.bus()) self["initialize"] = Pixmap() # [iq if self.type == self.HARDDISK_INITIALIZE: text = _("Format") elif self.type == self.HARDDISK_CHECK: text = _("Check") else: #HARDDISK_CHANGE_FILESYSTEM text = _("Convert ext3 to ext4") # iq] self["initializetext"] = Label(text) self["actions"] = ActionMap(["OkCancelActions"], { "ok": self.close, "cancel": self.close }) self["shortcuts"] = ActionMap(["ShortcutActions"], { "red": self.hddQuestion }) # [iq def hddReady(self, result): print "Result: " + str(result) if (result != 0): if self.type == self.HARDDISK_INITIALIZE: message = _("Unable to format device.\nError: ") elif self.type == self.HARDDISK_CHECK: message = _("Unable to complete filesystem check.\nError: ") else: message = _("Unable to convert filesystem.\nError: ") self.session.open(MessageBox, message + str(self.hdd.errorList[0 - result]), MessageBox.TYPE_ERROR) else: self.close() # iq] def hddQuestion(self): # message = self.question + "\n" + _("You can continue watching TV etc. 
while this is running.") # [iq if self.type == self.HARDDISK_INITIALIZE: message = _("Do you really want to format the device?\nAll data on the disk will be lost!") elif self.type == self.HARDDISK_CHECK: message = _("Do you really want to check the filesystem?\nThis could take lots of time!") else: message = _("Do you really want to convert the filesystem?\nThis could take lots of time!") # iq] self.session.openWithCallback(self.hddConfirmed, MessageBox, message) def hddConfirmed(self, confirmed): if not confirmed: return # try: # Components.Task.job_manager.AddJob(self.action()) # except Exception, ex: # self.session.open(MessageBox, str(ex), type=MessageBox.TYPE_ERROR, timeout=10) # self.close() # [iq if config.usage.background_hddjob.value: try: Components.Task.job_manager.AddJob(self.action()) except Exception, ex: self.session.open(MessageBox, str(ex), type=MessageBox.TYPE_ERROR, timeout=10) self.close() else: print "this will start either the initialize or the fsck now!" self.session.openWithCallback(self.hddReady, HarddiskWait, self.hdd, self.type) # iq] class HarddiskSelection(Screen): def __init__(self, session): Screen.__init__(self, session) self.skinName = "HarddiskSelection" # For derived classes if harddiskmanager.HDDCount() == 0: tlist = [] tlist.append((_("no storage devices found"), 0)) self["hddlist"] = MenuList(tlist) else: self["hddlist"] = MenuList(harddiskmanager.HDDList()) self["actions"] = ActionMap(["OkCancelActions"], { "ok": self.okbuttonClick, "cancel": self.close }) def doIt(self, selection): self.session.openWithCallback(self.close, HarddiskSetup, selection, action=selection.createInitializeJob, text=_("Initialize"), # question=_("Do you really want to initialize the device?\nAll data on the disk will be lost!")) # [iq question=_("Do you really want to initialize the device?\nAll data on the disk will be lost!"), type=HarddiskSetup.HARDDISK_INITIALIZE) # iq] def okbuttonClick(self): selection = self["hddlist"].getCurrent() if selection[1] != 0: # self.doIt(selection[1]) # [iq if config.usage.background_hddjob.value: self.doIt(selection[1]) else: self.session.open(HarddiskSetup, selection[1], type=HarddiskSetup.HARDDISK_INITIALIZE) # iq] # This is actually just HarddiskSelection but with correct type class HarddiskFsckSelection(HarddiskSelection): def __init__(self, session): HarddiskSelection.__init__(self, session) self.skinName = "HarddiskSelection" # [iq def okbuttonClick(self): selection = self["hddlist"].getCurrent() if selection[1] != 0: if config.usage.background_hddjob.value: self.doIt(selection[1]) else: self.session.open(HarddiskSetup, selection[1], type=HarddiskSetup.HARDDISK_CHECK) # iq] def doIt(self, selection): self.session.openWithCallback(self.close, HarddiskSetup, selection, action=selection.createCheckJob, text=_("Check"), # question=_("Do you really want to check the filesystem?\nThis could take lots of time!")) # [iq question=_("Do you really want to check the filesystem?\nThis could take lots of time!"), type=HarddiskSetup.HARDDISK_CHECK) # iq] class HarddiskConvertExt4Selection(HarddiskSelection): def __init__(self, session): HarddiskSelection.__init__(self, session) self.skinName = "HarddiskSelection" # [iq def okbuttonClick(self): selection = self["hddlist"].getCurrent() if selection[1] != 0: if config.usage.background_hddjob.value: self.doIt(selection[1]) else: self.session.open(HarddiskSetup, selection[1], type=HarddiskSetup.HARDDISK_CHANGE_FILESYSTEM) # iq] def doIt(self, selection): self.session.openWithCallback(self.close, 
HarddiskSetup, selection, action=selection.createExt4ConversionJob, text=_("Convert ext3 to ext4"), # question=_("Do you really want to convert the filesystem?\nYou cannot go back!")) # [iq question=_("Do you really want to convert the filesystem?\nYou cannot go back!"), type=HarddiskSetup.HARDDISK_CHANGE_FILESYSTEM) # iq]
pli3/enigma2-git
lib/python/Screens/HarddiskSetup.py
Python
gpl-2.0
7,215
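The screens above follow Enigma2's callback convention: session.openWithCallback shows a MessageBox and later calls the handler with the user's yes/no answer, so the destructive action only runs after confirmation. Stripped of the Enigma2 API, the control flow is just this (a plain-Python sketch; ask and format_device are hypothetical stand-ins):

def ask(question, on_answer):
    # Stand-in for session.openWithCallback(on_answer, MessageBox, question).
    on_answer(input(question + " [y/N] ").strip().lower() == "y")

def format_device(device):
    print("formatting %s ..." % device)

def on_confirmed(confirmed):
    if not confirmed:
        return  # user declined; do nothing
    format_device("/dev/sda")

ask("Do you really want to format the device?", on_confirmed)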
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.unit import Unit
from ipCorePackager.constants import INTF_DIRECTION


def connect_to_const(val, intf: Interface):
    """
    Connect a constant to all output ports; used mainly
    during debugging to disable an interface
    """
    if intf._interfaces:
        for i in intf._interfaces:
            connect_to_const(val, i)
    else:
        if intf._direction == INTF_DIRECTION.SLAVE:
            intf(val)


class EmptyUnit(Unit):
    """
    :class:`hwt.synthesizer.unit.Unit` used for prototyping:
    all output interfaces are connected to _def_val,
    and that is the only thing the architecture contains

    :cvar _def_val: the value used to initialize all signals
    """
    _def_val = None

    def _impl(self):
        for i in self._interfaces:
            connect_to_const(self._def_val, i)
Nic30/HWToolkit
hwt/synthesizer/interfaceLevel/emptyUnit.py
Python
mit
873
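A typical use of EmptyUnit is as a stub for a component that is not implemented yet: subclass it, declare the same ports as the real unit, and choose a _def_val. A sketch under the assumption that the design uses hwt's standard Signal interface and the usual _declr() convention (DummyCore is a hypothetical name):

from hwt.interfaces.std import Signal

class DummyCore(EmptyUnit):
    # Every output of this stub is tied to 0 by EmptyUnit._impl.
    _def_val = 0

    def _declr(self):
        self.din = Signal()        # input, left unconnected
        self.dout = Signal()._m()  # output, driven with _def_val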
from libqtile.command.graph import * # noqa from libqtile.log_utils import logger logger.warning( 'libqtile.command_graph is deprecated. ' 'It has been moved to libqtile.command.graph' )
ramnes/qtile
libqtile/command_graph.py
Python
mit
197
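The module above is a complete deprecation shim: re-export the moved module and warn once at import time. The same pattern with the standard warnings module, for a hypothetical oldpkg.utils that moved to oldpkg.core.utils (a sketch, not libqtile code):

# oldpkg/utils.py - shim kept so old imports keep working
import warnings

from oldpkg.core.utils import *  # noqa: F401,F403

warnings.warn(
    "oldpkg.utils is deprecated; import oldpkg.core.utils instead",
    DeprecationWarning,
    stacklevel=2,
)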
# coding: utf-8 from django.conf.urls import patterns, url from klebercode.blog.views import (EntryYearArchiveView, EntryMonthArchiveView, EntryDayArchiveView, EntryListView, EntryDateDetailView, # EntryDetailViewAdmin, EntryTagListView, EntryCategoryListView) urlpatterns = patterns( 'klebercode.blog.views', url(r'^$', EntryListView.as_view(), name='home'), url(r'^(?P<year>\d{4})/$', EntryYearArchiveView.as_view(), name='entry_archive_year'), url(r'^(?P<year>\d{4})/(?P<month>\d+)/$', EntryMonthArchiveView.as_view(month_format='%m'), name='entry_archive_month'), url(r'^(?P<year>\d{4})/(?P<month>\d+)/(?P<day>\d+)/$', EntryDayArchiveView.as_view(month_format='%m'), name='entry_archive_day'), url(r'^(?P<year>\d{4})/(?P<month>\d+)/(?P<day>\d+)/(?P<slug>[-\w]+)/$', EntryDateDetailView.as_view(month_format='%m'), name='entry_date_detail'), # url(r'^(?P<slug>[-\w]+)/$', # EntryDetailViewAdmin, # name='entry_detail_admin'), url(r'^marcacao/(?P<tag_slug>[-\w]+)/$', EntryTagListView.as_view(), name='tag_list'), url(r'^categoria/(?P<cat_slug>[-\w]+)/$', EntryCategoryListView.as_view(), name='category_list'), )
klebercode/klebercode
klebercode/blog/urls.py
Python
gpl-2.0
1,391
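Given those patterns, entry_date_detail matches URLs like /2014/07/05/my-first-post/. A quick sketch of reversing it, using the same django.core.urlresolvers import style as this era of Django and assuming the app is included at the site root (the values are illustrative):

from django.core.urlresolvers import reverse

url = reverse('entry_date_detail',
              kwargs={'year': '2014', 'month': '07', 'day': '05',
                      'slug': 'my-first-post'})
# expected: '/2014/07/05/my-first-post/'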
# Each group builds its application in its own .py file (step 1 of 3)
from flask import Blueprint, render_template

# Use Blueprint to create ag6_40323152, with the url prefix /ag6_40323152, and set the template folder
ag6_40323152 = Blueprint('ag6_40323152', __name__, url_prefix='/ag6_40323152', template_folder='templates')

# Demonstrate returning a Brython program
@ag6_40323152.route('/A')
def task1():
    outstring = '''
from javascript import JSConstructor
from browser import window
import math

cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")

cgo.setWorldCoords(-250, -4500, 5000, 5000)

# Decide whether to draw the coordinate axes
#cgo.drawAxes(0, 240, 0, 240, {
#    "strokeColor":"#aaaaaa",
#    "fillColor": "#aaaaaa",
#    "xTickInterval": 20,
#    "xLabelInterval": 20,
#    "yTickInterval": 20,
#    "yLabelInterval": 20})

#cgo.drawText("Using the Cango drawing library!", 0, 0, {"fontSize":60, "fontWeight": 1200, "lorg":5 })
deg = math.pi/180

def O(x, y, rx, ry, rot, color, border, linewidth):
    # Rotation must be about the relative center; rot not working yet
    chamber = "M -6.8397, -1.4894 \
         A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
         A 40, 40, 0, 0, 1, 6.8397, -18.511 \
         A 7, 7, 0, 1, 0, -6.8397, -18.511 \
         A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
    cgoChamber = window.svgToCgoSVG(chamber)

    cmbr = cobj(cgoChamber, "SHAPE", {
            "fillColor": color,
            "border": border,
            "strokeColor": "tan",
            "lineWidth": linewidth })
    # Duplicate cmbr as basic4 and basic5
    basic4 = cmbr.dup()
    basic4.rotate(0)
    basic4.translate(40, 0)

    basic5 = cmbr.dup()
    basic5.rotate(0)
    basic5.translate(40, 20)

    cmbr.appendPath(basic4)
    cmbr.appendPath(basic5)

    # hole marks the origin position
    hole = cobj(shapedefs.circle(4), "PATH")
    cmbr.appendPath(hole)
    # Render at 3x scale
    #cgo.render(cmbr, x, y, 3, rot)
    # Render at 5x scale
    cgo.render(cmbr, x, y, 5, rot)

O(0, 0, 0, 0, 0, "lightyellow", True, 4)
'''
    return outstring

@ag6_40323152.route('/B')
def task2():
    outstring = '''
from javascript import JSConstructor
from browser import window
import math

cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")

cgo.setWorldCoords(-250, -4500, 5000, 5000)

# Decide whether to draw the coordinate axes
#cgo.drawAxes(0, 240, 0, 240, {
#    "strokeColor":"#aaaaaa",
#    "fillColor": "#aaaaaa",
#    "xTickInterval": 20,
#    "xLabelInterval": 20,
#    "yTickInterval": 20,
#    "yLabelInterval": 20})

#cgo.drawText("Using the Cango drawing library!", 0, 0, {"fontSize":60, "fontWeight": 1200, "lorg":5 })
deg = math.pi/180

def O(x, y, rx, ry, rot, color, border, linewidth):
    # Rotation must be about the relative center; rot not working yet
    chamber = "M -6.8397, -1.4894 \
         A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
         A 40, 40, 0, 0, 1, 6.8397, -18.511 \
         A 7, 7, 0, 1, 0, -6.8397, -18.511 \
         A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
    cgoChamber = window.svgToCgoSVG(chamber)

    cmbr = cobj(cgoChamber, "SHAPE", {
            "fillColor": color,
            "border": border,
            "strokeColor": "tan",
            "lineWidth": linewidth })
    # Duplicate cmbr as basic8 and basic9
    basic8 = cmbr.dup()
    basic8.rotate(120)
    basic8.translate(80, 20)

    basic9 = cmbr.dup()
    basic9.rotate(60)
    basic9.translate(80, 20)

    cmbr.appendPath(basic8)
    cmbr.appendPath(basic9)

    # hole marks the origin position
    hole = cobj(shapedefs.circle(4), "PATH")
    cmbr.appendPath(hole)
    # Render at 3x scale
    #cgo.render(cmbr, x, y, 3, rot)
    # Render at 5x scale
    cgo.render(cmbr, x, y, 5, rot)

O(0, 0, 0, 0, 0, "lightyellow", True, 4)
'''
    return outstring

@ag6_40323152.route('/C')
def task3():
    outstring = '''
from javascript import JSConstructor
from browser import window
import math

cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")

cgo.setWorldCoords(-250, -4500, 5000, 5000)

# Decide whether to draw the coordinate axes
#cgo.drawAxes(0, 240, 0, 240, {
#    "strokeColor":"#aaaaaa",
#    "fillColor": "#aaaaaa",
#    "xTickInterval": 20,
#    "xLabelInterval": 20,
#    "yTickInterval": 20,
#    "yLabelInterval": 20})

#cgo.drawText("Using the Cango drawing library!", 0, 0, {"fontSize":60, "fontWeight": 1200, "lorg":5 })
deg = math.pi/180

def O(x, y, rx, ry, rot, color, border, linewidth):
    # Rotation must be about the relative center; rot not working yet
    chamber = "M -6.8397, -1.4894 \
         A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
         A 40, 40, 0, 0, 1, 6.8397, -18.511 \
         A 7, 7, 0, 1, 0, -6.8397, -18.511 \
         A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
    cgoChamber = window.svgToCgoSVG(chamber)

    cmbr = cobj(cgoChamber, "SHAPE", {
            "fillColor": color,
            "border": border,
            "strokeColor": "tan",
            "lineWidth": linewidth })
    # Duplicate cmbr as basic5
    basic5 = cmbr.dup()
    basic5.rotate(90)
    basic5.translate((20*math.cos(60*deg)+120), (20*math.sin(60*deg)+40))

    cmbr.appendPath(basic5)

    # hole marks the origin position
    hole = cobj(shapedefs.circle(4), "PATH")
    cmbr.appendPath(hole)
    # Render at 3x scale
    #cgo.render(cmbr, x, y, 3, rot)
    # Render at 5x scale
    cgo.render(cmbr, x, y, 5, rot)

O(0, 0, 0, 0, 0, "lightyellow", True, 4)
'''
    return outstring

@ag6_40323152.route('/D')
def task4():
    outstring = '''
from javascript import JSConstructor
from browser import window
import math

cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")

cgo.setWorldCoords(-250, -4500, 5000, 5000)

# Decide whether to draw the coordinate axes
#cgo.drawAxes(0, 240, 0, 240, {
#    "strokeColor":"#aaaaaa",
#    "fillColor": "#aaaaaa",
#    "xTickInterval": 20,
#    "xLabelInterval": 20,
#    "yTickInterval": 20,
#    "yLabelInterval": 20})

#cgo.drawText("Using the Cango drawing library!", 0, 0, {"fontSize":60, "fontWeight": 1200, "lorg":5 })
deg = math.pi/180

def O(x, y, rx, ry, rot, color, border, linewidth):
    # Rotation must be about the relative center; rot not working yet
    chamber = "M -6.8397, -1.4894 \
         A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
         A 40, 40, 0, 0, 1, 6.8397, -18.511 \
         A 7, 7, 0, 1, 0, -6.8397, -18.511 \
         A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
    cgoChamber = window.svgToCgoSVG(chamber)

    cmbr = cobj(cgoChamber, "SHAPE", {
            "fillColor": color,
            "border": border,
            "strokeColor": "tan",
            "lineWidth": linewidth })
    # Duplicate cmbr as basic7 and basic8
    basic7 = cmbr.dup()
    basic7.rotate(60)
    basic7.translate(200, 60)

    basic8 = cmbr.dup()
    basic8.rotate(120)
    basic8.translate(200, -20)

    cmbr.appendPath(basic7)
    cmbr.appendPath(basic8)

    # hole marks the origin position
    hole = cobj(shapedefs.circle(4), "PATH")
    cmbr.appendPath(hole)
    # Render at 3x scale
    #cgo.render(cmbr, x, y, 3, rot)
    # Render at 5x scale
    cgo.render(cmbr, x, y, 5, rot)

O(0, 0, 0, 0, 0, "lightyellow", True, 4)
'''
    return outstring
2015fallhw/cdw2
users/s2a/g6/ag6_40323152_task1.py
Python
agpl-3.0
7,671
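These routes only become reachable once the blueprint is registered on a Flask application. A minimal sketch (the module name ag6_40323152_task1 is taken from the file path above; how it is importable in the real project is an assumption):

from flask import Flask
from ag6_40323152_task1 import ag6_40323152

app = Flask(__name__)
app.register_blueprint(ag6_40323152)  # serves /ag6_40323152/A ... /ag6_40323152/D

if __name__ == '__main__':
    app.run(debug=True)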
# ----------------------------------------------------------------------------- # yacc_nodoc.py # # Rule with a missing doc-string # ----------------------------------------------------------------------------- import sys if ".." not in sys.path: sys.path.insert(0,"..") import ply.yacc as yacc from calclex import tokens # Parsing rules precedence = ( ('left','PLUS','MINUS'), ('left','TIMES','DIVIDE'), ('right','UMINUS'), ) # dictionary of names names = { } def p_statement_assign(t): 'statement : NAME EQUALS expression' names[t[1]] = t[3] def p_statement_expr(t): print(t[1]) def p_expression_binop(t): '''expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression''' if t[2] == '+' : t[0] = t[1] + t[3] elif t[2] == '-': t[0] = t[1] - t[3] elif t[2] == '*': t[0] = t[1] * t[3] elif t[2] == '/': t[0] = t[1] / t[3] def p_expression_uminus(t): 'expression : MINUS expression %prec UMINUS' t[0] = -t[2] def p_expression_group(t): 'expression : LPAREN expression RPAREN' t[0] = t[2] def p_expression_number(t): 'expression : NUMBER' t[0] = t[1] def p_expression_name(t): 'expression : NAME' try: t[0] = names[t[1]] except LookupError: print("Undefined name '%s'" % t[1]) t[0] = 0 def p_error(t): print("Syntax error at '%s'" % t.value) yacc.yacc()
dpac-vlsi/SynchroTrace
util/ext/ply/test/yacc_nodoc.py
Python
bsd-3-clause
1,506
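In ply, a rule function's docstring is the grammar production, which is why yacc_nodoc.py deliberately leaves it off p_statement_expr: the test suite checks how yacc.yacc() diagnoses the missing docstring. For contrast, the corrected rule would read:

def p_statement_expr(t):
    'statement : expression'  # the docstring *is* the production
    print(t[1])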
# Copyright 2010-2014 Google # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os as _os __path__.append(_os.path.join(__path__[0], '..', 'gen', 'ortools', 'graph')) __path__.append(_os.path.join(__path__[0], '..', '..', 'lib'))
legrosbuffle/or-tools
ortools/graph/__init__.py
Python
apache-2.0
732
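That __init__.py extends the package's __path__ so that modules generated into a parallel gen/ tree (and prebuilt libraries) resolve as if they lived inside the package. The same trick in isolation, for a hypothetical package mypkg with generated code under gen/mypkg:

# mypkg/__init__.py
import os as _os

# Also search ../gen/mypkg when resolving "import mypkg.something".
__path__.append(_os.path.join(__path__[0], '..', 'gen', 'mypkg'))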
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Proxy AMI-related calls from cloud controller to objectstore service.""" import base64 import binascii import os import shutil import tarfile import tempfile import boto.s3.connection import eventlet from lxml import etree from nova.api.ec2 import ec2utils import nova.cert.rpcapi from nova import exception from nova.image import glance from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova import utils LOG = logging.getLogger(__name__) s3_opts = [ cfg.StrOpt('image_decryption_dir', default='/tmp', help='parent dir for tempdir used for image decryption'), cfg.StrOpt('s3_access_key', default='notchecked', help='access key to use for s3 server for images'), cfg.StrOpt('s3_secret_key', default='notchecked', help='secret key to use for s3 server for images'), cfg.BoolOpt('s3_use_ssl', default=False, help='whether to use ssl when talking to s3'), cfg.BoolOpt('s3_affix_tenant', default=False, help='whether to affix the tenant id to the access key ' 'when downloading from s3'), ] CONF = cfg.CONF CONF.register_opts(s3_opts) CONF.import_opt('s3_host', 'nova.config') CONF.import_opt('s3_port', 'nova.config') class S3ImageService(object): """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): self.cert_rpcapi = nova.cert.rpcapi.CertAPI() self.service = service or glance.get_default_image_service() self.service.__init__(*args, **kwargs) def _translate_uuids_to_ids(self, context, images): return [self._translate_uuid_to_id(context, img) for img in images] def _translate_uuid_to_id(self, context, image): image_copy = image.copy() try: image_uuid = image_copy['id'] except KeyError: pass else: image_copy['id'] = ec2utils.glance_id_to_id(context, image_uuid) for prop in ['kernel_id', 'ramdisk_id']: try: image_uuid = image_copy['properties'][prop] except (KeyError, ValueError): pass else: image_id = ec2utils.glance_id_to_id(context, image_uuid) image_copy['properties'][prop] = image_id return image_copy def _translate_id_to_uuid(self, context, image): image_copy = image.copy() try: image_id = image_copy['id'] except KeyError: pass else: image_copy['id'] = ec2utils.id_to_glance_id(context, image_id) for prop in ['kernel_id', 'ramdisk_id']: try: image_id = image_copy['properties'][prop] except (KeyError, ValueError): pass else: image_uuid = ec2utils.id_to_glance_id(context, image_id) image_copy['properties'][prop] = image_uuid return image_copy def create(self, context, metadata, data=None): """Create an image. metadata['properties'] should contain image_location. 
""" image = self._s3_create(context, metadata) return image def delete(self, context, image_id): image_uuid = ec2utils.id_to_glance_id(context, image_id) self.service.delete(context, image_uuid) def update(self, context, image_id, metadata, data=None): image_uuid = ec2utils.id_to_glance_id(context, image_id) metadata = self._translate_id_to_uuid(context, metadata) image = self.service.update(context, image_uuid, metadata, data) return self._translate_uuid_to_id(context, image) def detail(self, context, **kwargs): #NOTE(bcwaldon): sort asc to make sure we assign lower ids # to older images kwargs.setdefault('sort_dir', 'asc') images = self.service.detail(context, **kwargs) return self._translate_uuids_to_ids(context, images) def show(self, context, image_id): image_uuid = ec2utils.id_to_glance_id(context, image_id) image = self.service.show(context, image_uuid) return self._translate_uuid_to_id(context, image) @staticmethod def _conn(context): # NOTE(vish): access and secret keys for s3 server are not # checked in nova-objectstore access = CONF.s3_access_key if CONF.s3_affix_tenant: access = '%s:%s' % (access, context.project_id) secret = CONF.s3_secret_key calling = boto.s3.connection.OrdinaryCallingFormat() return boto.s3.connection.S3Connection(aws_access_key_id=access, aws_secret_access_key=secret, is_secure=CONF.s3_use_ssl, calling_format=calling, port=CONF.s3_port, host=CONF.s3_host) @staticmethod def _download_file(bucket, filename, local_dir): key = bucket.get_key(filename) local_filename = os.path.join(local_dir, os.path.basename(filename)) key.get_contents_to_filename(local_filename) return local_filename def _s3_parse_manifest(self, context, metadata, manifest): manifest = etree.fromstring(manifest) image_format = 'ami' image_type = 'machine' try: kernel_id = manifest.find('machine_configuration/kernel_id').text if kernel_id == 'true': image_format = 'aki' image_type = 'kernel' kernel_id = None except Exception: kernel_id = None try: ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text if ramdisk_id == 'true': image_format = 'ari' image_type = 'ramdisk' ramdisk_id = None except Exception: ramdisk_id = None try: arch = manifest.find('machine_configuration/architecture').text except Exception: arch = 'x86_64' # NOTE(yamahata): # EC2 ec2-budlne-image --block-device-mapping accepts # <virtual name>=<device name> where # virtual name = {ami, root, swap, ephemeral<N>} # where N is no negative integer # device name = the device name seen by guest kernel. # They are converted into # block_device_mapping/mapping/{virtual, device} # # Do NOT confuse this with ec2-register's block device mapping # argument. 
mappings = [] try: block_device_mapping = manifest.findall('machine_configuration/' 'block_device_mapping/' 'mapping') for bdm in block_device_mapping: mappings.append({'virtual': bdm.find('virtual').text, 'device': bdm.find('device').text}) except Exception: mappings = [] properties = metadata['properties'] properties['architecture'] = arch def _translate_dependent_image_id(image_key, image_id): image_uuid = ec2utils.ec2_id_to_glance_id(context, image_id) properties[image_key] = image_uuid if kernel_id: _translate_dependent_image_id('kernel_id', kernel_id) if ramdisk_id: _translate_dependent_image_id('ramdisk_id', ramdisk_id) if mappings: properties['mappings'] = mappings metadata.update({'disk_format': image_format, 'container_format': image_format, 'status': 'queued', 'is_public': False, 'properties': properties}) metadata['properties']['image_state'] = 'pending' #TODO(bcwaldon): right now, this removes user-defined ids. # We need to re-enable this. image_id = metadata.pop('id', None) image = self.service.create(context, metadata) # extract the new uuid and generate an int id to present back to user image_uuid = image['id'] image['id'] = ec2utils.glance_id_to_id(context, image_uuid) # return image_uuid so the caller can still make use of image_service return manifest, image, image_uuid def _s3_create(self, context, metadata): """Gets a manifest from s3 and makes an image.""" image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir) image_location = metadata['properties']['image_location'] bucket_name = image_location.split('/')[0] manifest_path = image_location[len(bucket_name) + 1:] bucket = self._conn(context).get_bucket(bucket_name) key = bucket.get_key(manifest_path) manifest = key.get_contents_as_string() manifest, image, image_uuid = self._s3_parse_manifest(context, metadata, manifest) def delayed_create(): """This handles the fetching and decrypting of the part files.""" context.update_store() log_vars = {'image_location': image_location, 'image_path': image_path} def _update_image_state(context, image_uuid, image_state): metadata = {'properties': {'image_state': image_state}} self.service.update(context, image_uuid, metadata, purge_props=False) def _update_image_data(context, image_uuid, image_data): metadata = {} self.service.update(context, image_uuid, metadata, image_data, purge_props=False) _update_image_state(context, image_uuid, 'downloading') try: parts = [] elements = manifest.find('image').getiterator('filename') for fn_element in elements: part = self._download_file(bucket, fn_element.text, image_path) parts.append(part) # NOTE(vish): this may be suboptimal, should we use cat? 
enc_filename = os.path.join(image_path, 'image.encrypted') with open(enc_filename, 'w') as combined: for filename in parts: with open(filename) as part: shutil.copyfileobj(part, combined) except Exception: LOG.exception(_("Failed to download %(image_location)s " "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_download') return _update_image_state(context, image_uuid, 'decrypting') try: hex_key = manifest.find('image/ec2_encrypted_key').text encrypted_key = binascii.a2b_hex(hex_key) hex_iv = manifest.find('image/ec2_encrypted_iv').text encrypted_iv = binascii.a2b_hex(hex_iv) dec_filename = os.path.join(image_path, 'image.tar.gz') self._decrypt_image(context, enc_filename, encrypted_key, encrypted_iv, dec_filename) except Exception: LOG.exception(_("Failed to decrypt %(image_location)s " "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_decrypt') return _update_image_state(context, image_uuid, 'untarring') try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: LOG.exception(_("Failed to untar %(image_location)s " "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_untar') return _update_image_state(context, image_uuid, 'uploading') try: with open(unz_filename) as image_file: _update_image_data(context, image_uuid, image_file) except Exception: LOG.exception(_("Failed to upload %(image_location)s " "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_upload') return metadata = {'status': 'active', 'properties': {'image_state': 'available'}} self.service.update(context, image_uuid, metadata, purge_props=False) shutil.rmtree(image_path) eventlet.spawn_n(delayed_create) return image def _decrypt_image(self, context, encrypted_filename, encrypted_key, encrypted_iv, decrypted_filename): elevated = context.elevated() try: key = self.cert_rpcapi.decrypt_text(elevated, project_id=context.project_id, text=base64.b64encode(encrypted_key)) except Exception, exc: msg = _('Failed to decrypt private key: %s') % exc raise exception.NovaException(msg) try: iv = self.cert_rpcapi.decrypt_text(elevated, project_id=context.project_id, text=base64.b64encode(encrypted_iv)) except Exception, exc: raise exception.NovaException(_('Failed to decrypt initialization ' 'vector: %s') % exc) try: utils.execute('openssl', 'enc', '-d', '-aes-128-cbc', '-in', '%s' % (encrypted_filename,), '-K', '%s' % (key,), '-iv', '%s' % (iv,), '-out', '%s' % (decrypted_filename,)) except exception.ProcessExecutionError, exc: raise exception.NovaException(_('Failed to decrypt image file ' '%(image_file)s: %(err)s') % {'image_file': encrypted_filename, 'err': exc.stdout}) @staticmethod def _test_for_malicious_tarball(path, filename): """Raises exception if extracting tarball would escape extract path""" tar_file = tarfile.open(filename, 'r|gz') for n in tar_file.getnames(): if not os.path.abspath(os.path.join(path, n)).startswith(path): tar_file.close() raise exception.NovaException(_('Unsafe filenames in image')) tar_file.close() @staticmethod def _untarzip_image(path, filename): S3ImageService._test_for_malicious_tarball(path, filename) tar_file = tarfile.open(filename, 'r|gz') tar_file.extractall(path) image_file = tar_file.getnames()[0] tar_file.close() return os.path.join(path, image_file)
aristanetworks/arista-ovs-nova
nova/image/s3.py
Python
apache-2.0
16,276
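The _test_for_malicious_tarball helper above guards extractall against path-traversal entries such as ../../etc/passwd. A standalone sketch of the same check using only the standard library:

import os
import tarfile

def safe_extract(archive_path, dest):
    dest = os.path.abspath(dest)
    with tarfile.open(archive_path, 'r:gz') as tar:
        for name in tar.getnames():
            # Reject members that would land outside the destination.
            target = os.path.abspath(os.path.join(dest, name))
            if not target.startswith(dest + os.sep):
                raise RuntimeError('unsafe path in archive: %s' % name)
        tar.extractall(dest)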
# coding: utf-8 """ OpenAPI spec version: Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pformat from six import iteritems import re class V1PersistentVolumeStatus(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ operations = [ ] # The key is attribute name # and the value is attribute type. swagger_types = { 'phase': 'str', 'message': 'str', 'reason': 'str' } # The key is attribute name # and the value is json key in definition. attribute_map = { 'phase': 'phase', 'message': 'message', 'reason': 'reason' } def __init__(self, phase=None, message=None, reason=None): """ V1PersistentVolumeStatus - a model defined in Swagger """ self._phase = phase self._message = message self._reason = reason @property def phase(self): """ Gets the phase of this V1PersistentVolumeStatus. Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#phase :return: The phase of this V1PersistentVolumeStatus. :rtype: str """ return self._phase @phase.setter def phase(self, phase): """ Sets the phase of this V1PersistentVolumeStatus. Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#phase :param phase: The phase of this V1PersistentVolumeStatus. :type: str """ self._phase = phase @property def message(self): """ Gets the message of this V1PersistentVolumeStatus. A human-readable message indicating details about why the volume is in this state. :return: The message of this V1PersistentVolumeStatus. :rtype: str """ return self._message @message.setter def message(self, message): """ Sets the message of this V1PersistentVolumeStatus. A human-readable message indicating details about why the volume is in this state. :param message: The message of this V1PersistentVolumeStatus. :type: str """ self._message = message @property def reason(self): """ Gets the reason of this V1PersistentVolumeStatus. Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. :return: The reason of this V1PersistentVolumeStatus. :rtype: str """ return self._reason @reason.setter def reason(self, reason): """ Sets the reason of this V1PersistentVolumeStatus. Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. :param reason: The reason of this V1PersistentVolumeStatus. 
:type: str """ self._reason = reason def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(V1PersistentVolumeStatus.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
detiber/lib_openshift
lib_openshift/models/v1_persistent_volume_status.py
Python
apache-2.0
5,219
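Usage of these generated models is uniform: construct with keyword arguments, adjust fields via the properties, and serialize with to_dict(). A short sketch for the class above (the field values are illustrative):

status = V1PersistentVolumeStatus(phase='Bound')
status.reason = 'VolumeBound'

print(status.to_dict())
# expected: {'phase': 'Bound', 'message': None, 'reason': 'VolumeBound'}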
from django.conf import settings from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.test import TestCase from juakstore.registration.forms import RegistrationForm class SimpleBackendViewTests(TestCase): urls = 'registration.backends.simple.urls' def test_allow(self): """ The setting ``REGISTRATION_OPEN`` appropriately controls whether registration is permitted. """ old_allowed = getattr(settings, 'REGISTRATION_OPEN', True) settings.REGISTRATION_OPEN = True resp = self.client.get(reverse('registration_register')) self.assertEqual(200, resp.status_code) settings.REGISTRATION_OPEN = False # Now all attempts to hit the register view should redirect to # the 'registration is closed' message. resp = self.client.get(reverse('registration_register')) self.assertRedirects(resp, reverse('registration_disallowed')) resp = self.client.post(reverse('registration_register'), data={'username': 'bob', 'email': 'bob@example.com', 'password1': 'secret', 'password2': 'secret'}) self.assertRedirects(resp, reverse('registration_disallowed')) settings.REGISTRATION_OPEN = old_allowed def test_registration_get(self): """ HTTP ``GET`` to the registration view uses the appropriate template and populates a registration form into the context. """ resp = self.client.get(reverse('registration_register')) self.assertEqual(200, resp.status_code) self.assertTemplateUsed(resp, 'registration/registration_form.html') self.failUnless(isinstance(resp.context['form'], RegistrationForm)) def test_registration(self): """ Registration creates a new account and logs the user in. """ resp = self.client.post(reverse('registration_register'), data={'username': 'bob', 'email': 'bob@example.com', 'password1': 'secret', 'password2': 'secret'}) new_user = User.objects.get(username='bob') self.assertEqual(302, resp.status_code) self.failUnless(new_user.get_absolute_url() in resp['Location']) self.failUnless(new_user.check_password('secret')) self.assertEqual(new_user.email, 'bob@example.com') # New user must be active. self.failUnless(new_user.is_active) # New user must be logged in. resp = self.client.get(reverse('registration_register')) self.failUnless(resp.context['user'].is_authenticated()) def test_registration_failure(self): """ Registering with invalid data fails. """ resp = self.client.post(reverse('registration_register'), data={'username': 'bob', 'email': 'bob@example.com', 'password1': 'secret', 'password2': 'notsecret'}) self.assertEqual(200, resp.status_code) self.failIf(resp.context['form'].is_valid())
CSC301H-Fall2013/JuakStore
Storefront/juakstore/juakregister/tests/simple_backend.py
Python
mit
3,480
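test_allow above toggles REGISTRATION_OPEN by mutating settings and restoring the old value by hand. On Django 1.4+ the same idea reads more safely with the settings() context manager; a sketch in the same test style:

def test_registration_closed(self):
    with self.settings(REGISTRATION_OPEN=False):
        resp = self.client.get(reverse('registration_register'))
        self.assertRedirects(resp, reverse('registration_disallowed'))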
#!/usr/bin/env python import os import re import sys import codecs import argparse import itertools from pylinkgrammar.linkgrammar import Parser, ParseOptions PARSE_VIA_REGEX = True # Make sure utf8 chars don't break consumers (e.g. if consuming via a pipe from Node) sys.stdout = codecs.getwriter('utf8')(sys.stdout) argparser = argparse.ArgumentParser(description="Analyze statements for a Logic Puzzle game") argparser.add_argument('-v', '--verbose', action="store_true", help='whether or not to print verbose logs') argparser.add_argument('-q', '--quiet', action="store_true", help='whether or not to trim out unnecessary logs') argparser.add_argument('-d', '--directory', action="store_true", help='whether or not the "-i" input is a directory of puzzle directories') argparser.add_argument('-i', '--input', nargs="+", type=str, help="an input directory containing the files \"entities.txt\", \"clues.txt\", and \"answers.txt\"\n\n" + "Entitites file: an input file containing information about the Entity Types and specific Entities in use. Structure should be \"Type1,Type2,...\n\nEntity1a,Entity1b,...\nEntity2a,...\"\n" + "Clues files: an input file containing statements for a logic puzzle. Each statement should be on a separate line (e.g separated by newline)") def main(inputDir, verbose=False, quiet=False): # Inputs entitiesFile = '/'.join([inputDir, 'entities.txt']) statementsFile = '/'.join([inputDir, 'clues.txt']) expectedParseFile = '/'.join([inputDir, 'parseExpected.txt']) actualParseFile = '/'.join([inputDir, 'parseActual.txt']) # ERROR! if not statementsFile: argparser.print_help() return sys.exit() # Read in the file inputs statements = readStatements(statementsFile) entitiesByType = readEntities(entitiesFile) try: expectedParses = readStatements(expectedParseFile) except: expectedParses = None # Link-grammar parser p = Parser(max_null_count=2, verbosity=0) print "" # Whitespace after the Parser's printing print promptColors()['BLUE'] + inputDir + promptColors()['COLOR_NONE'] # Book keeping as we parse all the statements total, success, fail = [0, 0, 0] actuals = [] i = 0 # Parse each clue statement for s in statements: expected = expectedParses[i] if expectedParses else None i = i + 1 if not s: continue entities, comparison, quantifier = [None, None, None] try: entities, comparison, quantifier = parseSentence(i, p, s, entitiesByType) # NAIVE PARSE FAILURE # Try NER parse... except (LinkageError, ParseError) as e: if verbose: print "Problem parsing unadulterated sentence: {0}".format(e) print 'Attempting NER-replacement to help the parser do better...' 
try: replacedEntitiesByType, replacedSentence = replaceEntities(entitiesByType, s) i_orig = i i = str(i) + ' [NER]' replacedEntities, comparison, quantifier = parseSentence(i, p, replacedSentence, replacedEntitiesByType) entities = unreplaceEntities(replacedEntities, replacedEntitiesByType, entitiesByType) except (LinkageError, ParseError) as eNer: if verbose: print "Problem parsing NER-replaced sentence: {0}".format(eNer) i = i_orig # PARSE FAILURE if not entities: if expected: print promptColors()['PURPLE'] + " " + expected + "\t (Expected)" fail += 1 # PARSE SUCCESS else: try: prettyParsed = ', '.join(entities) + ((', ' + ' '.join(quantifier)) if quantifier else '') except (TypeError) as e: print " Entities: {0}, Quantifier: {1}".format(entities, quantifier) prettyParsed = "{0}Problem formatting parse: {1}{2}".format(promptColors()['RED'], e, promptColors()['COLOR_NONE']) actual = comparison + "(" + prettyParsed + ")" actuals.append(actual) if verbose: print "" # Correct parse if not expected or actual == expected: if not expectedParses: print " " + actual + " (Actual)" elif not quiet: print promptColors()['LIGHT_GREEN'] + (u"\u2713" if expected else " ") + promptColors()['COLOR_NONE'] \ + " " + actual + promptColors()['COLOR_NONE'] success += 1 # Incorrect parse else: print promptColors()['YELLOW'] + u"\u0078" + promptColors()[ 'COLOR_NONE'] + " " + actual + "\t (Actual)" if expected: print promptColors()['YELLOW'] + " " + expected + "\t (Expected)" \ + promptColors()['COLOR_NONE'] fail += 1 if verbose: print "" total += 1 # # DONE PARSING *THIS* STATEMENT ################################ # # DONE PARSING *ALL* STATEMENTS ################################ writeActual(actualParseFile, actuals) return [total, success, fail] # ************************************************************************************ def parseSentence(i, p, s, entitiesByType): # # PARSE THE STATEMENT # print promptColors()['LIGHT_GRAY'] + str(i) + ". " + s + promptColors()['COLOR_NONE'] l = p.parse_sent(s) # PARSE FAILURE if len(l) < 1: raise LinkageError('No linkages found in link-grammar parser') # PARSE SUCCESS outFile = 'out/linkage_' + str(i) + '.ps' # # LINKAGE PARSE # # Choose the best linkage from the parse best = [None, None, None] for linkage in l: entities, comparison, quantifier = parseLinkage(linkage, s, entitiesByType, outFile, verbose) if entities and ((entities[0] and 'xor' in entities[1]) or (comparison and quantifier)): best = [entities, comparison, quantifier] break elif entities and comparison: if not best[0]: best = [entities, comparison, quantifier] entities, comparison, quantifier = best if not entities: # Try to default so we don't totally fail to parse the sentence entities = parseAllEntities(entitiesByType, s, verbose) comparison, quantifier = parseComparisons([], None, entitiesByType, verbose) # Get default comparison ("is") if (len(entities) >= 2): return [entities, comparison, quantifier] print promptColors()['LIGHT_RED'] + u"\u0078" \ + " Failed to parse any linkages (tried " + str(len(l)) + ")!" \ + promptColors()['COLOR_NONE'] raise ParseError('No viable entity/comparison/quantifier parses found') return entities, comparison, quantifier # ************************************************************************************ def parseLinkage(linkage, sentence, entitiesByType, outFile, verbose=False): if verbose: if outFile: open(outFile, 'w').write(linkage.postscript) print linkage.constituent_phrases_nested print linkage.diagram print "\nParsing linkage's constituent phrases..." 
    partsOfStatement = linkage.constituent_phrases_flat
    return parseConstituentParts(entitiesByType, partsOfStatement, sentence, verbose)


# ************************************************************************************
def replaceEntities(entitiesByType, sentence):
    # TODO
    replacedEntitiesByType = dict(entitiesByType)
    replacedSentence = str(sentence)

    if 'ages' not in replacedEntitiesByType:
        return replacedEntitiesByType, sentence

    replacedEntitiesByType['ages'] = ['Entity1', 'Entity2', 'Entity3', 'Entity4']
    replacedSentence = replacedSentence.replace(entitiesByType['ages'][0], 'Entity1')
    replacedSentence = replacedSentence.replace(entitiesByType['ages'][1], 'Entity2')
    replacedSentence = replacedSentence.replace(entitiesByType['ages'][2], 'Entity3')
    replacedSentence = replacedSentence.replace(entitiesByType['ages'][3], 'Entity4')

    return replacedEntitiesByType, replacedSentence


# ************************************************************************************
def unreplaceEntities(replacedEntities, replacedEntitiesByType, entitiesByType):
    entities = []
    for entity in replacedEntities:
        for category in replacedEntitiesByType:
            categoryEntities = replacedEntitiesByType[category]
            if entity in categoryEntities:
                # Un-replace the entity string
                entities.append(entitiesByType[category][categoryEntities.index(entity)])
    return entities


# ************************************************************************************
def parseFirstEntity(entitiesByType, words, verbose=False):
    allEntities = parseAllEntities(entitiesByType, words, verbose)

    # Hm.. this shouldn't happen
    if len(allEntities) < 1:
        if verbose:
            print promptColors()['RED'] + "Didn't find any entities, but expected to find 1" + promptColors()[
                'COLOR_NONE']
        return None

    # Ah! This wasn't expected!
    if verbose and len(allEntities) > 1:
        print promptColors()['RED'] + "Found multiple entities, but only expected 1: " + ', '.join(allEntities) + \
              promptColors()['COLOR_NONE']

    return allEntities[0]


# ************************************************************************************
def getAllEntities(entitiesByType, verbose=False):
    allEntities = list(itertools.chain.from_iterable(entitiesByType.values()))
    allEntities.sort(key=len, reverse=True)  # sorts by descending length
    return [entity.strip() for entity in allEntities]


# ************************************************************************************
def parseAllEntities(entitiesByType, words, verbose=False):
    allEntities = getAllEntities(entitiesByType)
    sentence = ' '.join(words).lower()
    # Some entities (like 'sailboat') get parsed into 2 words 'sail boat',
    # so this is a hack to detect those as well
    sentenceNoSpace = noSpace(sentence)

    # Pseudo Named Entity Recognition
    # Are any of the provided words a known Entity?
    # NOTE: Logic is done from the known entity perspective so that it will work on multi-word entities
    entities = [entity for entity in allEntities
                if (entity.lower() in sentence or noSpace(entity) in sentenceNoSpace)]

    # Sort by the order they appeared in the sentence
    entities.sort(key=lambda entity: (sentence.index(entity.lower())
                                      if entity.lower() in sentence
                                      else (sentenceNoSpace.index(noSpace(entity))
                                            if noSpace(entity) in sentenceNoSpace
                                            else None)))

    # Ensure we don't mistakenly get extra entities in the case where some entity names exist in others
    # E.g. "12 silver" would naively be parsed as ["12 silver", "2 silver"]
    # Iterate over a snapshot so removing from `entities` mid-loop is safe
    for entity in list(entities):
        otherEntities = list(entities)
        otherEntities.remove(entity)

        # If this entity is a substring of another entity, remove it!
if bool([e for e in otherEntities if entity in e]): entities.remove(entity) return entities # ************************************************************************************ def noSpace(x): return ''.join(x.lower().split()) # ************************************************************************************ def readStatements(statementsFile): return [statement for statement in readFile(statementsFile).split("\n") if statement] # ************************************************************************************ def readEntities(entitiesFile): content = readFile(entitiesFile) x = content.split("\n\n") types = x[0].split(", ") entitiesByType = {} i = 0 for entities in x[1].split("\n"): if not entities: continue entityType = types[i] if entityType: entitiesByType[entityType] = entities.split(', ') i += 1 return entitiesByType # ************************************************************************************ def writeActual(actualFile, actuals): return writeFile(actualFile, '\n'.join(actuals)) # ************************************************************************************ def readFile(file): input_file = open(file) return input_file.read() # ************************************************************************************ def writeFile(file, contents): input_file = open(file, 'w') return input_file.write(contents + '\n') # ************************************************************************************ def invertEntityMap(entitiesByType): inverted = {} for entityType, entities in entitiesByType.items(): for entity in entities: inverted[entity] = entityType return inverted def getWordsByPosIdxLUT(parts): charIdx = 0 index = {} for node in parts: index[charIdx] = node.words # Add 1 because the POS tags are separated by spaces charIdx = charIdx + len(node.type) + 1 return index # lut - a wordsByPosIdx lookup table # idxRange - a start (inclusive) and end (exclusive) POS idx # returns the words found in the POS Idx range def getWordsByPosIdx(lut, posIdxRange): words = [v for k, v in lut.iteritems() if posIdxRange[0] <= k < posIdxRange[1]] return flatten(words) def parseConstituentParts(entitiesByType, parts, sentence, verbose=False): ENTITY_PHRASE = r"((NP )?(NP )?((VP )?PP )?)?NP" X = ENTITY_PHRASE.count('(') ENTITY_PHRASE_SIMPLE = r"(NP )?NP" Y = ENTITY_PHRASE_SIMPLE.count('(') ENTITY_PHRASE_PASSIVE = r"(NP )?NP SBAR WHNP S (VP (PP )?(NP )?NP)" Z = ENTITY_PHRASE_PASSIVE.count('(') CMP_ADVP = r"^S (" + ENTITY_PHRASE + ").* (VP (NP )?(ADVP )(PP)) (" + ENTITY_PHRASE + ")$" CMP_VPVPVP = r"^S (" + ENTITY_PHRASE + ").* VP (VP )+(NP )?(PP )(" + ENTITY_PHRASE + ")$" CMP_ADJP = r"^S (" + ENTITY_PHRASE + ").* (VP (NP )?(ADJP )PP) (" + ENTITY_PHRASE + ")$" CMP_PP = r"^S (" + ENTITY_PHRASE + ") (VP (NP )?(PP )(NP )?)(" + ENTITY_PHRASE + ")$" CMP_NP = r"^S (" + ENTITY_PHRASE + ").* (PP )?(NP )PP (" + ENTITY_PHRASE + ")$" CMP_VP = r"^S (" + ENTITY_PHRASE + ").* (VP )(" + ENTITY_PHRASE + ")$" # TODO: Convert CMP_WHNP_VP to use ENTITY_PHRASE_PASSIVE CMP_WHNP_VP = r"^S .* SBAR WHNP S (VP (PP )?)?(" + ENTITY_PHRASE_SIMPLE + ") (VP )(" + ENTITY_PHRASE_SIMPLE + ")" CMP_WHNP_PP = r"^S .*?(" + ENTITY_PHRASE_PASSIVE + ") (VP (NP )?(PP )?NP) (PP )?(" + ENTITY_PHRASE + ")$" CMP_WHNP_ADJP_WHNP = r"^S .*?(" + ENTITY_PHRASE_PASSIVE + ") (VP (NP )?(PP )?NP) (ADJP ADVP (PP )?)(" + ENTITY_PHRASE_PASSIVE + "$)" CMP_ADJP_ADVP = r"^S (" + ENTITY_PHRASE + ").* PP (NP )?(ADJP )?(ADVP )PP (" + ENTITY_PHRASE + ")$" EQUALITY_VP = r"^S .*?(" + ENTITY_PHRASE + ") (VP )?VP (" + ENTITY_PHRASE + ")$" # # Regexes 
for identifying Entities, Comparisons, and Quantifiers ordered by priority # These should be sorted by order of preference. First match found is used. # # NOTE: 'entities' values are **both** used # 'comparison' values are only used for the first match # 'quantifier' values are only used for the first match # # TODO: Use official English tenses (https://www.ego4u.com/en/cram-up/grammar/tenses) # Include examples of each tense PHRASE_MATCHERS = [ {'name': 'Future (ADVP)', 'reg': CMP_ADVP, 'entities': [1, X + 6], 'comparison': [X + 5], 'quantifier': [X + 4, X + 3]}, {'name': 'Future (VPVPVP)', 'reg': CMP_VPVPVP, 'entities': [1, X + 5], 'comparison': [X + 4], 'quantifier': [X + 3]}, {'name': 'Simple (NP)', 'reg': CMP_NP, 'entities': [1, X + 4], 'comparison': [X + 3], 'quantifier': [X + 3]}, {'name': 'Present (ADJP)', 'reg': CMP_ADJP, 'entities': [1, X + 5], 'comparison': [X + 4], 'quantifier': [X + 3, X + 2]}, {'name': 'Present (PP)', 'reg': CMP_PP, 'entities': [1, X + 6], 'comparison': [X + 4], 'quantifier': [X + 3, X + 5]}, {'name': 'Simple (VP)', 'reg': CMP_VP, 'entities': [1, X + 3], 'comparison': [X + 2], 'quantifier': []}, {'name': 'Past Passive (WHNP_VP)', 'reg': CMP_WHNP_VP, 'entities': [3, Y + 5], 'comparison': [Y + 4], 'quantifier': []}, {'name': 'Past Passive (WHNP_PP)', 'reg': CMP_WHNP_PP, 'entities': [1, Z + 6], 'comparison': [Z + 2], 'quantifier': []}, {'name': 'Past Passive (WHNP_ADJP)', 'reg': CMP_WHNP_ADJP_WHNP, 'entities': [1, Z + 7], 'comparison': [Z + 5], 'quantifier': [Z + 5]}, ] # Get the Types of the parts as a single string so we can check the pattern with a Regex # Should look like 'S NP VP PP NP NP' posParts = [p.type for p in parts] posStr = ' '.join(posParts) if verbose: print "POS: " + posStr print parts # Knowing how many entities are in the sentence helps us make some top-level decisions allWords = [part.words for part in parts] allEntities = parseAllEntities(entitiesByType, flatten(allWords), verbose) # # Special case for really long sentences # isEitherOr = ("either" in sentence) and ("or" in sentence) isDoubleEitherOr = ('Of' in sentence) and (len(allEntities) == 4) isBigNot = (not isEitherOr) and (len(allEntities) > 2) # Statements saying one of X and Y are W and Z # E.g. "Of the Irish Pride and the 28 ft vessel, one is owned by Ernesto Ellis and the other is owned by Betsy Becker." # => is(xor(Irish Pride, 28 ft), xor(Ernesto Ellis, Betsy Becker)) if isDoubleEitherOr: entities = [None, None] eitherParts = [' '.join(allEntities[0:2]), ' '.join(allEntities[2:4])] entities[0] = parseEitherEntities(entitiesByType, eitherParts[0], verbose) entities[1] = parseEitherEntities(entitiesByType, eitherParts[1], verbose) comparison, quantifier = parseComparisons([], None, entitiesByType, verbose) # Get default comparison ("is") return [entities, comparison, quantifier] # Statements saying X is (either Y or Z). This is effectively an XOR # E.g. 
"The vacation with Dustin is either the 2004 holiday or the hang gliding holiday" # => before(Greg, maroon, 2 minutes) elif isEitherOr: entities = [None, None] eitherParts = sentence.split('either') entities[0] = parseEitherEntities(entitiesByType, eitherParts[0], verbose) entities[1] = parseEitherEntities(entitiesByType, eitherParts[1], verbose) comparison, quantifier = parseComparisons([], None, entitiesByType, verbose) # Get default comparison ("is") return [entities, comparison, quantifier] # Statements like # "The five projects are the study on the Orion, Beulah's study, Henrietta's assignment, # the project beginning in July and the assignment beginning in March." # This means all of the listed entities are mutually exclusive elif isBigNot: entities = allEntities comparison = "not" quantifier = None return [entities, comparison, quantifier] # # Try to generically parse the Entities/Comparisons/Quantifier based on regex results # wordLUT = getWordsByPosIdxLUT(parts) candidates = [] for matcher in PHRASE_MATCHERS: match = re.match(matcher['reg'], posStr) if bool(match): try: results = parseViaRegex(match, entitiesByType, wordLUT, matcher['entities'], matcher['comparison'], matcher['quantifier']) candidates = addResultCandidate(candidates, results) except: if verbose: print 'No valid matches found despite regex match for ' + matcher['name'] return candidates[0] if len(candidates) > 0 else [None, None, None] # ************************************************************************************ def addResultCandidate(candidates, results): candidates.append(results) return sorted(candidates, cmp=compareResults) # ************************************************************************************ # -1 => result1 is a better match # +1 => result2 is a better match # 0 => equally-good matches def compareResults(result1, result2): # A) Prefer comparisons of equalities if ('is' in result1 and 'is' not in result2): return +1 if ('is' not in result1 and 'is' in result2): return -1 size1 = len([x for x in result1 if x]) size2 = len([x for x in result2 if x]) # B) Prefer more detailed results if size1 > size2: return -1 if size1 < size2: return +1 else: return 0 # ************************************************************************************ def parseViaRegex(match, entitiesByType, wordLUT, entitiesIdx, comparisonsIdx, quantifiersIdx): entities = [getWordsByPosIdx(wordLUT, match.regs[idx]) for idx in entitiesIdx] # Named Entity Recognition: Filter down to known entities entities = [' '.join(parseAllEntities(entitiesByType, entity, verbose)) for entity in entities] entities = [entity for entity in entities if entity] assert (len(entities) == len(entitiesIdx)), 'Unable to find expected number of entities' comparisons = [getWordsByPosIdx(wordLUT, match.regs[idx]) for idx in comparisonsIdx] comparisons = [comp for comp in comparisons if comp][0] quantifiers = [getWordsByPosIdx(wordLUT, match.regs[idx]) for idx in quantifiersIdx] quantifiers = [mod for mod in quantifiers if mod] if quantifiers: # Naively remove the comparison from the quantifier. This is NOT robust. quantifier = [word for word in quantifiers[0] if word not in comparisons] # Remove abstract comparisons (e.g. 'somewhat', 'sometime', etc...) 
quantifier = [word for word in quantifier if 'some' not in word] else: quantifier = None [comparison, quantifier] = parseComparisons(comparisons, quantifier, entitiesByType, verbose) return [entities, comparison, quantifier] # ************************************************************************************ def flatten(list): return [val for sublist in list for val in sublist] # ************************************************************************************ def parseComparisons(comparisons, quantifier, entitiesByType, verbose=False): KNOWN_COMPARATORS = set([ 'after', 'before', 'more', 'less', 'more', 'fewer', 'larger', 'smaller', 'taller', 'shorter', 'higher', 'lower', 'older', 'younger', 'ahead', 'behind', 'farther', 'closer', 'further', 'nearer', 'longer', 'shorter' ]) matches = list(KNOWN_COMPARATORS & set(comparisons)) if len(matches) > 0: # Default the quantifier in case it ended up within the comparison instead of the quantifier quantifier = quantifier if quantifier else [word for word in comparisons if word not in matches] if quantifier: types = [type.lower() for type in entitiesByType] value, type = [None, None] # Try to find a value and a type for item in quantifier: try: int(item) value = item if not value else value except: candidates = [t for t in types if item.strip().lower() in t.lower()] type = item if candidates else type # Prefer our strictly parsed value/type if we found any quantifier = [value, type] if value and type else ([value] if value else []) return [matches[0], quantifier] # Unknown comparison. Defaulting to "is", meaning it's an equality, not comparison return ["is", None] # ************************************************************************************ def parseEitherEntities(entitiesByType, words, verbose=False): entities = parseAllEntities(entitiesByType, words, verbose) if len(entities) == 1: return entities[0] elif len(entities) > 1: return "xor(" + ', '.join(entities) + ")" return None # ************************************************************************************ class LinkageError(Exception): pass # ************************************************************************************ class ParseError(Exception): pass # ************************************************************************************ def promptColors(): colors = {} colors['RED'] = "\033[0;31m" colors['LIGHT_RED'] = "\033[1;31m" colors['YELLOW'] = "\033[1;33m" colors['GREEN'] = "\033[0;32m" colors['LIGHT_GREEN'] = "\033[1;32m" colors['BLUE'] = "\033[1;94m" colors['LIGHT_BLUE'] = "\033[1;36m" colors['PURPLE'] = "\033[1;34m" colors['WHITE'] = "\033[1;37m" colors['LIGHT_GRAY'] = "\033[0;37m" colors['COLOR_NONE'] = "\033[0m" return colors # ************************************************************************************ # see: http://stackoverflow.com/a/800201/1624707 def get_immediate_subdirectories(a_dir): return [(a_dir + '/' + name) for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))] # ************************************************************************************ if __name__ == "__main__": if len(sys.argv) <= 1: argparser.print_help() sys.exit() args = argparser.parse_args() verbose = args.verbose quiet = args.quiet inputDirs = args.input nestedDirs = args.directory total, success, fail = [0, 0, 0] if nestedDirs: inputDirs = get_immediate_subdirectories(inputDirs[0]) inputDirs.sort() print "NESTED DIRS:" print inputDirs for inputDir in inputDirs: total_i, success_i, fail_i = main(inputDir, verbose, quiet) total += total_i success += 
success_i fail += fail_i print "" print "" if (success / float(total)) < 0.70: print promptColors()['RED'] + '## FAILURE' elif (success / float(total)) < 0.90: print promptColors()['YELLOW'] + '## DECENT' else: print promptColors()['GREEN'] + '## SUCCESS' print promptColors()['WHITE'] + str(100 * success / total) + "% success - " \ + str(success) + " of " + str(total) + " total statements" print ""
ross-nordstrom/LogicSolver
parser/lib/parse.py
Python
mit
26,869
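A self-contained sketch of the substring-plus-noSpace matching that parseAllEntities in the file above relies on; the entity list and sentence are made up for illustration:

def no_space(text):
    # Same idea as the file's noSpace(): lowercase and drop all whitespace
    return ''.join(text.lower().split())

entities = ['Betsy Becker', 'sailboat', 'Greg']
sentence = 'The sail boat belongs to Betsy Becker'.lower()

matches = [e for e in entities
           if e.lower() in sentence or no_space(e) in no_space(sentence)]
print(matches)  # ['Betsy Becker', 'sailboat']; 'sailboat' matches despite the space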
listaidademenorde18 = []
listaidademaiorde18 = []

while True:
    try:
        idade = int(input("Enter the person's age: "))
        break
    except ValueError:
        pass

if idade < 18:
    listaidademenorde18.append(idade)
else:
    listaidademaiorde18.append(idade)

print(listaidademenorde18)
print(listaidademaiorde18)

# https://pt.stackoverflow.com/q/456654/101
bigown/SOpt
Python/Algorithm/ExceptElse.py
Python
mit
370
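An equivalent sketch of the validation loop above written with try/except/else, where the else branch runs only when int() raised nothing:

while True:
    try:
        idade = int(input("Enter the person's age: "))
    except ValueError:
        continue  # invalid input, ask again
    else:
        break  # reached only when the conversion succeeded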
# -*- coding:utf-8 -*- import time from spiders.__Global_function import get_localtime REPORT_SAVEDIR = "/var/lib/spider_save/reports" LOGGING_SAVEDIR = "/var/lib/spider_save/logging" # now_time = get_localtime(time.strftime("%Y-%m-%d", time.localtime())) now_time = 20150101 end_time = 20991212
AnselCmy/ARPS
report_crawler/report_crawler/spiders/__Global_variable.py
Python
mit
299
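The constants above store dates as YYYYMMDD integers; get_localtime (imported from spiders.__Global_function, not shown here) presumably produces the same form from a "%Y-%m-%d" string, so this helper is a purely hypothetical stand-in:

import time

def yyyymmdd_today():
    # Hypothetical stand-in: "2015-01-01" -> 20150101
    return int(time.strftime("%Y%m%d", time.localtime()))

now_time = yyyymmdd_today()  # e.g. 20150101 when run on 2015-01-01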
from django import forms from django.contrib.auth import get_user_model from django.contrib.auth.forms import ( ReadOnlyPasswordHashField, PasswordResetForm as DjangoPasswordResetForm) from django.contrib.auth.tokens import default_token_generator from django.contrib.sites.models import get_current_site from django.utils.http import int_to_base36 from django.utils.translation import ugettext_lazy as _ from funky_user import conf User = get_user_model() class UserChangeForm(forms.ModelForm): """ User change form for the Django Admin. Overrides the password field to explain it's content. """ password = ReadOnlyPasswordHashField(label=_("Password"), help_text=_("Raw passwords are not stored, so there is no way to see " "this user's password, but you can change the password " "using <a href=\"password/\">this form</a>.")) class Meta: model = User class SignupForm(forms.ModelForm): """ Signup form for new users. It displays email and password fields by default, and adds the fields defined in `REQUIRED_FIELDS` in the current user model. """ password = forms.CharField(label=_('Password'), widget=forms.PasswordInput) class Meta: model = User fields = [ 'email', 'password', ] + User.REQUIRED_FIELDS def clean_email(self): """ Validate that the email is not already in use. """ try: User.objects.get(email__iexact=self.cleaned_data['email']) except User.DoesNotExist: return self.cleaned_data['email'] raise forms.ValidationError(_('That email is already registered.')) class PasswordResetForm(DjangoPasswordResetForm): """ Overrides the save method on Django's PasswordResetForm to enable HTML email. """ def save(self, domain_override=None, email_template_prefix='auth/emails/password_reset', use_https=False, token_generator=default_token_generator, from_email=None, request=None, **kwargs): """ Generates a one-use only link for resetting password and sends to the user. """ for user in self.users_cache: if not domain_override: current_site = get_current_site(request) site_name = current_site.name domain = current_site.domain else: site_name = domain = domain_override c = { 'email': user.email, 'domain': domain, 'site_name': site_name, 'uid': int_to_base36(user.pk), 'user': user, 'token': token_generator.make_token(user), 'protocol': use_https and 'https' or 'http', 'BASE_TEMPLATE_HTML_EMAIL': conf.BASE_TEMPLATE_HTML_EMAIL, 'BASE_TEMPLATE_TEXT_EMAIL': conf.BASE_TEMPLATE_TEXT_EMAIL, } # Send HTML mail with Django Groove #from groove.email.html import send_html_email #send_html_email(user.email, email_template_prefix, c, from_email)
funkbit/django-funky-user
funky_user/forms.py
Python
bsd-2-clause
3,202
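The Groove send at the end of save() above is commented out. A minimal sketch of mailing the same context as a multipart message with Django's stock mail API; the 'Password reset' subject and the .txt/.html template names are assumptions following the email_template_prefix convention, not taken from the repo:

from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string

def send_reset_email(to_email, template_prefix, context, from_email=None):
    # Renders '<prefix>.txt' and '<prefix>.html' templates (assumed to exist)
    text_body = render_to_string(template_prefix + '.txt', context)
    html_body = render_to_string(template_prefix + '.html', context)
    msg = EmailMultiAlternatives('Password reset', text_body, from_email, [to_email])
    msg.attach_alternative(html_body, 'text/html')
    msg.send()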
#! /usr/bin/env python

# Copyright 2007 Andreas Berger
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import string
from dvbobjects.MPEG.Section import Section
from dvbobjects.utils import *
from dvbobjects.DVB.Descriptors import *

######################################################################
class ip_mac_notification_section(Section):

    table_id = 0x4c
    section_max_size = 4096

    def pack_section_body(self):

        self.action_type = 0x01
        self.platform_id_hash = (
            ((self.platform_id >> 16) & 0xff)
            ^ ((self.platform_id >> 8) & 0xff)
            ^ (self.platform_id & 0xff))

        # pack platform descriptor loop
        pdl_bytes = string.join(
            map(lambda x: x.pack(), self.platform_descriptor_loop), "")

        # pack association_loop
        al_bytes = string.join(
            map(lambda x: x.pack(), self.association_loop), "")

        pdl_bytes_length = len(pdl_bytes)

        self.table_id_extension = self.action_type << 8 | self.platform_id_hash

        fmt = "!BBBBBB%ds%ds" % (len(pdl_bytes), len(al_bytes))
        return pack(fmt,
                    (self.platform_id >> 16) & 0xFF,
                    (self.platform_id >> 8) & 0xFF,
                    self.platform_id & 0xFF,
                    self.processing_order,
                    # reserved '1111' nibble plus the top 4 bits of the
                    # 12-bit loop length; must fit in one byte for "B"
                    0xF0 | ((pdl_bytes_length >> 8) & 0x0F),
                    pdl_bytes_length & 0xFF,
                    pdl_bytes,
                    al_bytes
                    )


class association_loop_item(DVBobject):

    def pack(self):

        # pack target descriptor loop
        tdl_bytes = string.join(
            map(lambda x: x.pack(), self.target_descriptor_loop), "")

        # pack operational descriptor loop
        odl_bytes = string.join(
            map(lambda x: x.pack(), self.operational_descriptor_loop), "")

        tdl_bytes_length = len(tdl_bytes)
        odl_bytes_length = len(odl_bytes)

        fmt = "!BB%dsBB%ds" % (tdl_bytes_length, odl_bytes_length)
        return pack(fmt,
                    0xF0 | ((tdl_bytes_length >> 8) & 0x0F),
                    tdl_bytes_length & 0xFF,
                    tdl_bytes,
                    0xF0 | ((odl_bytes_length >> 8) & 0x0F),
                    odl_bytes_length & 0xFF,
                    odl_bytes
                    )
0xalen/opencaster_isdb-tb
libs/dvbobjects/dvbobjects/PSI/INT.py
Python
gpl-2.0
2,848
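A quick self-contained check of the reserved-nibble-plus-12-bit-length byte layout packed in INT.py above (4 reserved '1' bits, then the loop length split across two bytes):

from struct import pack

loop_length = 0x123  # an example 12-bit descriptor loop length
encoded = pack('!BB', 0xF0 | ((loop_length >> 8) & 0x0F), loop_length & 0xFF)
assert encoded == b'\xf1\x23'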
# Programming in Python #27 - Files (Read and Write)

escreverFicheiro = open('27-Ficheiros-Ler-e-Escrever.txt', 'w')
escreverFicheiro.write('I am writing on the first line of this file!\n')
escreverFicheiro.write('Now I am writing on the second line!')
escreverFicheiro.close()

lerFicheiro = open('27-Ficheiros-Ler-e-Escrever.txt', 'r')
print(lerFicheiro.read())
lerFicheiro.close()
caffeinealgorithm/youtube-videos-source-code
Programar em Python/27-Ficheiros-Ler-e-Escrever.py
Python
mit
406
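The same exercise as the file above, sketched with context managers so the files are closed automatically even if a write fails:

with open('27-Ficheiros-Ler-e-Escrever.txt', 'w') as f:
    f.write('I am writing on the first line of this file!\n')
    f.write('Now I am writing on the second line!')

with open('27-Ficheiros-Ler-e-Escrever.txt', 'r') as f:
    print(f.read())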
#!/usr/bin/env python3 from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup d = generate_distutils_setup( packages=['lg_builder'], package_dir={'': 'src'}, scripts=['scripts/lg-ros-build'], install_requires=['catkin_pkg', 'python-debian', 'rospkg'] ) setup(**d)
EndPointCorp/lg_ros_nodes
lg_builder/setup.py
Python
apache-2.0
322
"""codebloga URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from datetime import datetime from django.conf.urls import url, include from blog.forms import BootstrapAuthenticationForm from django.contrib.auth.views import login, logout from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from blog.views import HomeView, register, register_success, post_list urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^$', HomeView.as_view(), name='home'), # url(r'^$', post_list, name='home'), url(r'^blog/', include('blog.urls', namespace="blog")), url(r'^register', register, name='register'), url(r'^register_success$', register_success, name='register_success'), url(r'^login/$', login, { 'template_name': 'blog/login.html', 'authentication_form': BootstrapAuthenticationForm, 'extra_context': { 'title': 'Log in', 'year': datetime.now().year, } }, name='login'), url(r'^logout$', logout, { 'next_page': '/', }, name='logout'), ] if settings.DEBUG: urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
sartim/codebloga
codebloga/urls.py
Python
gpl-3.0
1,956
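A short sketch of reversing the routes named above; the expected paths follow directly from the regexes (assumes a Django shell with this URLconf active):

from django.core.urlresolvers import reverse

reverse('home')      # '/'
reverse('register')  # '/register'
reverse('login')     # '/login/'
reverse('logout')    # '/logout'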
"""Pretraining on TPUs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl import app from absl import flags import absl.logging as _logging # pylint: disable=unused-import import numpy as np import tensorflow as tf import model_utils import tpu_estimator import function_builder import data_utils # TPU parameters flags.DEFINE_string("master", default=None, help="master") flags.DEFINE_string("tpu", default=None, help="The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.") flags.DEFINE_string("gcp_project", default=None, help="Project name for the Cloud TPU-enabled project. If not specified, " "we will attempt to automatically detect the GCE project from metadata.") flags.DEFINE_string("tpu_zone",default=None, help="GCE zone where the Cloud TPU is located in. If not specified, we " "will attempt to automatically detect the GCE project from metadata.") flags.DEFINE_bool("use_tpu", default=True, help="Use TPUs rather than plain CPUs.") flags.DEFINE_integer("num_hosts", default=1, help="number of TPU hosts") flags.DEFINE_integer("num_core_per_host", default=8, help="number of cores per host") flags.DEFINE_bool("track_mean", default=False, help="Whether to track mean loss.") # Experiment (data/checkpoint/directory) config flags.DEFINE_integer("num_passes", default=1, help="Number of passed used for training.") flags.DEFINE_string("record_info_dir", default=None, help="Path to local directory containing `record_info-lm.json`.") flags.DEFINE_string("model_dir", default=None, help="Estimator model_dir.") flags.DEFINE_string("init_checkpoint", default=None, help="Checkpoint path for initializing the model.") # Optimization config flags.DEFINE_float("learning_rate", default=1e-4, help="Maximum learning rate.") flags.DEFINE_float("clip", default=1.0, help="Gradient clipping value.") # lr decay flags.DEFINE_float("min_lr_ratio", default=0.001, help="Minimum ratio learning rate.") flags.DEFINE_integer("warmup_steps", default=0, help="Number of steps for linear lr warmup.") flags.DEFINE_float("adam_epsilon", default=1e-8, help="Adam epsilon.") flags.DEFINE_string("decay_method", default="poly", help="Poly or cos.") flags.DEFINE_float("weight_decay", default=0.0, help="Weight decay rate.") # Training config flags.DEFINE_integer("train_batch_size", default=16, help="Size of the train batch across all hosts.") flags.DEFINE_integer("train_steps", default=100000, help="Total number of training steps.") flags.DEFINE_integer("iterations", default=1000, help="Number of iterations per repeat loop.") flags.DEFINE_integer("save_steps", default=None, help="Number of steps for model checkpointing. " "None for not saving checkpoints") flags.DEFINE_integer("max_save", default=100000, help="Maximum number of checkpoints to save.") # Data config flags.DEFINE_integer("seq_len", default=0, help="Sequence length for pretraining.") flags.DEFINE_integer("reuse_len", default=0, help="How many tokens to be reused in the next batch. 
" "Could be half of `seq_len`.") flags.DEFINE_bool("uncased", False, help="Use uncased inputs or not.") flags.DEFINE_integer("perm_size", 0, help="Window size of permutation.") flags.DEFINE_bool("bi_data", default=True, help="Use bidirectional data streams, i.e., forward & backward.") flags.DEFINE_integer("mask_alpha", default=6, help="How many tokens to form a group.") flags.DEFINE_integer("mask_beta", default=1, help="How many tokens to mask within each group.") flags.DEFINE_integer("num_predict", default=None, help="Number of tokens to predict in partial prediction.") flags.DEFINE_integer("n_token", 32000, help="Vocab size") # Model config flags.DEFINE_integer("mem_len", default=0, help="Number of steps to cache") flags.DEFINE_bool("same_length", default=False, help="Same length attention") flags.DEFINE_integer("clamp_len", default=-1, help="Clamp length") flags.DEFINE_integer("n_layer", default=6, help="Number of layers.") flags.DEFINE_integer("d_model", default=32, help="Dimension of the model.") flags.DEFINE_integer("d_embed", default=32, help="Dimension of the embeddings.") flags.DEFINE_integer("n_head", default=4, help="Number of attention heads.") flags.DEFINE_integer("d_head", default=8, help="Dimension of each attention head.") flags.DEFINE_integer("d_inner", default=32, help="Dimension of inner hidden size in positionwise feed-forward.") flags.DEFINE_float("dropout", default=0.0, help="Dropout rate.") flags.DEFINE_float("dropatt", default=0.0, help="Attention dropout rate.") flags.DEFINE_bool("untie_r", default=False, help="Untie r_w_bias and r_r_bias") flags.DEFINE_string("summary_type", default="last", help="Method used to summarize a sequence into a compact vector.") flags.DEFINE_string("ff_activation", default="relu", help="Activation type used in position-wise feed-forward.") flags.DEFINE_bool("use_bfloat16", False, help="Whether to use bfloat16.") # Parameter initialization flags.DEFINE_enum("init", default="normal", enum_values=["normal", "uniform"], help="Initialization method.") flags.DEFINE_float("init_std", default=0.02, help="Initialization std when init is normal.") flags.DEFINE_float("init_range", default=0.1, help="Initialization std when init is uniform.") FLAGS = flags.FLAGS def get_model_fn(): """doc.""" def model_fn(features, labels, mode, params): """doc.""" #### Training or Evaluation is_training = (mode == tf.estimator.ModeKeys.TRAIN) assert is_training #### Retrieve `mems` from `params["cache"]` mems = {} idx = 0 if FLAGS.mem_len > 0: mems["mems"] = params["cache"] #### Get loss from inputs total_loss, new_mems, monitor_dict = function_builder.get_loss( FLAGS, features, labels, mems, is_training) #### Turn `new_mems` into `new_cache` new_cache = [] if FLAGS.mem_len > 0: new_cache += new_mems["mems"] #### Check model parameters num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()]) tf.logging.info("#params: {}".format(num_params)) #### Configuring the optimizer train_op, learning_rate, gnorm = model_utils.get_train_op( FLAGS, total_loss) monitor_dict["lr"] = learning_rate monitor_dict["gnorm"] = gnorm #### Customized initial checkpoint scaffold_fn = model_utils.init_from_checkpoint(FLAGS, global_vars=True) #### Creating host calls host_call = function_builder.construct_scalar_host_call( monitor_dict=monitor_dict, model_dir=FLAGS.model_dir, prefix="train/", reduce_fn=tf.reduce_mean) #### Constucting training TPUEstimatorSpec with new cache. 
train_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) train_spec.cache = new_cache return train_spec return model_fn def get_cache_fn(mem_len): """doc.""" tf_float = tf.bfloat16 if FLAGS.use_bfloat16 else tf.float32 def cache_fn(batch_size): mems = [] if FLAGS.mem_len > 0: for _ in range(FLAGS.n_layer): zeros = tf.zeros( [mem_len, batch_size, FLAGS.d_model], dtype=tf_float) mems.append(zeros) return mems if mem_len > 0: return cache_fn else: return None def get_input_fn(split): """doc.""" assert split == "train" batch_size = FLAGS.train_batch_size input_fn, record_info_dict = data_utils.get_input_fn( tfrecord_dir=FLAGS.record_info_dir, split=split, bsz_per_host=batch_size // FLAGS.num_hosts, seq_len=FLAGS.seq_len, reuse_len=FLAGS.reuse_len, bi_data=FLAGS.bi_data, num_hosts=FLAGS.num_hosts, num_core_per_host=FLAGS.num_core_per_host, perm_size=FLAGS.perm_size, mask_alpha=FLAGS.mask_alpha, mask_beta=FLAGS.mask_beta, uncased=FLAGS.uncased, num_passes=FLAGS.num_passes, use_bfloat16=FLAGS.use_bfloat16, num_predict=FLAGS.num_predict) return input_fn, record_info_dict def main(unused_argv): del unused_argv # Unused tf.logging.set_verbosity(tf.logging.INFO) assert FLAGS.seq_len > 0 assert FLAGS.perm_size > 0 FLAGS.n_token = data_utils.VOCAB_SIZE tf.logging.info("n_token {}".format(FLAGS.n_token)) if not tf.gfile.Exists(FLAGS.model_dir): tf.gfile.MakeDirs(FLAGS.model_dir) # Get train input function train_input_fn, train_record_info_dict = get_input_fn("train") tf.logging.info("num of batches {}".format( train_record_info_dict["num_batch"])) # Get train cache function train_cache_fn = get_cache_fn(FLAGS.mem_len) ##### Get model function model_fn = get_model_fn() ##### Create TPUEstimator # TPU Configuration run_config = model_utils.configure_tpu(FLAGS) # TPU Estimator estimator = tpu_estimator.TPUEstimator( model_fn=model_fn, train_cache_fn=train_cache_fn, use_tpu=FLAGS.use_tpu, config=run_config, params={"track_mean": FLAGS.track_mean}, train_batch_size=FLAGS.train_batch_size, eval_on_tpu=FLAGS.use_tpu) #### Training estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps) if __name__ == "__main__": app.run(main)
zihangdai/xlnet
train.py
Python
apache-2.0
9,662
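A small numpy sketch of the memory cache layout built by get_cache_fn above: one zero tensor per layer with shape [mem_len, batch_size, d_model]. Sizes here are illustrative, and numpy stands in for the tf.zeros call:

import numpy as np

n_layer, mem_len, batch_size, d_model = 6, 96, 16, 32  # illustrative sizes
mems = [np.zeros((mem_len, batch_size, d_model), dtype=np.float32)
        for _ in range(n_layer)]
assert len(mems) == n_layer and mems[0].shape == (96, 16, 32)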
#! /usr/bin/env python """ plock[pbtest]% python ~/mwa/bin/make_beam.py -f P00_w.fits -v # INFO:make_beam: Computing for 2011-09-27 14:05:06+00:00 # INFO:make_beam: Created primary beam for 154.24 MHz and delays=0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 # INFO:make_beam: XX beam written to P00_w_beamXX.fits # INFO:make_beam: YY beam written to P00_w_beamYY.fits """ import sys,os,logging,shutil,datetime,re,subprocess,math,tempfile,string,glob from optparse import OptionParser,OptionGroup import numpy,math,os from mwapy.pb import primary_beam,make_beam import mwapy try: import astropy.io.fits as pyfits except ImportError: import pyfits # configure the logging logging.basicConfig(format='# %(levelname)s:%(name)s: %(message)s') logger=logging.getLogger('make_beam') logger.setLevel(logging.WARNING) ###################################################################### def main(): usage="Usage: %prog [options]\n" usage+='\tMakes primary beams associated with a FITS image\n' parser = OptionParser(usage=usage,version=mwapy.__version__) parser.add_option('-f','--filename',dest="filename",default=None, help="Create primary beam for <FILE>",metavar="FILE") parser.add_option('-e','--ext',dest='ext',type=str,default='0', help='FITS extension name or number [default=%default]') parser.add_option('-m','--metafits',dest='metafits',default=None, help="FITS file to get delays from (can be metafits)") parser.add_option('-d','--delays',dest="delays",default=None, help="Beamformer delays to use; 16 comma-separated values") parser.add_option('--analytic',action="store_true",dest="analytic_model",default=False, help="Use the old analytic dipole model, instead of the default Sutinjo 2014 model.") parser.add_option('--jones',dest='jones',default=False, action='store_true', help="Compute Jones matrix instead of power beam? 
[default=False]") parser.add_option('--noprecess',action='store_false', dest='precess',default=True, help='Do not precess coordinates to current epoch (faster but less accurate) [default=False]') parser.add_option('--height',dest='height',default=primary_beam._DIPOLE_HEIGHT, type=float, help='Dipole height (m) (only an option for analytic beam model) [default=%default]') parser.add_option('--sep',dest='separation',default=primary_beam._DIPOLE_SEPARATION, type=float, help='Dipole separation (m) (only an option for analytic beam model) [default=%default]') parser.add_option('-v','--verbose',action="store_true",dest="verbose",default=False, help="Increase verbosity of output") (options, args) = parser.parse_args() if (options.verbose): logger.setLevel(logging.INFO) try: extnum=int(options.ext) ext=extnum except: ext=options.ext pass if options.delays is not None: try: options.delays=[int(x) for x in options.delays.split(',')] except Exception,e: logger.error('Unable to parse beamformer delays %s: %s' % (options.delays,e)) sys.exit(1) if options.metafits is not None: try: f=pyfits.open(options.metafits) except Exception,e: logger.error('Unable to open FITS file %s: %s' % (options.metafits,e)) sys.exit(1) if not 'DELAYS' in f[0].header.keys(): logger.error('Cannot find DELAYS in %s' % options.metafits) sys.exit(1) options.delays=f[0].header['DELAYS'] try: options.delays=[int(x) for x in options.delays.split(',')] except Exception,e: logger.error('Unable to parse beamformer delays %s: %s' % (options.delays,e)) sys.exit(1) if options.filename is None: logger.error('Must supply a filename') sys.exit(1) out=make_beam.make_beam(options.filename, ext=ext, delays=options.delays, analytic_model=options.analytic_model, jones=options.jones, precess=options.precess, dipheight=options.height, dip_sep=options.separation) if out is None: logger.error('Problem creating primary beams') sys.exit(1) sys.exit(0) ###################################################################### if __name__=="__main__": main()
ryandougherty/mwa-capstone
MWA_Tools/scripts/make_beam.py
Python
gpl-2.0
4,701
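A hedged sketch of calling the underlying library function directly, mirroring the make_beam.make_beam(...) invocation at the bottom of main() above; the FITS filename is a placeholder:

from mwapy.pb import make_beam

# All 16 beamformer delays set to zero (zenith pointing)
out = make_beam.make_beam('image.fits', ext=0, delays=[0] * 16)
if out is None:
    raise RuntimeError('Problem creating primary beams')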
from django.utils.translation import ugettext_lazy as _ from livesettings import * SHIP_MODULES = config_get('SHIPPING', 'MODULES') # No need to add the choice, since it is in by default # SHIP_MODULES.add_choice(('shipping.modules.per', _('Per piece'))) SHIPPING_GROUP = config_get_group('SHIPPING') config_register_list( DecimalValue(SHIPPING_GROUP, 'PER_RATE', description=_("Per item price"), requires=SHIP_MODULES, requiresvalue='shipping.modules.per', default="4.00"), StringValue(SHIPPING_GROUP, 'PER_SERVICE', description=_("Per Item Shipping Service"), help_text=_("Shipping service used with per item shipping"), requires=SHIP_MODULES, requiresvalue='shipping.modules.per', default=u"U.S. Mail"), StringValue(SHIPPING_GROUP, 'PER_DAYS', description=_("Per Item Delivery Days"), requires=SHIP_MODULES, requiresvalue='shipping.modules.per', default="3 - 4 business days") )
mitchellzen/pops
satchmo/apps/shipping/modules/per/config.py
Python
bsd-3-clause
1,029
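A sketch of reading these settings back at runtime; config_value is the usual livesettings accessor in Satchmo code, stated here as an assumption rather than taken from this file:

from livesettings import config_value

per_rate = config_value('SHIPPING', 'PER_RATE')        # Decimal('4.00') by default
per_service = config_value('SHIPPING', 'PER_SERVICE')  # u'U.S. Mail' by default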
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' validation.py |github| ---------------------- Validates the code against :py:obj:`batman` for transit light curves and against a direct (brute force) integration of the radiance map for planet-planet occultations. .. plot:: :align: center from scripts import validation validation._test() .. role:: raw-html(raw) :format: html .. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/scripts/validation.py"><i class="fa fa-github" aria-hidden="true"></i></a>` ''' from __future__ import division, print_function, absolute_import, \ unicode_literals from planetplanet.constants import * from planetplanet import Star, Planet, System import matplotlib.pyplot as pl import numpy as np try: import batman except: batman = None from tqdm import tqdm def _test(): ''' ''' ValidateTransits() ValidateOccultations() pl.show() def ZenithAngle(x, y, r, theta): ''' Compute the zenith angle. ''' # Normalize x = x / r y = y / r x2 = x * x y2 = y * y # This is a solution to a quadratic equation in z = sin(za) ** 2 z = 0.5 * ((1 - 2 * x2 - y2) * np.cos(2 * theta) + 2 * x * \ np.sqrt(1 - x2 - y2) * np.sin(2 * theta) + y2 + 1) # Where are we relative to the terminator? xterm = np.sin(theta) * np.sqrt(np.abs(1 - y2)) # Solve for the zenith angle if np.abs(theta) <= np.pi / 2: if (x <= xterm): return np.arcsin(np.sqrt(z)) else: return np.pi - np.arcsin(np.sqrt(z)) else: if (x >= -xterm): return np.arcsin(np.sqrt(z)) else: return np.pi - np.arcsin(np.sqrt(z)) def Radiance(z, irrad, lam = 15, albedo = 0.3, tnight = 40): ''' Compute the radiance at a point on the planet's surface at a given wavelength. ''' # Cosine law if (z < np.pi / 2): temp = ((irrad * np.cos(z) * (1 - albedo)) / SBOLTZ) ** 0.25 if (temp < tnight): temp = tnight else: temp = tnight # Planck's law lam /= 1e6 a = 2 * HPLANCK * CLIGHT * CLIGHT / (lam * lam * lam * lam * lam) b = HPLANCK * CLIGHT / (lam * KBOLTZ * temp) return a / (np.exp(b) - 1) def ValidateTransits(): ''' ''' if batman is None: print("Please install the `batman` package.") return # System params time = np.arange(-0.12, 0.12, 0.001) mstar = 1. rstar = 1. limbdark = [0.4, 0.26] per = 5. inc = 90. r = 10. t0 = 0. w = 60. ecc = 0.3 # planetplanet star = Star('A', m = mstar, r = rstar, nz = 99, limbdark = limbdark) b = Planet('b', m = 0., per = per, inc = inc, r = r, t0 = t0, nz = 1, Omega = 0., w = w, ecc = ecc, phasecurve = False) system = System(star, b, batmanopt = False, circleopt = False) system.compute(time) flux_pp = system.A.flux[:,0] flux_pp /= flux_pp[0] # batman params = batman.TransitParams() params.t0 = t0 params.per = per params.inc = inc params.ecc = ecc params.w = w - 180. params.limb_dark = "quadratic" params.u = limbdark params.rp = r / (rstar * RSUNREARTH) params.a = ((b.per) ** 2 * GEARTH * (mstar * MSUNMEARTH) / (4 * np.pi ** 2)) ** (1. / 3.) / (rstar * RSUNREARTH) m = batman.TransitModel(params, time) flux_bm = m.light_curve(params) # Ensure the two are equal to within 0.1 ppm assert np.max(np.abs((flux_pp - flux_bm) * 1e6)) < 0.1, \ "Flux mismatch between `planetplanet` and `batman`." 
# Plot the comparison fig, ax = pl.subplots(2, sharex = True) ax[0].plot(time, flux_pp, color = 'b', label = 'planetplanet (pp)') ax[0].plot(time, flux_bm, color = 'g', ls = '--', label = 'batman (bm)') ax[1].plot(time, (flux_pp - flux_bm) * 1e6, color = 'k') ax[0].legend(loc = 9) ax[0].set_ylabel('Flux', fontweight = 'bold') ax[1].set_ylabel('pp - bm [ppm]', fontweight = 'bold') ax[1].set_xlabel('Time [days]', fontweight = 'bold') return fig, ax def ValidateOccultations(): ''' ''' # Instantiate the star mstar = 0.0802 rstar = 0.121 teff = (0.000524 * LSUN / (4 * np.pi * (rstar * RSUN) ** 2 * SBOLTZ)) ** 0.25 star = Star('A', m = mstar, r = rstar, teff = teff, color = 'k') # Instantiate `c` RpRs = np.sqrt(0.687 / 100) r = RpRs * rstar * RSUN / REARTH c = Planet('c', m = 1.38, per = 2.4218233, inc = 89.67 - 0.05, r = r, t0 = 0, Omega = 0, w = 0, ecc = 0, color = 'coral', tnight = 40., albedo = 0.3, phasecurve = False, nz = 31) # Instantiate `d` RpRs = np.sqrt(0.367 / 100) r = RpRs * rstar * RSUN / REARTH d = Planet('d', m = 0.41, per = 4.049610, inc = 89.75 + 0.16, r = r, t0 = 0, Omega = 0, w = 0, ecc = 0, color = 'firebrick', tnight = 40., albedo = 0.3, phasecurve = False) # Instantiate the system system = System(star, c, d, distance = 12, oversample = 1, nbody = False) # There's a triple occultation of `c` at this time time = np.arange(-259.684 + 2 * 0.00025, -259.665, 0.01 * MINUTE) # Compute the light curve using planetplanet system = System(star, c, d) system.compute(time, lambda2 = 15) flux_pp = np.array(c.flux[:,-1]) / c.total_flux[-1] # Rescale the time array time = (time - np.nanmedian(time)) / MINUTE # Now compute the light curve by brute force direct integration flux_bf = np.zeros_like(time) for t in tqdm(range(len(time))): # Grid up the planet xarr = c.x[t] + np.linspace(-c.r, c.r, 30) yarr = c.y[t] + np.linspace(-c.r, c.r, 30) rad = np.zeros((len(xarr), len(yarr))) for i, x in enumerate(xarr): for j, y in enumerate(yarr): # Are we outside the planet? if (x - c.x[t]) ** 2 + (y - c.y[t]) ** 2 >= c.r ** 2: continue # Are we outside the occultor? if (x - d.x[t]) ** 2 + (y - d.y[t]) ** 2 >= d.r ** 2: continue # Get the orbital phase theta = np.arctan2(c.z[t], c.x[t]) # Zenith angle z = ZenithAngle(x - c.x[t], y - c.y[t], c.r, theta) # Get the irradiance on the planet d2 = c.x[t] ** 2 + c.y[t] ** 2 + c.z[t] ** 2 irrad = star._r ** 2 * SBOLTZ * star.teff ** 4 / d2 # Get the planet's radiance rad[i,j] = Radiance(z, irrad) flux_bf[t] = -np.sum(rad) # Normalize it dbf = -np.min(flux_bf) dpp = 1 - np.min(flux_pp) flux_bf = flux_bf * dpp / dbf + 1 # Ensure the two are equal to within one percent assert np.max(np.abs((flux_pp - flux_bf))) < 0.01, \ "Flux mismatch between `planetplanet` and brute force integration." # Plot the light curve fig = pl.figure() pl.plot(time, flux_pp, '-', label = 'planetplanet') pl.plot(time, flux_bf, '-', label = 'brute force') pl.ylabel('Flux', fontweight = 'bold') pl.xlabel('Time [days]', fontweight = 'bold') pl.legend() return fig, pl.gca() if __name__ == '__main__': ValidateTransits() ValidateOccultations() pl.show()
rodluger/planetplanet
scripts/validation.py
Python
gpl-3.0
7,672
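A self-contained numeric sketch of the Planck step inside Radiance() above, with standard SI values standing in for the package's HPLANCK/CLIGHT/KBOLTZ constants; the temperature and wavelength are illustrative:

import numpy as np

HPLANCK = 6.626e-34  # J s (approximate; the package defines its own constant)
CLIGHT = 2.998e8     # m / s
KBOLTZ = 1.381e-23   # J / K

lam = 15e-6          # 15 microns, as in the occultation validation
temp = 255.0         # K, an illustrative dayside temperature

a = 2 * HPLANCK * CLIGHT ** 2 / lam ** 5
b = HPLANCK * CLIGHT / (lam * KBOLTZ * temp)
radiance = a / (np.exp(b) - 1)  # spectral radiance, W m^-3 sr^-1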
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Command-line interface""" import argparse import os import textwrap from argparse import RawTextHelpFormatter from typing import Callable from tabulate import tabulate_formats from airflow import api, settings from airflow.configuration import conf from airflow.utils.cli import alternative_conn_specs from airflow.utils.module_loading import import_string from airflow.utils.timezone import parse as parsedate api.load_auth() DAGS_FOLDER = settings.DAGS_FOLDER if "BUILDING_AIRFLOW_DOCS" in os.environ: DAGS_FOLDER = '[AIRFLOW_HOME]/dags' def lazy_load_command(import_path: str) -> Callable: """Create a lazy loader for command""" _, _, name = import_path.rpartition('.') def command(*args, **kwargs): func = import_string(import_path) return func(*args, **kwargs) command.__name__ = name # type: ignore return command class Arg: """Class to keep information about command line argument""" # pylint: disable=redefined-builtin def __init__(self, flags=None, help=None, action=None, default=None, nargs=None, type=None, choices=None, required=None, metavar=None): self.flags = flags self.help = help self.action = action self.default = default self.nargs = nargs self.type = type self.choices = choices self.required = required self.metavar = metavar # pylint: enable=redefined-builtin class CLIFactory: """ Factory class which generates command line argument parser and holds information about all available Airflow commands """ args = { # Shared 'dag_id': Arg(("dag_id",), "The id of the dag"), 'task_id': Arg(("task_id",), "The id of the task"), 'execution_date': Arg( ("execution_date",), help="The execution date of the DAG", type=parsedate), 'task_regex': Arg( ("-t", "--task_regex"), "The regex to filter specific task_ids to backfill (optional)"), 'subdir': Arg( ("-sd", "--subdir"), "File location or directory from which to look for the dag. " "Defaults to '[AIRFLOW_HOME]/dags' where [AIRFLOW_HOME] is the " "value you set for 'AIRFLOW_HOME' config you set in 'airflow.cfg' ", default=DAGS_FOLDER), 'start_date': Arg( ("-s", "--start_date"), "Override start_date YYYY-MM-DD", type=parsedate), 'end_date': Arg( ("-e", "--end_date"), "Override end_date YYYY-MM-DD", type=parsedate), 'dry_run': Arg( ("-dr", "--dry_run"), "Perform a dry run", "store_true"), 'pid': Arg( ("--pid",), "PID file location", nargs='?'), 'daemon': Arg( ("-D", "--daemon"), "Daemonize instead of running " "in the foreground", "store_true"), 'stderr': Arg( ("--stderr",), "Redirect stderr to this file"), 'stdout': Arg( ("--stdout",), "Redirect stdout to this file"), 'log_file': Arg( ("-l", "--log-file"), "Location of the log file"), 'yes': Arg( ("-y", "--yes"), "Do not prompt to confirm reset. 
Use with care!", "store_true", default=False), 'output': Arg( ("--output",), ( "Output table format. The specified value is passed to " "the tabulate module (https://pypi.org/project/tabulate/). " "Valid values are: ({})".format("|".join(tabulate_formats)) ), choices=tabulate_formats, default="fancy_grid"), # list_dag_runs 'no_backfill': Arg( ("--no_backfill",), "filter all the backfill dagruns given the dag id", "store_true"), 'state': Arg( ("--state",), "Only list the dag runs corresponding to the state" ), # list_jobs 'limit': Arg( ("--limit",), "Return a limited number of records" ), # backfill 'mark_success': Arg( ("-m", "--mark_success"), "Mark jobs as succeeded without running them", "store_true"), 'verbose': Arg( ("-v", "--verbose"), "Make logging output more verbose", "store_true"), 'local': Arg( ("-l", "--local"), "Run the task using the LocalExecutor", "store_true"), 'donot_pickle': Arg( ("-x", "--donot_pickle"), ( "Do not attempt to pickle the DAG object to send over " "to the workers, just tell the workers to run their version " "of the code."), "store_true"), 'bf_ignore_dependencies': Arg( ("-i", "--ignore_dependencies"), ( "Skip upstream tasks, run only the tasks " "matching the regexp. Only works in conjunction " "with task_regex"), "store_true"), 'bf_ignore_first_depends_on_past': Arg( ("-I", "--ignore_first_depends_on_past"), ( "Ignores depends_on_past dependencies for the first " "set of tasks only (subsequent executions in the backfill " "DO respect depends_on_past)."), "store_true"), 'pool': Arg(("--pool",), "Resource pool to use"), 'delay_on_limit': Arg( ("--delay_on_limit",), help=("Amount of time in seconds to wait when the limit " "on maximum active dag runs (max_active_runs) has " "been reached before trying to execute a dag run " "again."), type=float, default=1.0), 'reset_dag_run': Arg( ("--reset_dagruns",), ( "if set, the backfill will delete existing " "backfill-related DAG runs and start " "anew with fresh, running DAG runs"), "store_true"), 'rerun_failed_tasks': Arg( ("--rerun_failed_tasks",), ( "if set, the backfill will auto-rerun " "all the failed tasks for the backfill date range " "instead of throwing exceptions"), "store_true"), 'run_backwards': Arg( ("-B", "--run_backwards",), ( "if set, the backfill will run tasks from the most " "recent day first. if there are tasks that depend_on_past " "this option will throw an exception"), "store_true"), # list_tasks 'tree': Arg(("-t", "--tree"), "Tree view", "store_true"), # list_dags 'report': Arg( ("-r", "--report"), "Show DagBag loading report", "store_true"), # clear 'upstream': Arg( ("-u", "--upstream"), "Include upstream tasks", "store_true"), 'only_failed': Arg( ("-f", "--only_failed"), "Only failed jobs", "store_true"), 'only_running': Arg( ("-r", "--only_running"), "Only running jobs", "store_true"), 'downstream': Arg( ("-d", "--downstream"), "Include downstream tasks", "store_true"), 'exclude_subdags': Arg( ("-x", "--exclude_subdags"), "Exclude subdags", "store_true"), 'exclude_parentdag': Arg( ("-xp", "--exclude_parentdag"), "Exclude ParentDAGS if the task cleared is a part of a SubDAG", "store_true"), 'dag_regex': Arg( ("-dx", "--dag_regex"), "Search dag_id as regex instead of exact string", "store_true"), # show_dag 'save': Arg( ("-s", "--save"), "Saves the result to the indicated file.\n" "\n" "The file format is determined by the file extension. 
For more information about supported " "format, see: https://www.graphviz.org/doc/info/output.html\n" "\n" "If you want to create a PNG file then you should execute the following command:\n" "airflow dags show <DAG_ID> --save output.png\n" "\n" "If you want to create a DOT file then you should execute the following command:\n" "airflow dags show <DAG_ID> --save output.dot\n" ), 'imgcat': Arg( ("--imgcat", ), "Displays graph using the imgcat tool. \n" "\n" "For more information, see: https://www.iterm2.com/documentation-images.html", action='store_true'), # trigger_dag 'run_id': Arg(("-r", "--run_id"), "Helps to identify this run"), 'conf': Arg( ('-c', '--conf'), "JSON string that gets pickled into the DagRun's conf attribute"), 'exec_date': Arg( ("-e", "--exec_date"), help="The execution date of the DAG", type=parsedate), # pool 'pool_name': Arg( ("pool",), metavar='NAME', help="Pool name"), 'pool_slots': Arg( ("slots",), type=int, help="Pool slots"), 'pool_description': Arg( ("description",), help="Pool description"), 'pool_import': Arg( ("file",), metavar="FILEPATH", help="Import pool from JSON file"), 'pool_export': Arg( ("file",), metavar="FILEPATH", help="Export pool to JSON file"), # variables 'var': Arg( ("key",), help="Variable key"), 'var_value': Arg( ("value",), metavar='VALUE', help="Variable value"), 'default': Arg( ("-d", "--default"), metavar="VAL", default=None, help="Default value returned if variable does not exist"), 'json': Arg( ("-j", "--json"), help="Deserialize JSON variable", action="store_true"), 'var_import': Arg( ("file",), help="Import variables from JSON file"), 'var_export': Arg( ("file",), help="Export variables to JSON file"), # kerberos 'principal': Arg( ("principal",), "kerberos principal", nargs='?'), 'keytab': Arg( ("-kt", "--keytab"), "keytab", nargs='?', default=conf.get('kerberos', 'keytab')), # run # TODO(aoen): "force" is a poor choice of name here since it implies it overrides # all dependencies (not just past success), e.g. the ignore_depends_on_past # dependency. This flag should be deprecated and renamed to 'ignore_ti_state' and # the "ignore_all_dependencies" command should be called the"force" command # instead. 'interactive': Arg( ('-int', '--interactive'), help='Do not capture standard output and error streams ' '(useful for interactive debugging)', action='store_true'), 'force': Arg( ("-f", "--force"), "Ignore previous task instance state, rerun regardless if task already " "succeeded/failed", "store_true"), 'raw': Arg(("-r", "--raw"), argparse.SUPPRESS, "store_true"), 'ignore_all_dependencies': Arg( ("-A", "--ignore_all_dependencies"), "Ignores all non-critical dependencies, including ignore_ti_state and " "ignore_task_deps", "store_true"), # TODO(aoen): ignore_dependencies is a poor choice of name here because it is too # vague (e.g. a task being in the appropriate state to be run is also a dependency # but is not ignored by this flag), the name 'ignore_task_dependencies' is # slightly better (as it ignores all dependencies that are specific to the task), # so deprecate the old command name and use this instead. 'ignore_dependencies': Arg( ("-i", "--ignore_dependencies"), "Ignore task-specific dependencies, e.g. 
upstream, depends_on_past, and " "retry delay dependencies", "store_true"), 'ignore_depends_on_past': Arg( ("-I", "--ignore_depends_on_past"), "Ignore depends_on_past dependencies (but respect " "upstream dependencies)", "store_true"), 'ship_dag': Arg( ("--ship_dag",), "Pickles (serializes) the DAG and ships it to the worker", "store_true"), 'pickle': Arg( ("-p", "--pickle"), "Serialized pickle object of the entire dag (used internally)"), 'job_id': Arg(("-j", "--job_id"), argparse.SUPPRESS), 'cfg_path': Arg( ("--cfg_path",), "Path to config file to use instead of airflow.cfg"), # webserver 'port': Arg( ("-p", "--port"), default=conf.get('webserver', 'WEB_SERVER_PORT'), type=int, help="The port on which to run the server"), 'ssl_cert': Arg( ("--ssl_cert",), default=conf.get('webserver', 'WEB_SERVER_SSL_CERT'), help="Path to the SSL certificate for the webserver"), 'ssl_key': Arg( ("--ssl_key",), default=conf.get('webserver', 'WEB_SERVER_SSL_KEY'), help="Path to the key to use with the SSL certificate"), 'workers': Arg( ("-w", "--workers"), default=conf.get('webserver', 'WORKERS'), type=int, help="Number of workers to run the webserver on"), 'workerclass': Arg( ("-k", "--workerclass"), default=conf.get('webserver', 'WORKER_CLASS'), choices=['sync', 'eventlet', 'gevent', 'tornado'], help="The worker class to use for Gunicorn"), 'worker_timeout': Arg( ("-t", "--worker_timeout"), default=conf.get('webserver', 'WEB_SERVER_WORKER_TIMEOUT'), type=int, help="The timeout for waiting on webserver workers"), 'hostname': Arg( ("-hn", "--hostname"), default=conf.get('webserver', 'WEB_SERVER_HOST'), help="Set the hostname on which to run the web server"), 'debug': Arg( ("-d", "--debug"), "Use the server that ships with Flask in debug mode", "store_true"), 'access_logfile': Arg( ("-A", "--access_logfile"), default=conf.get('webserver', 'ACCESS_LOGFILE'), help="The logfile to store the webserver access log. Use '-' to print to " "stderr."), 'error_logfile': Arg( ("-E", "--error_logfile"), default=conf.get('webserver', 'ERROR_LOGFILE'), help="The logfile to store the webserver error log. 
Use '-' to print to " "stderr."), # scheduler 'dag_id_opt': Arg(("-d", "--dag_id"), help="The id of the dag to run"), 'num_runs': Arg( ("-n", "--num_runs"), default=conf.getint('scheduler', 'num_runs'), type=int, help="Set the number of runs to execute before exiting"), # worker 'do_pickle': Arg( ("-p", "--do_pickle"), default=False, help=( "Attempt to pickle the DAG object to send over " "to the workers, instead of letting workers run their version " "of the code."), action="store_true"), 'queues': Arg( ("-q", "--queues"), help="Comma delimited list of queues to serve", default=conf.get('celery', 'DEFAULT_QUEUE')), 'concurrency': Arg( ("-c", "--concurrency"), type=int, help="The number of worker processes", default=conf.get('celery', 'worker_concurrency')), 'celery_hostname': Arg( ("-cn", "--celery_hostname"), help=("Set the hostname of celery worker " "if you have multiple workers on a single machine.")), # flower 'broker_api': Arg(("-a", "--broker_api"), help="Broker api"), 'flower_hostname': Arg( ("-hn", "--hostname"), default=conf.get('celery', 'FLOWER_HOST'), help="Set the hostname on which to run the server"), 'flower_port': Arg( ("-p", "--port"), default=conf.get('celery', 'FLOWER_PORT'), type=int, help="The port on which to run the server"), 'flower_conf': Arg( ("-fc", "--flower_conf"), help="Configuration file for flower"), 'flower_url_prefix': Arg( ("-u", "--url_prefix"), default=conf.get('celery', 'FLOWER_URL_PREFIX'), help="URL prefix for Flower"), 'flower_basic_auth': Arg( ("-ba", "--basic_auth"), default=conf.get('celery', 'FLOWER_BASIC_AUTH'), help=("Securing Flower with Basic Authentication. " "Accepts user:password pairs separated by a comma. " "Example: flower_basic_auth = user1:password1,user2:password2")), 'task_params': Arg( ("-tp", "--task_params"), help="Sends a JSON params dict to the task"), 'post_mortem': Arg( ("-pm", "--post_mortem"), action="store_true", help="Open debugger on uncaught exception", ), # connections 'conn_id': Arg( ('conn_id',), help='Connection id, required to add/delete a connection', type=str), 'conn_uri': Arg( ('--conn_uri',), help='Connection URI, required to add a connection without conn_type', type=str), 'conn_type': Arg( ('--conn_type',), help='Connection type, required to add a connection without conn_uri', type=str), 'conn_host': Arg( ('--conn_host',), help='Connection host, optional when adding a connection', type=str), 'conn_login': Arg( ('--conn_login',), help='Connection login, optional when adding a connection', type=str), 'conn_password': Arg( ('--conn_password',), help='Connection password, optional when adding a connection', type=str), 'conn_schema': Arg( ('--conn_schema',), help='Connection schema, optional when adding a connection', type=str), 'conn_port': Arg( ('--conn_port',), help='Connection port, optional when adding a connection', type=str), 'conn_extra': Arg( ('--conn_extra',), help='Connection `Extra` field, optional when adding a connection', type=str), # users 'username': Arg( ('--username',), help='Username of the user', required=True, type=str), 'username_optional': Arg( ('--username',), help='Username of the user', type=str), 'firstname': Arg( ('--firstname',), help='First name of the user', required=True, type=str), 'lastname': Arg( ('--lastname',), help='Last name of the user', required=True, type=str), 'role': Arg( ('--role',), help='Role of the user. 
Existing roles include Admin, ' 'User, Op, Viewer, and Public.', required=True, type=str, ), 'email': Arg( ('--email',), help='Email of the user', required=True, type=str), 'email_optional': Arg( ('--email',), help='Email of the user', type=str), 'password': Arg( ('--password',), help='Password of the user, required to create a user ' 'without --use_random_password', type=str), 'use_random_password': Arg( ('--use_random_password',), help='Do not prompt for password. Use random string instead.' ' Required to create a user without --password ', default=False, action='store_true'), 'user_import': Arg( ("import",), metavar="FILEPATH", help="Import users from JSON file. Example format:" + textwrap.dedent(''' [ { "email": "foo@bar.org", "firstname": "Jon", "lastname": "Doe", "roles": ["Public"], "username": "jondoe" } ]'''), ), 'user_export': Arg( ("export",), metavar="FILEPATH", help="Export users to JSON file"), # roles 'create_role': Arg( ('-c', '--create'), help='Create a new role', action='store_true'), 'list_roles': Arg( ('-l', '--list'), help='List roles', action='store_true'), 'roles': Arg( ('role',), help='The name of a role', nargs='*'), 'autoscale': Arg( ('-a', '--autoscale'), help="Minimum and Maximum number of worker to autoscale"), 'skip_serve_logs': Arg( ("-s", "--skip_serve_logs"), default=False, help="Don't start the serve logs process along with the workers.", action="store_true"), } subparsers = ( { 'help': 'List and manage DAGs', 'name': 'dags', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_list_dags'), 'name': 'list', 'help': "List all the DAGs", 'args': ('subdir', 'report'), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_list_dag_runs'), 'name': 'list_runs', 'help': "List dag runs given a DAG id. If state option is given, it will only " "search for all the dagruns with the given state. 
" "If no_backfill option is given, it will filter out " "all backfill dagruns for given dag id.", 'args': ('dag_id', 'no_backfill', 'state', 'output',), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_list_jobs'), 'name': 'list_jobs', 'help': "List the jobs", 'args': ('dag_id_opt', 'state', 'limit', 'output',), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_state'), 'name': 'state', 'help': "Get the status of a dag run", 'args': ('dag_id', 'execution_date', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_next_execution'), 'name': 'next_execution', 'help': "Get the next execution datetime of a DAG.", 'args': ('dag_id', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_pause'), 'name': 'pause', 'help': 'Pause a DAG', 'args': ('dag_id', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_unpause'), 'name': 'unpause', 'help': 'Resume a paused DAG', 'args': ('dag_id', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_trigger'), 'name': 'trigger', 'help': 'Trigger a DAG run', 'args': ('dag_id', 'subdir', 'run_id', 'conf', 'exec_date'), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_delete'), 'name': 'delete', 'help': "Delete all DB records related to the specified DAG", 'args': ('dag_id', 'yes'), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_show'), 'name': 'show', 'help': "Displays DAG's tasks with their dependencies", 'args': ('dag_id', 'subdir', 'save', 'imgcat',), }, { 'func': lazy_load_command('airflow.cli.commands.dag_command.dag_backfill'), 'name': 'backfill', 'help': "Run subsections of a DAG for a specified date range. " "If reset_dag_run option is used," " backfill will first prompt users whether airflow " "should clear all the previous dag_run and task_instances " "within the backfill date range. " "If rerun_failed_tasks is used, backfill " "will auto re-run the previous failed task instances" " within the backfill date range.", 'args': ( 'dag_id', 'task_regex', 'start_date', 'end_date', 'mark_success', 'local', 'donot_pickle', 'yes', 'bf_ignore_dependencies', 'bf_ignore_first_depends_on_past', 'subdir', 'pool', 'delay_on_limit', 'dry_run', 'verbose', 'conf', 'reset_dag_run', 'rerun_failed_tasks', 'run_backwards' ), }, ), }, { 'help': 'List and manage tasks', 'name': 'tasks', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.task_command.task_list'), 'name': 'list', 'help': "List the tasks within a DAG", 'args': ('dag_id', 'tree', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.task_command.task_clear'), 'name': 'clear', 'help': "Clear a set of task instance, as if they never ran", 'args': ( 'dag_id', 'task_regex', 'start_date', 'end_date', 'subdir', 'upstream', 'downstream', 'yes', 'only_failed', 'only_running', 'exclude_subdags', 'exclude_parentdag', 'dag_regex'), }, { 'func': lazy_load_command('airflow.cli.commands.task_command.task_state'), 'name': 'state', 'help': "Get the status of a task instance", 'args': ('dag_id', 'task_id', 'execution_date', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.task_command.task_failed_deps'), 'name': 'failed_deps', 'help': ( "Returns the unmet dependencies for a task instance from the perspective " "of the scheduler. 
In other words, why a task instance doesn't get " "scheduled and then queued by the scheduler, and then run by an " "executor)."), 'args': ('dag_id', 'task_id', 'execution_date', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.task_command.task_render'), 'name': 'render', 'help': "Render a task instance's template(s)", 'args': ('dag_id', 'task_id', 'execution_date', 'subdir'), }, { 'func': lazy_load_command('airflow.cli.commands.task_command.task_run'), 'name': 'run', 'help': "Run a single task instance", 'args': ( 'dag_id', 'task_id', 'execution_date', 'subdir', 'mark_success', 'force', 'pool', 'cfg_path', 'local', 'raw', 'ignore_all_dependencies', 'ignore_dependencies', 'ignore_depends_on_past', 'ship_dag', 'pickle', 'job_id', 'interactive',), }, { 'func': lazy_load_command('airflow.cli.commands.task_command.task_test'), 'name': 'test', 'help': ( "Test a task instance. This will run a task without checking for " "dependencies or recording its state in the database."), 'args': ( 'dag_id', 'task_id', 'execution_date', 'subdir', 'dry_run', 'task_params', 'post_mortem'), }, ), }, { 'help': "CRUD operations on pools", 'name': 'pools', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.pool_command.pool_list'), 'name': 'list', 'help': 'List pools', 'args': ('output',), }, { 'func': lazy_load_command('airflow.cli.commands.pool_command.pool_get'), 'name': 'get', 'help': 'Get pool size', 'args': ('pool_name', 'output',), }, { 'func': lazy_load_command('airflow.cli.commands.pool_command.pool_set'), 'name': 'set', 'help': 'Configure pool', 'args': ('pool_name', 'pool_slots', 'pool_description', 'output',), }, { 'func': lazy_load_command('airflow.cli.commands.pool_command.pool_delete'), 'name': 'delete', 'help': 'Delete pool', 'args': ('pool_name', 'output',), }, { 'func': lazy_load_command('airflow.cli.commands.pool_command.pool_import'), 'name': 'import', 'help': 'Import pool', 'args': ('pool_import', 'output',), }, { 'func': lazy_load_command('airflow.cli.commands.pool_command.pool_export'), 'name': 'export', 'help': 'Export pool', 'args': ('pool_export', 'output',), }, ), }, { 'help': "CRUD operations on variables", 'name': 'variables', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.variable_command.variables_list'), 'name': 'list', 'help': 'List variables', 'args': (), }, { 'func': lazy_load_command('airflow.cli.commands.variable_command.variables_get'), 'name': 'get', 'help': 'Get variable', 'args': ('var', 'json', 'default'), }, { 'func': lazy_load_command('airflow.cli.commands.variable_command.variables_set'), 'name': 'set', 'help': 'Set variable', 'args': ('var', 'var_value', 'json'), }, { 'func': lazy_load_command('airflow.cli.commands.variable_command.variables_delete'), 'name': 'delete', 'help': 'Delete variable', 'args': ('var',), }, { 'func': lazy_load_command('airflow.cli.commands.variable_command.variables_import'), 'name': 'import', 'help': 'Import variables', 'args': ('var_import',), }, { 'func': lazy_load_command('airflow.cli.commands.variable_command.variables_export'), 'name': 'export', 'help': 'Export variables', 'args': ('var_export',), }, ), "args": ('set', 'get', 'json', 'default', 'var_import', 'var_export', 'var_delete'), }, { 'help': "Database operations", 'name': 'db', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.db_command.initdb'), 'name': 'init', 'help': "Initialize the metadata database", 'args': (), }, { 'func': lazy_load_command('airflow.cli.commands.db_command.resetdb'), 'name': 'reset', 
'help': "Burn down and rebuild the metadata database", 'args': ('yes',), }, { 'func': lazy_load_command('airflow.cli.commands.db_command.upgradedb'), 'name': 'upgrade', 'help': "Upgrade the metadata database to latest version", 'args': tuple(), }, { 'func': lazy_load_command('airflow.cli.commands.db_command.shell'), 'name': 'shell', 'help': "Runs a shell to access the database", 'args': tuple(), }, ), }, { 'name': 'kerberos', 'func': lazy_load_command('airflow.cli.commands.kerberos_command.kerberos'), 'help': "Start a kerberos ticket renewer", 'args': ('principal', 'keytab', 'pid', 'daemon', 'stdout', 'stderr', 'log_file'), }, { 'name': 'serve_logs', 'func': lazy_load_command('airflow.cli.commands.serve_logs_command.serve_logs'), 'help': "Serve logs generate by worker", 'args': tuple(), }, { 'name': 'webserver', 'func': lazy_load_command('airflow.cli.commands.webserver_command.webserver'), 'help': "Start a Airflow webserver instance", 'args': ('port', 'workers', 'workerclass', 'worker_timeout', 'hostname', 'pid', 'daemon', 'stdout', 'stderr', 'access_logfile', 'error_logfile', 'log_file', 'ssl_cert', 'ssl_key', 'debug'), }, { 'name': 'scheduler', 'func': lazy_load_command('airflow.cli.commands.scheduler_command.scheduler'), 'help': "Start a scheduler instance", 'args': ('dag_id_opt', 'subdir', 'num_runs', 'do_pickle', 'pid', 'daemon', 'stdout', 'stderr', 'log_file'), }, { 'name': 'worker', 'func': lazy_load_command('airflow.cli.commands.worker_command.worker'), 'help': "Start a Celery worker node", 'args': ('do_pickle', 'queues', 'concurrency', 'celery_hostname', 'pid', 'daemon', 'stdout', 'stderr', 'log_file', 'autoscale', 'skip_serve_logs'), }, { 'name': 'flower', 'func': lazy_load_command('airflow.cli.commands.flower_command.flower'), 'help': "Start a Celery Flower", 'args': ('flower_hostname', 'flower_port', 'flower_conf', 'flower_url_prefix', 'flower_basic_auth', 'broker_api', 'pid', 'daemon', 'stdout', 'stderr', 'log_file'), }, { 'name': 'version', 'func': lazy_load_command('airflow.cli.commands.version_command.version'), 'help': "Show the version", 'args': tuple(), }, { 'help': "List/Add/Delete connections", 'name': 'connections', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.connection_command.connections_list'), 'name': 'list', 'help': 'List connections', 'args': ('output',), }, { 'func': lazy_load_command('airflow.cli.commands.connection_command.connections_add'), 'name': 'add', 'help': 'Add a connection', 'args': ('conn_id', 'conn_uri', 'conn_extra') + tuple(alternative_conn_specs), }, { 'func': lazy_load_command('airflow.cli.commands.connection_command.connections_delete'), 'name': 'delete', 'help': 'Delete a connection', 'args': ('conn_id',), }, ), }, { 'help': "List/Create/Delete/Update users", 'name': 'users', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.user_command.users_list'), 'name': 'list', 'help': 'List users', 'args': ('output',), }, { 'func': lazy_load_command('airflow.cli.commands.user_command.users_create'), 'name': 'create', 'help': 'Create a user', 'args': ('role', 'username', 'email', 'firstname', 'lastname', 'password', 'use_random_password') }, { 'func': lazy_load_command('airflow.cli.commands.user_command.users_delete'), 'name': 'delete', 'help': 'Delete a user', 'args': ('username',), }, { 'func': lazy_load_command('airflow.cli.commands.user_command.add_role'), 'name': 'add_role', 'help': 'Add role to a user', 'args': ('username_optional', 'email_optional', 'role'), }, { 'func': 
lazy_load_command('airflow.cli.commands.user_command.remove_role'), 'name': 'remove_role', 'help': 'Remove role from a user', 'args': ('username_optional', 'email_optional', 'role'), }, { 'func': lazy_load_command('airflow.cli.commands.user_command.users_import'), 'name': 'import', 'help': 'Import a user', 'args': ('user_import',), }, { 'func': lazy_load_command('airflow.cli.commands.user_command.users_export'), 'name': 'export', 'help': 'Export a user', 'args': ('user_export',), }, ), }, { 'help': 'Create/List roles', 'name': 'roles', 'subcommands': ( { 'func': lazy_load_command('airflow.cli.commands.role_command.roles_list'), 'name': 'list', 'help': 'List roles', 'args': ('output',), }, { 'func': lazy_load_command('airflow.cli.commands.role_command.roles_create'), 'name': 'create', 'help': 'Create role', 'args': ('roles',), }, ), }, { 'name': 'sync_perm', 'func': lazy_load_command('airflow.cli.commands.sync_perm_command.sync_perm'), 'help': "Update permissions for existing roles and DAGs.", 'args': tuple(), }, { 'name': 'rotate_fernet_key', 'func': lazy_load_command('airflow.cli.commands.rotate_fernet_key_command.rotate_fernet_key'), 'help': 'Rotate all encrypted connection credentials and variables; see ' 'https://airflow.readthedocs.io/en/stable/howto/secure-connections.html' '#rotating-encryption-keys.', 'args': (), }, ) subparsers_dict = {sp.get('name') or sp['func'].__name__: sp for sp in subparsers} # type: ignore dag_subparsers = ( 'list_tasks', 'backfill', 'test', 'run', 'pause', 'unpause', 'list_dag_runs') @classmethod def get_parser(cls, dag_parser=False): """Creates and returns command line argument parser""" parser = argparse.ArgumentParser() subparsers = parser.add_subparsers( help='sub-command help', dest='subcommand') subparsers.required = True subparser_list = cls.dag_subparsers if dag_parser else cls.subparsers_dict.keys() for sub in sorted(subparser_list): sub = cls.subparsers_dict[sub] cls._add_subcommand(subparsers, sub) return parser @classmethod def _add_subcommand(cls, subparsers, sub): dag_parser = False sub_proc = subparsers.add_parser( sub.get('name') or sub['func'].__name__, help=sub['help'] # type: ignore ) sub_proc.formatter_class = RawTextHelpFormatter subcommands = sub.get('subcommands', []) if subcommands: sub_subparsers = sub_proc.add_subparsers(dest='subcommand') sub_subparsers.required = True for command in subcommands: cls._add_subcommand(sub_subparsers, command) else: for arg in sub['args']: if 'dag_id' in arg and dag_parser: continue arg = cls.args[arg] kwargs = { f: v for f, v in vars(arg).items() if f != 'flags' and v} sub_proc.add_argument(*arg.flags, **kwargs) sub_proc.set_defaults(func=sub['func']) def get_parser(): """Calls static method inside factory which creates argument parser""" return CLIFactory.get_parser()
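

# Illustrative usage sketch (added for this edit; not part of the original
# module). Every subcommand above is registered with
# set_defaults(func=sub['func']), so after parsing, dispatch is simply
# args.func(args). Run directly, this behaves like a minimal 'airflow'
# entrypoint; the real package installs a console script instead.
if __name__ == '__main__':
    _parser = get_parser()
    _args = _parser.parse_args()   # e.g. ['dags', 'list']
    _args.func(_args)              # invokes the lazily loaded command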
Fokko/incubator-airflow
airflow/bin/cli.py
Python
apache-2.0
44,259
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################

import G2R


class HpcBgSp(G2R.SpSyntax):
    def Show(self, Flag, Attrs, UT, Tmp, FS):
        sw = ''
        AttrsOrg = dict(Attrs)
        # resolve the HPC attributes and the background attributes
        nameHpc, AttrsHpc = self.Check(Flag['hpc'], Attrs['hpc'], UT, FS)
        nameBg, AttrsBg = self.Check(Flag, Attrs[Flag], UT, FS)
        if Attrs['t'] == 'Hide':
            Attrs['t'] = 'True'
        # emit the renpy 'call HPC(...)' statement piece by piece
        sw = ' call HPC('
        sw += "ModeM='" + AttrsHpc['mm'] + "',ModeS='" + AttrsHpc['ms'] + "',Hi=" + AttrsHpc['hide'] + ','
        sw += "Owner='" + AttrsOrg['hpc']['o'] + "',Pos=" + AttrsHpc['l'] + ',Trans=' + AttrsHpc['t'] + ','
        sw += 'Bg=(' + nameBg + AttrsBg['s'] + AttrsBg['w'] + 'HPC,' + AttrsBg['l'] + ','
        # PC mode scales the background slightly larger than the other modes
        if AttrsHpc['mm'] == 'PC':
            sw += AttrsBg['l'] + ',0.9)\n'
        else:
            sw += AttrsBg['l'] + ',0.8)\n'
        return sw
dtysky/Gal2Renpy
Gal2Renpy/SpSyntax/HpcBgSp.py
Python
mit
750
import pandas as pd
masterxyth/bcvotes
testing.py
Python
gpl-3.0
20
""" Processes to fetch data from GBIF data base Author: Nils Hempelmann (info@nilshempelmann.de) """ from pywps.Process import WPSProcess from flyingpigeon.log import init_process_logger import logging logger = logging.getLogger(__name__) class GBIFfetchProcess(WPSProcess): def __init__(self): WPSProcess.__init__( self, identifier="sdm_gbiffetch", title="SDM -- GBIF data fetch", version="0.1", metadata=[ {"title": "Documentation", "href": "http://flyingpigeon.readthedocs.io/en/latest/"}, {"title": "GBIF", "href": "http://gbif.org/"} ], abstract="Species occurence search in Global Biodiversity \ Infrastructure Facillity (GBIF)", statusSupported=True, storeSupported=True ) # Literal Input Data # ------------------ self.taxon_name = self.addLiteralInput( identifier="taxon_name", title="Tree Species", abstract="Scientific name of tree species", type=type(''), minOccurs=1, maxOccurs=1, default='Fagus sylvatica' ) self.BBox = self.addBBoxInput( identifier="BBox", title="Bounding Box", abstract="coordinates to define the region for occurence data fetch", minOccurs=1, maxOccurs=1, crss=['EPSG:4326'] ) ########### # OUTPUTS ########### self.output_csv = self.addComplexOutput( identifier="output_csv", title="Tree species table", abstract="Extracted CSV file containing the tree species table ", formats=[{"mimeType": "text/csv"}], asReference=True, ) self.output_map = self.addComplexOutput( identifier="output_map", title="Graphic of species occurences", abstract="PNG graphic file showing the presence of tree species \ according to GBIF data fetch", formats=[{"mimeType": "image/png"}], asReference=True, ) self.output_log = self.addComplexOutput( identifier="output_log", title="Logging information", abstract="Collected logs during process run.", formats=[{"mimeType": "text/plain"}], asReference=True, ) def execute(self): init_process_logger('log.txt') self.output_log.setValue('log.txt') self.status.set('Start process', 0) from flyingpigeon import sdm try: logger.info('reading the arguments') taxon_name = self.getInputValues(identifier='taxon_name')[0] bbox_obj = self.BBox.getValue() bbox = [bbox_obj.coords[0][0], bbox_obj.coords[0][1], bbox_obj.coords[1][0], bbox_obj.coords[1][1]] logger.info("bbox={0}".format(bbox)) logger.info("Taxon Name = %s", taxon_name) except Exception as e: msg = 'failed to read in the arguments.' logger.exception(msg) raise Exception(msg) try: self.status.set('Fetching GBIF Data', 10) gbifdic = sdm.get_gbif(taxon_name, bbox=bbox) except Exception as e: msg = 'failed to search gbif.' logger.exception(msg) raise Exception(msg) try: self.status.set('write csv file', 70) gbifcsv = sdm.gbifdic2csv(gbifdic) except Exception as e: msg = 'failed to write csv file.' logger.exception(msg) raise Exception(msg) try: self.status.set('plot map', 80) from flyingpigeon.visualisation import map_gbifoccurrences latlon = sdm.latlon_gbifdic(gbifdic) occurence_map = map_gbifoccurrences(latlon) except Exception as e: msg = 'failed to plot occurence map.' logger.exception(msg) raise Exception(msg) self.output_map.setValue(occurence_map) self.output_csv.setValue(gbifcsv) # write folder statistics: import shlex import subprocess import os import socket logger.debug('HOSTNAME: %s ' % socket.gethostname()) cmd = 'stat %s/' % os.path.abspath(os.curdir) args = shlex.split(cmd) output, error = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate() logger.debug('temp folder statistics: %s ERRORS: %s' % (output, error)) self.status.set('done', 100)
KatiRG/flyingpigeon
flyingpigeon/processes/wps_sdm_gbiffetch.py
Python
apache-2.0
4,860
import shutil
import os
import hashlib

import dockbot


def gen_hash(data):
    return hashlib.sha256(data).hexdigest()


class Image(object):
    def __init__(self, root, name, path, platform = None, projects = [],
                 modes = None, slave = False, remote = False):
        self.root = root
        self.conf = root.conf
        self.name = name
        self.qname = self.conf['namespace'] + '-' + name
        self.platform = platform
        self.modes = modes

        if not remote:
            self.path = path
            self.dir = os.path.dirname(path)
            self.context = self.conf.get_list('context', platform, slave)

        self.containers = []

        if not slave:
            self.context += [
                dockbot.get_resource('dockbot/data/master/nginx.conf')]
            self.containers.append(dockbot.Master(self))
            return

        # Slave only from here
        for mode in modes:
            self.containers.append(self.create_slave(mode))

        # Slave projects
        self.projects = set()
        for project in projects:
            self.projects.update(self.conf.get_project_deps(project))

        # Get project overrides
        self.project_overrides = \
            self.conf.get_sub_key(platform).get('projects', {})

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return not self.__eq__(other)

    def kind(self):
        return 'Image'

    def create_slave(self, mode):
        return dockbot.Slave(self, mode)

    def is_running(self):
        for container in self.containers:
            if container.is_running():
                return True
        return False

    def get_context_path(self):
        return 'run/docker/' + self.name

    def get_hash_path(self):
        return self.get_context_path() + '.sha256'

    def get_data_hash(self):
        path = self.get_hash_path()
        if os.path.exists(path):
            f = None
            try:
                f = open(path, 'rt')
                return f.read()
            finally:
                if f is not None:
                    f.close()

    def is_dirty(self):
        if dockbot.args.force:
            return False
        return self.get_data_hash() != gen_hash(self.gen_dockerfile())

    def gen_dockerfile(self):
        libpath = [os.path.dirname(self.path)]
        libpath += self.conf.get('libpath', ['lib'])
        libpath += [dockbot.get_resource('dockbot/data/lib')]

        cmd = ['m4'] + sum([['-I', x] for x in libpath], []) + [self.path]
        ret, out, err = dockbot.system(cmd, True)

        if ret:
            raise dockbot.Error('Failed to construct Docker file: ' +
                                err.decode('utf-8'))

        return out

    def get_project(self, name):
        import copy

        for project in self.conf.projects:
            if project['name'] == name:
                p = copy.deepcopy(project)
                p.update(self.project_overrides.get(name, {}))
                return p

        raise dockbot.Error('Project "%s" not found' % name)

    def exists(self):
        return dockbot.inspect(self.qname) != dockbot.NOT_FOUND

    def cmd_delete(self):
        if self.exists():
            for container in self.containers:
                container.cmd_delete()

            dockbot.status_line(self.qname, *dockbot.DELETING)
            dockbot.system(['docker', 'rmi', '--no-prune', self.qname], True,
                           'remove image')

    def container_exists(self):
        for container in self.containers:
            if container.exists():
                return True
        return False

    def cmd_status(self):
        for container in self.containers:
            container.cmd_status()

    def cmd_config(self):
        for container in self.containers:
            container.cmd_config()

    def cmd_shell(self):
        raise dockbot.Error('Cannot open shell in image')

    def cmd_start(self):
        for container in self.containers:
            container.cmd_start()

    def cmd_stop(self):
        for container in self.containers:
            container.cmd_stop()

    def cmd_restart(self):
        self.cmd_stop()
        self.cmd_start()

    def cmd_build(self):
        # Check if image is running
        if self.is_running():
            if dockbot.args.all and (self.is_dirty() or dockbot.args.force):
                self.cmd_stop()
            else:
                dockbot.status_line(self.qname, *dockbot.RUNNING)
                return

        if self.is_dirty() or dockbot.args.force:
            self.cmd_delete()  # Delete image if it exists
        elif self.exists():
            dockbot.status_line(self.qname, *dockbot.BUILT)
            return

        dockbot.status_line(self.qname, *dockbot.BUILDING)

        # Generate Dockerfile
        data = self.gen_dockerfile()
        data_hash = gen_hash(data)

        # Clean up old context
        ctx_path = self.get_context_path()
        if os.path.exists(ctx_path):
            shutil.rmtree(ctx_path)

        # Construct Dockerfile
        os.makedirs(ctx_path)
        dockerfile = ctx_path + '/Dockerfile'
        f = None
        try:
            f = open(dockerfile, 'w')
            f.write(data.decode('utf-8'))
            f.close()

            f = open(self.get_hash_path(), 'w')
            f.write(data_hash)
        finally:
            if f is not None:
                f.close()

        # Link context
        for path in self.context:
            target = os.path.join(ctx_path, os.path.basename(path))
            if dockbot.args.verbose:
                print('%s -> %s' % (path, target))
            shutil.copy(path, target)

        # Build command
        cmd = ['docker', 'build', '--rm', '-t', self.qname]

        # Extra args
        cmd += dockbot.args.args

        # Do build
        dockbot.system(cmd + ['.'], False, 'build ' + self.qname,
                       cwd = ctx_path)

    def cmd_trigger(self):
        for container in self.containers:
            if isinstance(container, dockbot.Slave):
                container.cmd_trigger()

    def cmd_publish(self):
        for container in self.containers:
            container.cmd_publish()
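

# ---------------------------------------------------------------------------
# Minimal stand-alone sketch of the change-detection idiom used by
# Image.is_dirty()/cmd_build() above: an artifact is rebuilt only when the
# sha256 of its freshly generated content differs from the hash recorded at
# the previous build. The function and file names here are hypothetical,
# added only for illustration.

def needs_rebuild(generated, hash_path):
    """Return True when `generated` (bytes) no longer matches the recorded hash."""
    new_hash = hashlib.sha256(generated).hexdigest()
    if os.path.exists(hash_path):
        with open(hash_path, 'rt') as f:
            if f.read() == new_hash:
                return False  # content unchanged, keep the cached build
    return True

# e.g. needs_rebuild(b'FROM debian:stable', 'run/docker/example.sha256')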
CauldronDevelopmentLLC/dockbot
dockbot/Image.py
Python
gpl-3.0
6,147
#!/usr/bin/python

import logging

import pyvips

# enable debug logging so every libvips operation is traced
logging.basicConfig(level=logging.DEBUG)

# make a 100x100 one-band black image and save it in the native vips format
a = pyvips.Image.black(100, 100)
a.write_to_file("x.v")
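
# A small follow-up sketch (added for illustration; not in the original
# example): the .v file written above can be read straight back with pyvips.
b = pyvips.Image.new_from_file("x.v")
print(b.width, b.height, b.bands)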
kleisauke/pyvips
examples/try9.py
Python
mit
149
# This file was created automatically by SWIG 1.3.29. # Don't modify this file, modify the SWIG interface instead. import _richtext import new new_instancemethod = new.instancemethod def _swig_setattr_nondynamic(self,class_type,name,value,static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'PySwigObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name,None) if method: return method(self,value) if (not static) or hasattr(self,name): self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self,class_type,name,value): return _swig_setattr_nondynamic(self,class_type,name,value,0) def _swig_getattr(self,class_type,name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name,None) if method: return method(self) raise AttributeError,name def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) import types try: _object = types.ObjectType _newclass = 1 except AttributeError: class _object : pass _newclass = 0 del types def _swig_setattr_nondynamic_method(set): def set_attr(self,name,value): if (name == "thisown"): return self.this.own(value) if hasattr(self,name) or (name == "this"): set(self,name,value) else: raise AttributeError("You cannot add attributes to %s" % self) return set_attr import _windows import _core import _controls wx = _core __docfilter__ = wx.__DocFilter(globals()) #--------------------------------------------------------------------------- RICHTEXT_TYPE_ANY = _richtext.RICHTEXT_TYPE_ANY RICHTEXT_TYPE_TEXT = _richtext.RICHTEXT_TYPE_TEXT RICHTEXT_TYPE_XML = _richtext.RICHTEXT_TYPE_XML RICHTEXT_TYPE_HTML = _richtext.RICHTEXT_TYPE_HTML RICHTEXT_TYPE_RTF = _richtext.RICHTEXT_TYPE_RTF RICHTEXT_TYPE_PDF = _richtext.RICHTEXT_TYPE_PDF RICHTEXT_FIXED_WIDTH = _richtext.RICHTEXT_FIXED_WIDTH RICHTEXT_FIXED_HEIGHT = _richtext.RICHTEXT_FIXED_HEIGHT RICHTEXT_VARIABLE_WIDTH = _richtext.RICHTEXT_VARIABLE_WIDTH RICHTEXT_VARIABLE_HEIGHT = _richtext.RICHTEXT_VARIABLE_HEIGHT RICHTEXT_LAYOUT_SPECIFIED_RECT = _richtext.RICHTEXT_LAYOUT_SPECIFIED_RECT RICHTEXT_DRAW_IGNORE_CACHE = _richtext.RICHTEXT_DRAW_IGNORE_CACHE RICHTEXT_FORMATTED = _richtext.RICHTEXT_FORMATTED RICHTEXT_UNFORMATTED = _richtext.RICHTEXT_UNFORMATTED RICHTEXT_CACHE_SIZE = _richtext.RICHTEXT_CACHE_SIZE RICHTEXT_HEIGHT_ONLY = _richtext.RICHTEXT_HEIGHT_ONLY RICHTEXT_SETSTYLE_NONE = _richtext.RICHTEXT_SETSTYLE_NONE RICHTEXT_SETSTYLE_WITH_UNDO = _richtext.RICHTEXT_SETSTYLE_WITH_UNDO RICHTEXT_SETSTYLE_OPTIMIZE = _richtext.RICHTEXT_SETSTYLE_OPTIMIZE RICHTEXT_SETSTYLE_PARAGRAPHS_ONLY = _richtext.RICHTEXT_SETSTYLE_PARAGRAPHS_ONLY RICHTEXT_SETSTYLE_CHARACTERS_ONLY = _richtext.RICHTEXT_SETSTYLE_CHARACTERS_ONLY RICHTEXT_SETSTYLE_RENUMBER = _richtext.RICHTEXT_SETSTYLE_RENUMBER RICHTEXT_SETSTYLE_SPECIFY_LEVEL = _richtext.RICHTEXT_SETSTYLE_SPECIFY_LEVEL RICHTEXT_SETSTYLE_RESET = _richtext.RICHTEXT_SETSTYLE_RESET RICHTEXT_SETSTYLE_REMOVE = _richtext.RICHTEXT_SETSTYLE_REMOVE RICHTEXT_INSERT_NONE = _richtext.RICHTEXT_INSERT_NONE RICHTEXT_INSERT_WITH_PREVIOUS_PARAGRAPH_STYLE = _richtext.RICHTEXT_INSERT_WITH_PREVIOUS_PARAGRAPH_STYLE RICHTEXT_INSERT_INTERACTIVE = _richtext.RICHTEXT_INSERT_INTERACTIVE TEXT_ATTR_KEEP_FIRST_PARA_STYLE = _richtext.TEXT_ATTR_KEEP_FIRST_PARA_STYLE RICHTEXT_HITTEST_NONE = _richtext.RICHTEXT_HITTEST_NONE 
RICHTEXT_HITTEST_BEFORE = _richtext.RICHTEXT_HITTEST_BEFORE RICHTEXT_HITTEST_AFTER = _richtext.RICHTEXT_HITTEST_AFTER RICHTEXT_HITTEST_ON = _richtext.RICHTEXT_HITTEST_ON RICHTEXT_HITTEST_OUTSIDE = _richtext.RICHTEXT_HITTEST_OUTSIDE RICHTEXT_HITTEST_NO_NESTED_OBJECTS = _richtext.RICHTEXT_HITTEST_NO_NESTED_OBJECTS RICHTEXT_HITTEST_NO_FLOATING_OBJECTS = _richtext.RICHTEXT_HITTEST_NO_FLOATING_OBJECTS TEXT_BOX_ATTR_FLOAT = _richtext.TEXT_BOX_ATTR_FLOAT TEXT_BOX_ATTR_CLEAR = _richtext.TEXT_BOX_ATTR_CLEAR TEXT_BOX_ATTR_COLLAPSE_BORDERS = _richtext.TEXT_BOX_ATTR_COLLAPSE_BORDERS TEXT_BOX_ATTR_VERTICAL_ALIGNMENT = _richtext.TEXT_BOX_ATTR_VERTICAL_ALIGNMENT TEXT_BOX_ATTR_BOX_STYLE_NAME = _richtext.TEXT_BOX_ATTR_BOX_STYLE_NAME TEXT_ATTR_UNITS_TENTHS_MM = _richtext.TEXT_ATTR_UNITS_TENTHS_MM TEXT_ATTR_UNITS_PIXELS = _richtext.TEXT_ATTR_UNITS_PIXELS TEXT_ATTR_UNITS_PERCENTAGE = _richtext.TEXT_ATTR_UNITS_PERCENTAGE TEXT_ATTR_UNITS_POINTS = _richtext.TEXT_ATTR_UNITS_POINTS TEXT_ATTR_UNITS_MASK = _richtext.TEXT_ATTR_UNITS_MASK TEXT_BOX_ATTR_POSITION_STATIC = _richtext.TEXT_BOX_ATTR_POSITION_STATIC TEXT_BOX_ATTR_POSITION_RELATIVE = _richtext.TEXT_BOX_ATTR_POSITION_RELATIVE TEXT_BOX_ATTR_POSITION_ABSOLUTE = _richtext.TEXT_BOX_ATTR_POSITION_ABSOLUTE TEXT_BOX_ATTR_POSITION_MASK = _richtext.TEXT_BOX_ATTR_POSITION_MASK #--------------------------------------------------------------------------- class TextAttrDimension(object): """Proxy of C++ TextAttrDimension class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ __init__(self) -> TextAttrDimension __init__(self, int value, int units=TEXT_ATTR_UNITS_TENTHS_MM) -> TextAttrDimension """ _richtext.TextAttrDimension_swiginit(self,_richtext.new_TextAttrDimension(*args)) __swig_destroy__ = _richtext.delete_TextAttrDimension __del__ = lambda self : None; def Reset(*args, **kwargs): """Reset(self)""" return _richtext.TextAttrDimension_Reset(*args, **kwargs) def EqPartial(*args, **kwargs): """EqPartial(self, TextAttrDimension dim) -> bool""" return _richtext.TextAttrDimension_EqPartial(*args, **kwargs) def Apply(*args, **kwargs): """Apply(self, TextAttrDimension dim, TextAttrDimension compareWith=None) -> bool""" return _richtext.TextAttrDimension_Apply(*args, **kwargs) def CollectCommonAttributes(*args, **kwargs): """ CollectCommonAttributes(self, TextAttrDimension attr, TextAttrDimension clashingAttr, TextAttrDimension absentAttr) """ return _richtext.TextAttrDimension_CollectCommonAttributes(*args, **kwargs) def __eq__(*args, **kwargs): """__eq__(self, TextAttrDimension dim) -> bool""" return _richtext.TextAttrDimension___eq__(*args, **kwargs) def GetValue(*args, **kwargs): """GetValue(self) -> int""" return _richtext.TextAttrDimension_GetValue(*args, **kwargs) def GetValueMM(*args, **kwargs): """GetValueMM(self) -> float""" return _richtext.TextAttrDimension_GetValueMM(*args, **kwargs) def SetValueMM(*args, **kwargs): """SetValueMM(self, float value)""" return _richtext.TextAttrDimension_SetValueMM(*args, **kwargs) def SetValue(*args): """ SetValue(self, int value) SetValue(self, int value, TextAttrDimensionFlags flags) SetValue(self, TextAttrDimension dim) """ return _richtext.TextAttrDimension_SetValue(*args) def GetUnits(*args, **kwargs): """GetUnits(self) -> int""" return _richtext.TextAttrDimension_GetUnits(*args, **kwargs) def SetUnits(*args, **kwargs): """SetUnits(self, int units)""" return _richtext.TextAttrDimension_SetUnits(*args, **kwargs) def 
GetPosition(*args, **kwargs): """GetPosition(self) -> int""" return _richtext.TextAttrDimension_GetPosition(*args, **kwargs) def SetPosition(*args, **kwargs): """SetPosition(self, int pos)""" return _richtext.TextAttrDimension_SetPosition(*args, **kwargs) def GetFlags(*args, **kwargs): """GetFlags(self) -> TextAttrDimensionFlags""" return _richtext.TextAttrDimension_GetFlags(*args, **kwargs) def SetFlags(*args, **kwargs): """SetFlags(self, TextAttrDimensionFlags flags)""" return _richtext.TextAttrDimension_SetFlags(*args, **kwargs) m_value = property(_richtext.TextAttrDimension_m_value_get, _richtext.TextAttrDimension_m_value_set) m_flags = property(_richtext.TextAttrDimension_m_flags_get, _richtext.TextAttrDimension_m_flags_set) _richtext.TextAttrDimension_swigregister(TextAttrDimension) class TextAttrDimensions(object): """Proxy of C++ TextAttrDimensions class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self) -> TextAttrDimensions""" _richtext.TextAttrDimensions_swiginit(self,_richtext.new_TextAttrDimensions(*args, **kwargs)) __swig_destroy__ = _richtext.delete_TextAttrDimensions __del__ = lambda self : None; def Reset(*args, **kwargs): """Reset(self)""" return _richtext.TextAttrDimensions_Reset(*args, **kwargs) def __eq__(*args, **kwargs): """__eq__(self, TextAttrDimensions dims) -> bool""" return _richtext.TextAttrDimensions___eq__(*args, **kwargs) def EqPartial(*args, **kwargs): """EqPartial(self, TextAttrDimensions dims) -> bool""" return _richtext.TextAttrDimensions_EqPartial(*args, **kwargs) def Apply(*args, **kwargs): """Apply(self, TextAttrDimensions dims, TextAttrDimensions compareWith=None) -> bool""" return _richtext.TextAttrDimensions_Apply(*args, **kwargs) def CollectCommonAttributes(*args, **kwargs): """ CollectCommonAttributes(self, TextAttrDimensions attr, TextAttrDimensions clashingAttr, TextAttrDimensions absentAttr) """ return _richtext.TextAttrDimensions_CollectCommonAttributes(*args, **kwargs) def RemoveStyle(*args, **kwargs): """RemoveStyle(self, TextAttrDimensions attr) -> bool""" return _richtext.TextAttrDimensions_RemoveStyle(*args, **kwargs) def GetLeft(*args, **kwargs): """GetLeft(self) -> TextAttrDimension""" return _richtext.TextAttrDimensions_GetLeft(*args, **kwargs) def GetRight(*args, **kwargs): """GetRight(self) -> TextAttrDimension""" return _richtext.TextAttrDimensions_GetRight(*args, **kwargs) def GetTop(*args, **kwargs): """GetTop(self) -> TextAttrDimension""" return _richtext.TextAttrDimensions_GetTop(*args, **kwargs) def GetBottom(*args, **kwargs): """GetBottom(self) -> TextAttrDimension""" return _richtext.TextAttrDimensions_GetBottom(*args, **kwargs) def IsValid(*args, **kwargs): """IsValid(self) -> bool""" return _richtext.TextAttrDimensions_IsValid(*args, **kwargs) m_left = property(_richtext.TextAttrDimensions_m_left_get, _richtext.TextAttrDimensions_m_left_set) m_top = property(_richtext.TextAttrDimensions_m_top_get, _richtext.TextAttrDimensions_m_top_set) m_right = property(_richtext.TextAttrDimensions_m_right_get, _richtext.TextAttrDimensions_m_right_set) m_bottom = property(_richtext.TextAttrDimensions_m_bottom_get, _richtext.TextAttrDimensions_m_bottom_set) _richtext.TextAttrDimensions_swigregister(TextAttrDimensions) class TextAttrDimensionConverter(object): """Proxy of C++ TextAttrDimensionConverter class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') 
__repr__ = _swig_repr def __init__(self, *args): """ __init__(self, DC dc, double scale=1.0, Size parentSize=DefaultSize) -> TextAttrDimensionConverter __init__(self, int ppi, double scale=1.0, Size parentSize=DefaultSize) -> TextAttrDimensionConverter """ _richtext.TextAttrDimensionConverter_swiginit(self,_richtext.new_TextAttrDimensionConverter(*args)) __swig_destroy__ = _richtext.delete_TextAttrDimensionConverter __del__ = lambda self : None; def GetPixels(*args, **kwargs): """GetPixels(self, TextAttrDimension dim, int direction=HORIZONTAL) -> int""" return _richtext.TextAttrDimensionConverter_GetPixels(*args, **kwargs) def GetTenthsMM(*args, **kwargs): """GetTenthsMM(self, TextAttrDimension dim) -> int""" return _richtext.TextAttrDimensionConverter_GetTenthsMM(*args, **kwargs) def ConvertTenthsMMToPixels(*args, **kwargs): """ConvertTenthsMMToPixels(self, int units) -> int""" return _richtext.TextAttrDimensionConverter_ConvertTenthsMMToPixels(*args, **kwargs) def ConvertPixelsToTenthsMM(*args, **kwargs): """ConvertPixelsToTenthsMM(self, int pixels) -> int""" return _richtext.TextAttrDimensionConverter_ConvertPixelsToTenthsMM(*args, **kwargs) m_ppi = property(_richtext.TextAttrDimensionConverter_m_ppi_get, _richtext.TextAttrDimensionConverter_m_ppi_set) m_scale = property(_richtext.TextAttrDimensionConverter_m_scale_get, _richtext.TextAttrDimensionConverter_m_scale_set) m_parentSize = property(_richtext.TextAttrDimensionConverter_m_parentSize_get, _richtext.TextAttrDimensionConverter_m_parentSize_set) _richtext.TextAttrDimensionConverter_swigregister(TextAttrDimensionConverter) TEXT_BOX_ATTR_BORDER_NONE = _richtext.TEXT_BOX_ATTR_BORDER_NONE TEXT_BOX_ATTR_BORDER_SOLID = _richtext.TEXT_BOX_ATTR_BORDER_SOLID TEXT_BOX_ATTR_BORDER_DOTTED = _richtext.TEXT_BOX_ATTR_BORDER_DOTTED TEXT_BOX_ATTR_BORDER_DASHED = _richtext.TEXT_BOX_ATTR_BORDER_DASHED TEXT_BOX_ATTR_BORDER_DOUBLE = _richtext.TEXT_BOX_ATTR_BORDER_DOUBLE TEXT_BOX_ATTR_BORDER_GROOVE = _richtext.TEXT_BOX_ATTR_BORDER_GROOVE TEXT_BOX_ATTR_BORDER_RIDGE = _richtext.TEXT_BOX_ATTR_BORDER_RIDGE TEXT_BOX_ATTR_BORDER_INSET = _richtext.TEXT_BOX_ATTR_BORDER_INSET TEXT_BOX_ATTR_BORDER_OUTSET = _richtext.TEXT_BOX_ATTR_BORDER_OUTSET TEXT_BOX_ATTR_BORDER_STYLE = _richtext.TEXT_BOX_ATTR_BORDER_STYLE TEXT_BOX_ATTR_BORDER_COLOUR = _richtext.TEXT_BOX_ATTR_BORDER_COLOUR TEXT_BOX_ATTR_BORDER_THIN = _richtext.TEXT_BOX_ATTR_BORDER_THIN TEXT_BOX_ATTR_BORDER_MEDIUM = _richtext.TEXT_BOX_ATTR_BORDER_MEDIUM TEXT_BOX_ATTR_BORDER_THICK = _richtext.TEXT_BOX_ATTR_BORDER_THICK TEXT_BOX_ATTR_FLOAT_NONE = _richtext.TEXT_BOX_ATTR_FLOAT_NONE TEXT_BOX_ATTR_FLOAT_LEFT = _richtext.TEXT_BOX_ATTR_FLOAT_LEFT TEXT_BOX_ATTR_FLOAT_RIGHT = _richtext.TEXT_BOX_ATTR_FLOAT_RIGHT TEXT_BOX_ATTR_CLEAR_NONE = _richtext.TEXT_BOX_ATTR_CLEAR_NONE TEXT_BOX_ATTR_CLEAR_LEFT = _richtext.TEXT_BOX_ATTR_CLEAR_LEFT TEXT_BOX_ATTR_CLEAR_RIGHT = _richtext.TEXT_BOX_ATTR_CLEAR_RIGHT TEXT_BOX_ATTR_CLEAR_BOTH = _richtext.TEXT_BOX_ATTR_CLEAR_BOTH TEXT_BOX_ATTR_COLLAPSE_NONE = _richtext.TEXT_BOX_ATTR_COLLAPSE_NONE TEXT_BOX_ATTR_COLLAPSE_FULL = _richtext.TEXT_BOX_ATTR_COLLAPSE_FULL TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_NONE = _richtext.TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_NONE TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_TOP = _richtext.TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_TOP TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_CENTRE = _richtext.TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_CENTRE TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_BOTTOM = _richtext.TEXT_BOX_ATTR_VERTICAL_ALIGNMENT_BOTTOM class TextAttrBorder(object): """Proxy of C++ TextAttrBorder class""" thisown = 
property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self) -> TextAttrBorder""" _richtext.TextAttrBorder_swiginit(self,_richtext.new_TextAttrBorder(*args, **kwargs)) def __eq__(*args, **kwargs): """__eq__(self, TextAttrBorder border) -> bool""" return _richtext.TextAttrBorder___eq__(*args, **kwargs) def Reset(*args, **kwargs): """Reset(self)""" return _richtext.TextAttrBorder_Reset(*args, **kwargs) def EqPartial(*args, **kwargs): """EqPartial(self, TextAttrBorder border) -> bool""" return _richtext.TextAttrBorder_EqPartial(*args, **kwargs) def Apply(*args, **kwargs): """Apply(self, TextAttrBorder border, TextAttrBorder compareWith=None) -> bool""" return _richtext.TextAttrBorder_Apply(*args, **kwargs) def RemoveStyle(*args, **kwargs): """RemoveStyle(self, TextAttrBorder attr) -> bool""" return _richtext.TextAttrBorder_RemoveStyle(*args, **kwargs) def CollectCommonAttributes(*args, **kwargs): """CollectCommonAttributes(self, TextAttrBorder attr, TextAttrBorder clashingAttr, TextAttrBorder absentAttr)""" return _richtext.TextAttrBorder_CollectCommonAttributes(*args, **kwargs) def SetStyle(*args, **kwargs): """SetStyle(self, int style)""" return _richtext.TextAttrBorder_SetStyle(*args, **kwargs) def GetStyle(*args, **kwargs): """GetStyle(self) -> int""" return _richtext.TextAttrBorder_GetStyle(*args, **kwargs) def SetColour(*args): """ SetColour(self, unsigned long colour) SetColour(self, Colour colour) """ return _richtext.TextAttrBorder_SetColour(*args) def GetColourLong(*args, **kwargs): """GetColourLong(self) -> unsigned long""" return _richtext.TextAttrBorder_GetColourLong(*args, **kwargs) def GetColour(*args, **kwargs): """GetColour(self) -> Colour""" return _richtext.TextAttrBorder_GetColour(*args, **kwargs) def GetWidth(*args): """ GetWidth(self) -> TextAttrDimension GetWidth(self) -> TextAttrDimension """ return _richtext.TextAttrBorder_GetWidth(*args) def SetWidth(*args): """ SetWidth(self, TextAttrDimension width) SetWidth(self, int value, int units=TEXT_ATTR_UNITS_TENTHS_MM) """ return _richtext.TextAttrBorder_SetWidth(*args) def HasStyle(*args, **kwargs): """HasStyle(self) -> bool""" return _richtext.TextAttrBorder_HasStyle(*args, **kwargs) def HasColour(*args, **kwargs): """HasColour(self) -> bool""" return _richtext.TextAttrBorder_HasColour(*args, **kwargs) def HasWidth(*args, **kwargs): """HasWidth(self) -> bool""" return _richtext.TextAttrBorder_HasWidth(*args, **kwargs) def IsValid(*args, **kwargs): """IsValid(self) -> bool""" return _richtext.TextAttrBorder_IsValid(*args, **kwargs) def MakeValid(*args, **kwargs): """MakeValid(self)""" return _richtext.TextAttrBorder_MakeValid(*args, **kwargs) def GetFlags(*args, **kwargs): """GetFlags(self) -> int""" return _richtext.TextAttrBorder_GetFlags(*args, **kwargs) def SetFlags(*args, **kwargs): """SetFlags(self, int flags)""" return _richtext.TextAttrBorder_SetFlags(*args, **kwargs) def AddFlag(*args, **kwargs): """AddFlag(self, int flag)""" return _richtext.TextAttrBorder_AddFlag(*args, **kwargs) def RemoveFlag(*args, **kwargs): """RemoveFlag(self, int flag)""" return _richtext.TextAttrBorder_RemoveFlag(*args, **kwargs) m_borderStyle = property(_richtext.TextAttrBorder_m_borderStyle_get, _richtext.TextAttrBorder_m_borderStyle_set) m_borderColour = property(_richtext.TextAttrBorder_m_borderColour_get, _richtext.TextAttrBorder_m_borderColour_set) m_borderWidth = property(_richtext.TextAttrBorder_m_borderWidth_get, 
_richtext.TextAttrBorder_m_borderWidth_set) m_flags = property(_richtext.TextAttrBorder_m_flags_get, _richtext.TextAttrBorder_m_flags_set) _richtext.TextAttrBorder_swigregister(TextAttrBorder) class TextAttrBorders(object): """Proxy of C++ TextAttrBorders class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self) -> TextAttrBorders""" _richtext.TextAttrBorders_swiginit(self,_richtext.new_TextAttrBorders(*args, **kwargs)) def __eq__(*args, **kwargs): """__eq__(self, TextAttrBorders borders) -> bool""" return _richtext.TextAttrBorders___eq__(*args, **kwargs) def SetStyle(*args, **kwargs): """SetStyle(self, int style)""" return _richtext.TextAttrBorders_SetStyle(*args, **kwargs) def SetColour(*args): """ SetColour(self, unsigned long colour) SetColour(self, Colour colour) """ return _richtext.TextAttrBorders_SetColour(*args) def SetWidth(*args): """ SetWidth(self, TextAttrDimension width) SetWidth(self, int value, int units=TEXT_ATTR_UNITS_TENTHS_MM) """ return _richtext.TextAttrBorders_SetWidth(*args) def Reset(*args, **kwargs): """Reset(self)""" return _richtext.TextAttrBorders_Reset(*args, **kwargs) def EqPartial(*args, **kwargs): """EqPartial(self, TextAttrBorders borders) -> bool""" return _richtext.TextAttrBorders_EqPartial(*args, **kwargs) def Apply(*args, **kwargs): """Apply(self, TextAttrBorders borders, TextAttrBorders compareWith=None) -> bool""" return _richtext.TextAttrBorders_Apply(*args, **kwargs) def RemoveStyle(*args, **kwargs): """RemoveStyle(self, TextAttrBorders attr) -> bool""" return _richtext.TextAttrBorders_RemoveStyle(*args, **kwargs) def CollectCommonAttributes(*args, **kwargs): """ CollectCommonAttributes(self, TextAttrBorders attr, TextAttrBorders clashingAttr, TextAttrBorders absentAttr) """ return _richtext.TextAttrBorders_CollectCommonAttributes(*args, **kwargs) def IsValid(*args, **kwargs): """IsValid(self) -> bool""" return _richtext.TextAttrBorders_IsValid(*args, **kwargs) def GetLeft(*args): """ GetLeft(self) -> TextAttrBorder GetLeft(self) -> TextAttrBorder """ return _richtext.TextAttrBorders_GetLeft(*args) def GetRight(*args): """ GetRight(self) -> TextAttrBorder GetRight(self) -> TextAttrBorder """ return _richtext.TextAttrBorders_GetRight(*args) def GetTop(*args): """ GetTop(self) -> TextAttrBorder GetTop(self) -> TextAttrBorder """ return _richtext.TextAttrBorders_GetTop(*args) def GetBottom(*args): """ GetBottom(self) -> TextAttrBorder GetBottom(self) -> TextAttrBorder """ return _richtext.TextAttrBorders_GetBottom(*args) m_left = property(_richtext.TextAttrBorders_m_left_get, _richtext.TextAttrBorders_m_left_set) m_right = property(_richtext.TextAttrBorders_m_right_get, _richtext.TextAttrBorders_m_right_set) m_top = property(_richtext.TextAttrBorders_m_top_get, _richtext.TextAttrBorders_m_top_set) m_bottom = property(_richtext.TextAttrBorders_m_bottom_get, _richtext.TextAttrBorders_m_bottom_set) _richtext.TextAttrBorders_swigregister(TextAttrBorders) class TextBoxAttr(object): """Proxy of C++ TextBoxAttr class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ __init__(self) -> TextBoxAttr __init__(self, TextBoxAttr attr) -> TextBoxAttr """ _richtext.TextBoxAttr_swiginit(self,_richtext.new_TextBoxAttr(*args)) def Init(*args, **kwargs): """Init(self)""" return _richtext.TextBoxAttr_Init(*args, **kwargs) def Reset(*args, 
**kwargs): """Reset(self)""" return _richtext.TextBoxAttr_Reset(*args, **kwargs) def __eq__(*args, **kwargs): """__eq__(self, TextBoxAttr attr) -> bool""" return _richtext.TextBoxAttr___eq__(*args, **kwargs) def EqPartial(*args, **kwargs): """EqPartial(self, TextBoxAttr attr) -> bool""" return _richtext.TextBoxAttr_EqPartial(*args, **kwargs) def Apply(*args, **kwargs): """Apply(self, TextBoxAttr style, TextBoxAttr compareWith=None) -> bool""" return _richtext.TextBoxAttr_Apply(*args, **kwargs) def CollectCommonAttributes(*args, **kwargs): """CollectCommonAttributes(self, TextBoxAttr attr, TextBoxAttr clashingAttr, TextBoxAttr absentAttr)""" return _richtext.TextBoxAttr_CollectCommonAttributes(*args, **kwargs) def RemoveStyle(*args, **kwargs): """RemoveStyle(self, TextBoxAttr attr) -> bool""" return _richtext.TextBoxAttr_RemoveStyle(*args, **kwargs) def SetFlags(*args, **kwargs): """SetFlags(self, int flags)""" return _richtext.TextBoxAttr_SetFlags(*args, **kwargs) def GetFlags(*args, **kwargs): """GetFlags(self) -> int""" return _richtext.TextBoxAttr_GetFlags(*args, **kwargs) def HasFlag(*args, **kwargs): """HasFlag(self, int flag) -> bool""" return _richtext.TextBoxAttr_HasFlag(*args, **kwargs) def RemoveFlag(*args, **kwargs): """RemoveFlag(self, int flag)""" return _richtext.TextBoxAttr_RemoveFlag(*args, **kwargs) def AddFlag(*args, **kwargs): """AddFlag(self, int flag)""" return _richtext.TextBoxAttr_AddFlag(*args, **kwargs) def GetFloatMode(*args, **kwargs): """GetFloatMode(self) -> int""" return _richtext.TextBoxAttr_GetFloatMode(*args, **kwargs) def SetFloatMode(*args, **kwargs): """SetFloatMode(self, int mode)""" return _richtext.TextBoxAttr_SetFloatMode(*args, **kwargs) def HasFloatMode(*args, **kwargs): """HasFloatMode(self) -> bool""" return _richtext.TextBoxAttr_HasFloatMode(*args, **kwargs) def IsFloating(*args, **kwargs): """IsFloating(self) -> bool""" return _richtext.TextBoxAttr_IsFloating(*args, **kwargs) def GetClearMode(*args, **kwargs): """GetClearMode(self) -> int""" return _richtext.TextBoxAttr_GetClearMode(*args, **kwargs) def SetClearMode(*args, **kwargs): """SetClearMode(self, int mode)""" return _richtext.TextBoxAttr_SetClearMode(*args, **kwargs) def HasClearMode(*args, **kwargs): """HasClearMode(self) -> bool""" return _richtext.TextBoxAttr_HasClearMode(*args, **kwargs) def GetCollapseBorders(*args, **kwargs): """GetCollapseBorders(self) -> int""" return _richtext.TextBoxAttr_GetCollapseBorders(*args, **kwargs) def SetCollapseBorders(*args, **kwargs): """SetCollapseBorders(self, int collapse)""" return _richtext.TextBoxAttr_SetCollapseBorders(*args, **kwargs) def HasCollapseBorders(*args, **kwargs): """HasCollapseBorders(self) -> bool""" return _richtext.TextBoxAttr_HasCollapseBorders(*args, **kwargs) def GetVerticalAlignment(*args, **kwargs): """GetVerticalAlignment(self) -> int""" return _richtext.TextBoxAttr_GetVerticalAlignment(*args, **kwargs) def SetVerticalAlignment(*args, **kwargs): """SetVerticalAlignment(self, int verticalAlignment)""" return _richtext.TextBoxAttr_SetVerticalAlignment(*args, **kwargs) def HasVerticalAlignment(*args, **kwargs): """HasVerticalAlignment(self) -> bool""" return _richtext.TextBoxAttr_HasVerticalAlignment(*args, **kwargs) def GetMargins(*args): """ GetMargins(self) -> TextAttrDimensions GetMargins(self) -> TextAttrDimensions """ return _richtext.TextBoxAttr_GetMargins(*args) def GetLeftMargin(*args): """ GetLeftMargin(self) -> TextAttrDimension GetLeftMargin(self) -> TextAttrDimension """ return 
_richtext.TextBoxAttr_GetLeftMargin(*args) def GetRightMargin(*args): """ GetRightMargin(self) -> TextAttrDimension GetRightMargin(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetRightMargin(*args) def GetTopMargin(*args): """ GetTopMargin(self) -> TextAttrDimension GetTopMargin(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetTopMargin(*args) def GetBottomMargin(*args): """ GetBottomMargin(self) -> TextAttrDimension GetBottomMargin(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetBottomMargin(*args) def GetPosition(*args): """ GetPosition(self) -> TextAttrDimensions GetPosition(self) -> TextAttrDimensions """ return _richtext.TextBoxAttr_GetPosition(*args) def GetLeft(*args): """ GetLeft(self) -> TextAttrDimension GetLeft(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetLeft(*args) def GetRight(*args): """ GetRight(self) -> TextAttrDimension GetRight(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetRight(*args) def GetTop(*args): """ GetTop(self) -> TextAttrDimension GetTop(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetTop(*args) def GetBottom(*args): """ GetBottom(self) -> TextAttrDimension GetBottom(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetBottom(*args) def GetPadding(*args): """ GetPadding(self) -> TextAttrDimensions GetPadding(self) -> TextAttrDimensions """ return _richtext.TextBoxAttr_GetPadding(*args) def GetLeftPadding(*args): """ GetLeftPadding(self) -> TextAttrDimension GetLeftPadding(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetLeftPadding(*args) def GetRightPadding(*args): """ GetRightPadding(self) -> TextAttrDimension GetRightPadding(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetRightPadding(*args) def GetTopPadding(*args): """ GetTopPadding(self) -> TextAttrDimension GetTopPadding(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetTopPadding(*args) def GetBottomPadding(*args): """ GetBottomPadding(self) -> TextAttrDimension GetBottomPadding(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetBottomPadding(*args) def GetBorder(*args): """ GetBorder(self) -> TextAttrBorders GetBorder(self) -> TextAttrBorders """ return _richtext.TextBoxAttr_GetBorder(*args) def GetLeftBorder(*args): """ GetLeftBorder(self) -> TextAttrBorder GetLeftBorder(self) -> TextAttrBorder """ return _richtext.TextBoxAttr_GetLeftBorder(*args) def GetTopBorder(*args): """ GetTopBorder(self) -> TextAttrBorder GetTopBorder(self) -> TextAttrBorder """ return _richtext.TextBoxAttr_GetTopBorder(*args) def GetRightBorder(*args): """ GetRightBorder(self) -> TextAttrBorder GetRightBorder(self) -> TextAttrBorder """ return _richtext.TextBoxAttr_GetRightBorder(*args) def GetBottomBorder(*args): """ GetBottomBorder(self) -> TextAttrBorder GetBottomBorder(self) -> TextAttrBorder """ return _richtext.TextBoxAttr_GetBottomBorder(*args) def GetOutline(*args): """ GetOutline(self) -> TextAttrBorders GetOutline(self) -> TextAttrBorders """ return _richtext.TextBoxAttr_GetOutline(*args) def GetLeftOutline(*args): """ GetLeftOutline(self) -> TextAttrBorder GetLeftOutline(self) -> TextAttrBorder """ return _richtext.TextBoxAttr_GetLeftOutline(*args) def GetTopOutline(*args): """ GetTopOutline(self) -> TextAttrBorder GetTopOutline(self) -> TextAttrBorder """ return _richtext.TextBoxAttr_GetTopOutline(*args) def GetRightOutline(*args): """ GetRightOutline(self) -> TextAttrBorder GetRightOutline(self) -> TextAttrBorder """ return 
_richtext.TextBoxAttr_GetRightOutline(*args) def GetBottomOutline(*args): """ GetBottomOutline(self) -> TextAttrBorder GetBottomOutline(self) -> TextAttrBorder """ return _richtext.TextBoxAttr_GetBottomOutline(*args) def GetSize(*args): """ GetSize(self) -> wxTextAttrSize GetSize(self) -> wxTextAttrSize """ return _richtext.TextBoxAttr_GetSize(*args) def SetSize(*args, **kwargs): """SetSize(self, wxTextAttrSize sz)""" return _richtext.TextBoxAttr_SetSize(*args, **kwargs) def GetWidth(*args): """ GetWidth(self) -> TextAttrDimension GetWidth(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetWidth(*args) def GetHeight(*args): """ GetHeight(self) -> TextAttrDimension GetHeight(self) -> TextAttrDimension """ return _richtext.TextBoxAttr_GetHeight(*args) def GetBoxStyleName(*args, **kwargs): """GetBoxStyleName(self) -> String""" return _richtext.TextBoxAttr_GetBoxStyleName(*args, **kwargs) def SetBoxStyleName(*args, **kwargs): """SetBoxStyleName(self, String name)""" return _richtext.TextBoxAttr_SetBoxStyleName(*args, **kwargs) def HasBoxStyleName(*args, **kwargs): """HasBoxStyleName(self) -> bool""" return _richtext.TextBoxAttr_HasBoxStyleName(*args, **kwargs) m_flags = property(_richtext.TextBoxAttr_m_flags_get, _richtext.TextBoxAttr_m_flags_set) m_margins = property(_richtext.TextBoxAttr_m_margins_get, _richtext.TextBoxAttr_m_margins_set) m_padding = property(_richtext.TextBoxAttr_m_padding_get, _richtext.TextBoxAttr_m_padding_set) m_position = property(_richtext.TextBoxAttr_m_position_get, _richtext.TextBoxAttr_m_position_set) m_size = property(_richtext.TextBoxAttr_m_size_get, _richtext.TextBoxAttr_m_size_set) m_border = property(_richtext.TextBoxAttr_m_border_get, _richtext.TextBoxAttr_m_border_set) m_outline = property(_richtext.TextBoxAttr_m_outline_get, _richtext.TextBoxAttr_m_outline_set) m_floatMode = property(_richtext.TextBoxAttr_m_floatMode_get, _richtext.TextBoxAttr_m_floatMode_set) m_clearMode = property(_richtext.TextBoxAttr_m_clearMode_get, _richtext.TextBoxAttr_m_clearMode_set) m_collapseMode = property(_richtext.TextBoxAttr_m_collapseMode_get, _richtext.TextBoxAttr_m_collapseMode_set) m_verticalAlignment = property(_richtext.TextBoxAttr_m_verticalAlignment_get, _richtext.TextBoxAttr_m_verticalAlignment_set) m_boxStyleName = property(_richtext.TextBoxAttr_m_boxStyleName_get, _richtext.TextBoxAttr_m_boxStyleName_set) _richtext.TextBoxAttr_swigregister(TextBoxAttr) #--------------------------------------------------------------------------- class RichTextAttr(_controls.TextAttr): """Proxy of C++ RichTextAttr class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ __init__(self, TextAttr attr) -> RichTextAttr __init__(self, RichTextAttr attr) -> RichTextAttr __init__(self) -> RichTextAttr """ _richtext.RichTextAttr_swiginit(self,_richtext.new_RichTextAttr(*args)) __swig_destroy__ = _richtext.delete_RichTextAttr __del__ = lambda self : None; def Copy(*args, **kwargs): """Copy(self, RichTextAttr attr)""" return _richtext.RichTextAttr_Copy(*args, **kwargs) def __eq__(*args, **kwargs): """__eq__(self, RichTextAttr attr) -> bool""" return _richtext.RichTextAttr___eq__(*args, **kwargs) def EqPartial(*args, **kwargs): """EqPartial(self, RichTextAttr attr) -> bool""" return _richtext.RichTextAttr_EqPartial(*args, **kwargs) def Apply(*args, **kwargs): """Apply(self, RichTextAttr style, RichTextAttr compareWith=None) -> bool""" return _richtext.RichTextAttr_Apply(*args, 
**kwargs) def CollectCommonAttributes(*args, **kwargs): """CollectCommonAttributes(self, RichTextAttr attr, RichTextAttr clashingAttr, RichTextAttr absentAttr)""" return _richtext.RichTextAttr_CollectCommonAttributes(*args, **kwargs) def RemoveStyle(*args, **kwargs): """RemoveStyle(self, RichTextAttr attr) -> bool""" return _richtext.RichTextAttr_RemoveStyle(*args, **kwargs) def GetTextBoxAttr(*args): """ GetTextBoxAttr(self) -> TextBoxAttr GetTextBoxAttr(self) -> TextBoxAttr """ return _richtext.RichTextAttr_GetTextBoxAttr(*args) def SetTextBoxAttr(*args, **kwargs): """SetTextBoxAttr(self, TextBoxAttr attr)""" return _richtext.RichTextAttr_SetTextBoxAttr(*args, **kwargs) m_textBoxAttr = property(_richtext.RichTextAttr_m_textBoxAttr_get, _richtext.RichTextAttr_m_textBoxAttr_set) _richtext.RichTextAttr_swigregister(RichTextAttr) class RichTextFontTable(_core.Object): """Proxy of C++ RichTextFontTable class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self) -> RichTextFontTable""" _richtext.RichTextFontTable_swiginit(self,_richtext.new_RichTextFontTable(*args, **kwargs)) __swig_destroy__ = _richtext.delete_RichTextFontTable __del__ = lambda self : None; def IsOk(*args, **kwargs): """IsOk(self) -> bool""" return _richtext.RichTextFontTable_IsOk(*args, **kwargs) def FindFont(*args, **kwargs): """FindFont(self, RichTextAttr fontSpec) -> Font""" return _richtext.RichTextFontTable_FindFont(*args, **kwargs) def Clear(*args, **kwargs): """Clear(self)""" return _richtext.RichTextFontTable_Clear(*args, **kwargs) _richtext.RichTextFontTable_swigregister(RichTextFontTable) class RichTextRange(object): """ RichTextRange is a data structure that represents a range of text within a `RichTextCtrl`. It simply contains integer ``start`` and ``end`` properties and a few operations useful for dealing with ranges. In most places in wxPython where a RichTextRange is expected a 2-tuple containing (start, end) can be used instead. """ thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """ __init__(self, long start=0, long end=0) -> RichTextRange Creates a new range object. """ _richtext.RichTextRange_swiginit(self,_richtext.new_RichTextRange(*args, **kwargs)) __swig_destroy__ = _richtext.delete_RichTextRange __del__ = lambda self : None; def __eq__(*args, **kwargs): """ __eq__(self, PyObject other) -> bool Test for equality of RichTextRange objects. 
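        As an illustrative sketch (not part of the generated signature
        notes), the comparison also accepts a plain 2-tuple on the
        right-hand side, per the tuple convention described above::

            r = RichTextRange(2, 5)
            r == RichTextRange(2, 5)   # True
            r == (2, 5)                # also True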
""" return _richtext.RichTextRange___eq__(*args, **kwargs) def __sub__(*args, **kwargs): """__sub__(self, RichTextRange range) -> RichTextRange""" return _richtext.RichTextRange___sub__(*args, **kwargs) def __add__(*args, **kwargs): """__add__(self, RichTextRange range) -> RichTextRange""" return _richtext.RichTextRange___add__(*args, **kwargs) def SetRange(*args, **kwargs): """SetRange(self, long start, long end)""" return _richtext.RichTextRange_SetRange(*args, **kwargs) def SetStart(*args, **kwargs): """SetStart(self, long start)""" return _richtext.RichTextRange_SetStart(*args, **kwargs) def GetStart(*args, **kwargs): """GetStart(self) -> long""" return _richtext.RichTextRange_GetStart(*args, **kwargs) start = property(GetStart, SetStart) def SetEnd(*args, **kwargs): """SetEnd(self, long end)""" return _richtext.RichTextRange_SetEnd(*args, **kwargs) def GetEnd(*args, **kwargs): """GetEnd(self) -> long""" return _richtext.RichTextRange_GetEnd(*args, **kwargs) end = property(GetEnd, SetEnd) def IsOutside(*args, **kwargs): """ IsOutside(self, RichTextRange range) -> bool Returns true if this range is completely outside 'range' """ return _richtext.RichTextRange_IsOutside(*args, **kwargs) def IsWithin(*args, **kwargs): """ IsWithin(self, RichTextRange range) -> bool Returns true if this range is completely within 'range' """ return _richtext.RichTextRange_IsWithin(*args, **kwargs) def Contains(*args, **kwargs): """ Contains(self, long pos) -> bool Returns true if the given position is within this range. Allow for the possibility of an empty range - assume the position is within this empty range. """ return _richtext.RichTextRange_Contains(*args, **kwargs) def LimitTo(*args, **kwargs): """ LimitTo(self, RichTextRange range) -> bool Limit this range to be within 'range' """ return _richtext.RichTextRange_LimitTo(*args, **kwargs) def GetLength(*args, **kwargs): """ GetLength(self) -> long Gets the length of the range """ return _richtext.RichTextRange_GetLength(*args, **kwargs) def Swap(*args, **kwargs): """ Swap(self) Swaps the start and end """ return _richtext.RichTextRange_Swap(*args, **kwargs) def ToInternal(*args, **kwargs): """ ToInternal(self) -> RichTextRange Convert to internal form: (n, n) is the range of a single character. """ return _richtext.RichTextRange_ToInternal(*args, **kwargs) def FromInternal(*args, **kwargs): """ FromInternal(self) -> RichTextRange Convert from internal to public API form: (n, n+1) is the range of a single character. """ return _richtext.RichTextRange_FromInternal(*args, **kwargs) def Get(*args, **kwargs): """ Get() -> (start,end) Returns the start and end properties as a tuple. 
""" return _richtext.RichTextRange_Get(*args, **kwargs) def __str__(self): return str(self.Get()) def __repr__(self): return 'RichTextRange'+str(self.Get()) def __len__(self): return len(self.Get()) def __getitem__(self, index): return self.Get()[index] def __setitem__(self, index, val): if index == 0: self.start = val elif index == 1: self.end = val else: raise IndexError def __nonzero__(self): return self.Get() != (0,0) __safe_for_unpickling__ = True def __reduce__(self): return (RichTextRange, self.Get()) End = property(GetEnd,SetEnd,doc="See `GetEnd` and `SetEnd`") Length = property(GetLength,doc="See `GetLength`") Start = property(GetStart,SetStart,doc="See `GetStart` and `SetStart`") _richtext.RichTextRange_swigregister(RichTextRange) class RichTextDrawingContext(_core.Object): """Proxy of C++ RichTextDrawingContext class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self, RichTextBuffer buffer) -> RichTextDrawingContext""" _richtext.RichTextDrawingContext_swiginit(self,_richtext.new_RichTextDrawingContext(*args, **kwargs)) def Init(*args, **kwargs): """Init(self)""" return _richtext.RichTextDrawingContext_Init(*args, **kwargs) def HasVirtualAttributes(*args, **kwargs): """HasVirtualAttributes(self, RichTextObject obj) -> bool""" return _richtext.RichTextDrawingContext_HasVirtualAttributes(*args, **kwargs) def GetVirtualAttributes(*args, **kwargs): """GetVirtualAttributes(self, RichTextObject obj) -> RichTextAttr""" return _richtext.RichTextDrawingContext_GetVirtualAttributes(*args, **kwargs) def ApplyVirtualAttributes(*args, **kwargs): """ApplyVirtualAttributes(self, RichTextAttr attr, RichTextObject obj) -> bool""" return _richtext.RichTextDrawingContext_ApplyVirtualAttributes(*args, **kwargs) m_buffer = property(_richtext.RichTextDrawingContext_m_buffer_get, _richtext.RichTextDrawingContext_m_buffer_set) _richtext.RichTextDrawingContext_swigregister(RichTextDrawingContext) cvar = _richtext.cvar RICHTEXT_ALL = cvar.RICHTEXT_ALL RICHTEXT_NONE = cvar.RICHTEXT_NONE class RichTextObject(_core.Object): """ This is the base class for all drawable objects in a `RichTextCtrl`. The data displayed in a `RichTextCtrl` is handled by `RichTextBuffer`, and a `RichTextCtrl` always has one such buffer. The content is represented by a hierarchy of objects, all derived from `RichTextObject`. An object might be an image, a fragment of text, a paragraph, or a whole buffer. Objects store a an attribute object containing style information; a paragraph object can contain both paragraph and character information, but content objects such as text can only store character information. The final style displayed in the control or in a printout is a combination of base style, paragraph style and content (character) style. The top of the hierarchy is the buffer, a kind of `RichTextParagraphLayoutBox`. containing further `RichTextParagraph` objects, each of which can include text, images and potentially other types of objects. Each object maintains a range (start and end position) measured from the start of the main parent object. When Layout is called on an object, it is given a size which the object must limit itself to, or one or more flexible directions (vertical or horizontal). So, for example, a centred paragraph is given the page width to play with (minus any margins), but can extend indefinitely in the vertical direction. 
The implementation of Layout caches the calculated size and position. When the buffer is modified, a range is invalidated (marked as requiring layout), so that only the minimum amount of layout is performed. A paragraph of pure text with the same style contains just one further object, a `RichTextPlainText` object. When styling is applied to part of this object, the object is decomposed into separate objects, one object for each different character style. So each object within a paragraph always has just one attribute object to denote its character style. Of course, this can lead to fragmentation after a lot of edit operations, potentially leading to several objects with the same style where just one would do. So a Defragment function is called when updating the control's display, to ensure that the minimum number of objects is used. To implement your own RichTextObjects in Python you must derive a class from `PyRichTextObject`, which has been instrumented to forward the virtual C++ method calls to the Python methods in the derived class. (This class hasn't been implemented yet!) """ thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self): raise AttributeError, "No constructor defined" __repr__ = _swig_repr __swig_destroy__ = _richtext.delete_RichTextObject __del__ = lambda self : None; def Draw(*args, **kwargs): """ Draw(self, DC dc, RichTextDrawingContext context, RichTextRange range, wxRichTextSelection selection, Rect rect, int descent, int style) -> bool """ return _richtext.RichTextObject_Draw(*args, **kwargs) def Layout(*args, **kwargs): """ Layout(self, DC dc, RichTextDrawingContext context, Rect rect, Rect parentRect, int style) -> bool """ return _richtext.RichTextObject_Layout(*args, **kwargs) def HitTest(*args, **kwargs): """ HitTest(self, DC dc, RichTextDrawingContext context, Point pt, long OUTPUT, RichTextObject obj, RichTextObject contextObj, int flags=0) -> int """ return _richtext.RichTextObject_HitTest(*args, **kwargs) def FindPosition(*args, **kwargs): """ FindPosition(self, DC dc, RichTextDrawingContext context, long index, Point OUTPUT, int OUTPUT, bool forceLineStart) -> bool """ return _richtext.RichTextObject_FindPosition(*args, **kwargs) def GetBestSize(*args, **kwargs): """GetBestSize(self) -> Size""" return _richtext.RichTextObject_GetBestSize(*args, **kwargs) def GetRangeSize(*args, **kwargs): """ GetRangeSize(self, RichTextRange range, Size OUTPUT, int OUTPUT, DC dc, RichTextDrawingContext context, int flags, Point position=wxPoint(0,0)) -> bool """ return _richtext.RichTextObject_GetRangeSize(*args, **kwargs) def DoSplit(*args, **kwargs): """DoSplit(self, long pos) -> RichTextObject""" return _richtext.RichTextObject_DoSplit(*args, **kwargs) def CalculateRange(*args, **kwargs): """CalculateRange(self, long start, long OUTPUT)""" return _richtext.RichTextObject_CalculateRange(*args, **kwargs) def DeleteRange(*args, **kwargs): """DeleteRange(self, RichTextRange range) -> bool""" return _richtext.RichTextObject_DeleteRange(*args, **kwargs) def IsEmpty(*args, **kwargs): """IsEmpty(self) -> bool""" return _richtext.RichTextObject_IsEmpty(*args, **kwargs) def IsFloatable(*args, **kwargs): """IsFloatable(self) -> bool""" return _richtext.RichTextObject_IsFloatable(*args, **kwargs) def IsFloating(*args, **kwargs): """IsFloating(self) -> bool""" return _richtext.RichTextObject_IsFloating(*args, **kwargs) def GetFloatDirection(*args, **kwargs): """GetFloatDirection(self) -> int""" return 
_richtext.RichTextObject_GetFloatDirection(*args, **kwargs) def GetTextForRange(*args, **kwargs): """GetTextForRange(self, RichTextRange range) -> String""" return _richtext.RichTextObject_GetTextForRange(*args, **kwargs) def CanMerge(*args, **kwargs): """CanMerge(self, RichTextObject object, RichTextDrawingContext context) -> bool""" return _richtext.RichTextObject_CanMerge(*args, **kwargs) def Merge(self, obj, context): """Merge(self, RichTextObject object) -> bool""" val = _richtext.RichTextObject_Merge(self, obj, context) if val: obj.this.own(True) return val def Dump(*args, **kwargs): """Dump(self) -> String""" return _richtext.RichTextObject_Dump(*args, **kwargs) def CanEditProperties(*args, **kwargs): """CanEditProperties(self) -> bool""" return _richtext.RichTextObject_CanEditProperties(*args, **kwargs) def EditProperties(*args, **kwargs): """EditProperties(self, Window parent, RichTextBuffer buffer) -> bool""" return _richtext.RichTextObject_EditProperties(*args, **kwargs) def ImportFromXML(*args, **kwargs): """ ImportFromXML(self, RichTextBuffer buffer, wxXmlNode node, RichTextXMLHandler handler, bool recurse) -> bool """ return _richtext.RichTextObject_ImportFromXML(*args, **kwargs) def ExportXML(*args): """ ExportXML(self, wxOutputStream stream, int indent, RichTextXMLHandler handler) -> bool ExportXML(self, wxXmlNode parent, RichTextXMLHandler handler) -> bool """ return _richtext.RichTextObject_ExportXML(*args) def UsesParagraphAttributes(*args, **kwargs): """UsesParagraphAttributes(self) -> bool""" return _richtext.RichTextObject_UsesParagraphAttributes(*args, **kwargs) def GetXMLNodeName(*args, **kwargs): """GetXMLNodeName(self) -> String""" return _richtext.RichTextObject_GetXMLNodeName(*args, **kwargs) def GetCachedSize(*args, **kwargs): """GetCachedSize(self) -> Size""" return _richtext.RichTextObject_GetCachedSize(*args, **kwargs) def SetCachedSize(*args, **kwargs): """SetCachedSize(self, Size sz)""" return _richtext.RichTextObject_SetCachedSize(*args, **kwargs) CachedSize = property(GetCachedSize,SetCachedSize) def GetPosition(*args, **kwargs): """GetPosition(self) -> Point""" return _richtext.RichTextObject_GetPosition(*args, **kwargs) def SetPosition(*args, **kwargs): """SetPosition(self, Point pos)""" return _richtext.RichTextObject_SetPosition(*args, **kwargs) Position = property(GetPosition,SetPosition) def GetRect(*args, **kwargs): """GetRect(self) -> Rect""" return _richtext.RichTextObject_GetRect(*args, **kwargs) Rect = property(GetRect) def SetRange(*args, **kwargs): """SetRange(self, RichTextRange range)""" return _richtext.RichTextObject_SetRange(*args, **kwargs) def GetRange(*args, **kwargs): """GetRange(self) -> RichTextRange""" return _richtext.RichTextObject_GetRange(*args, **kwargs) Range = property(GetRange,SetRange) def IsComposite(*args, **kwargs): """IsComposite(self) -> bool""" return _richtext.RichTextObject_IsComposite(*args, **kwargs) def GetParent(*args, **kwargs): """GetParent(self) -> RichTextObject""" return _richtext.RichTextObject_GetParent(*args, **kwargs) def SetParent(*args, **kwargs): """SetParent(self, RichTextObject parent)""" return _richtext.RichTextObject_SetParent(*args, **kwargs) Parent = property(GetParent,SetParent) def SetSameMargins(*args, **kwargs): """SetSameMargins(self, int margin)""" return _richtext.RichTextObject_SetSameMargins(*args, **kwargs) def SetMargins(*args, **kwargs): """SetMargins(self, int leftMargin, int rightMargin, int topMargin, int bottomMargin)""" return _richtext.RichTextObject_SetMargins(*args, 
**kwargs) def GetLeftMargin(*args, **kwargs): """GetLeftMargin(self) -> int""" return _richtext.RichTextObject_GetLeftMargin(*args, **kwargs) def GetRightMargin(*args, **kwargs): """GetRightMargin(self) -> int""" return _richtext.RichTextObject_GetRightMargin(*args, **kwargs) def GetTopMargin(*args, **kwargs): """GetTopMargin(self) -> int""" return _richtext.RichTextObject_GetTopMargin(*args, **kwargs) def GetBottomMargin(*args, **kwargs): """GetBottomMargin(self) -> int""" return _richtext.RichTextObject_GetBottomMargin(*args, **kwargs) def SetAttributes(*args, **kwargs): """SetAttributes(self, RichTextAttr attr)""" return _richtext.RichTextObject_SetAttributes(*args, **kwargs) def GetAttributes(*args, **kwargs): """GetAttributes(self) -> RichTextAttr""" return _richtext.RichTextObject_GetAttributes(*args, **kwargs) Attributes = property(GetAttributes,SetAttributes) def SetDescent(*args, **kwargs): """SetDescent(self, int descent)""" return _richtext.RichTextObject_SetDescent(*args, **kwargs) def GetDescent(*args, **kwargs): """GetDescent(self) -> int""" return _richtext.RichTextObject_GetDescent(*args, **kwargs) Descent = property(GetDescent,SetDescent) def GetBuffer(*args, **kwargs): """GetBuffer(self) -> RichTextBuffer""" return _richtext.RichTextObject_GetBuffer(*args, **kwargs) def Clone(*args, **kwargs): """Clone(self) -> RichTextObject""" return _richtext.RichTextObject_Clone(*args, **kwargs) def Copy(*args, **kwargs): """Copy(self, RichTextObject obj)""" return _richtext.RichTextObject_Copy(*args, **kwargs) def Reference(*args, **kwargs): """Reference(self)""" return _richtext.RichTextObject_Reference(*args, **kwargs) def Dereference(*args, **kwargs): """Dereference(self)""" return _richtext.RichTextObject_Dereference(*args, **kwargs) def ConvertTenthsMMToPixelsDC(*args, **kwargs): """ConvertTenthsMMToPixelsDC(self, DC dc, int units) -> int""" return _richtext.RichTextObject_ConvertTenthsMMToPixelsDC(*args, **kwargs) def ConvertTenthsMMToPixels(*args, **kwargs): """ConvertTenthsMMToPixels(int ppi, int units, double scale=1.0) -> int""" return _richtext.RichTextObject_ConvertTenthsMMToPixels(*args, **kwargs) ConvertTenthsMMToPixels = staticmethod(ConvertTenthsMMToPixels) def ConvertPixelsToTenthsMM(*args): """ ConvertPixelsToTenthsMM(DC dc, int pixels) -> int ConvertPixelsToTenthsMM(int ppi, int pixels, double scale=1.0) -> int """ return _richtext.RichTextObject_ConvertPixelsToTenthsMM(*args) ConvertPixelsToTenthsMM = staticmethod(ConvertPixelsToTenthsMM) def DrawBoxAttributes(*args, **kwargs): """ DrawBoxAttributes(DC dc, RichTextBuffer buffer, RichTextAttr attr, Rect boxRect, int flags=0) -> bool """ return _richtext.RichTextObject_DrawBoxAttributes(*args, **kwargs) DrawBoxAttributes = staticmethod(DrawBoxAttributes) def DrawBorder(*args, **kwargs): """ DrawBorder(DC dc, RichTextBuffer buffer, TextAttrBorders attr, Rect rect, int flags=0) -> bool """ return _richtext.RichTextObject_DrawBorder(*args, **kwargs) DrawBorder = staticmethod(DrawBorder) def GetBoxRects(*args, **kwargs): """ GetBoxRects(DC dc, RichTextBuffer buffer, RichTextAttr attr, Rect marginRect, Rect borderRect, Rect contentRect, Rect paddingRect, Rect outlineRect) -> bool """ return _richtext.RichTextObject_GetBoxRects(*args, **kwargs) GetBoxRects = staticmethod(GetBoxRects) def GetTotalMargin(*args, **kwargs): """ GetTotalMargin(DC dc, RichTextBuffer buffer, RichTextAttr attr, int leftMargin, int rightMargin, int topMargin, int bottomMargin) -> bool """ return _richtext.RichTextObject_GetTotalMargin(*args, 
**kwargs) GetTotalMargin = staticmethod(GetTotalMargin) def AdjustAvailableSpace(*args, **kwargs): """ AdjustAvailableSpace(DC dc, RichTextBuffer buffer, RichTextAttr parentAttr, RichTextAttr childAttr, Rect availableParentSpace, Rect availableContainerSpace) -> Rect """ return _richtext.RichTextObject_AdjustAvailableSpace(*args, **kwargs) AdjustAvailableSpace = staticmethod(AdjustAvailableSpace) _richtext.RichTextObject_swigregister(RichTextObject) def RichTextObject_ConvertTenthsMMToPixels(*args, **kwargs): """RichTextObject_ConvertTenthsMMToPixels(int ppi, int units, double scale=1.0) -> int""" return _richtext.RichTextObject_ConvertTenthsMMToPixels(*args, **kwargs) def RichTextObject_ConvertPixelsToTenthsMM(*args): """ ConvertPixelsToTenthsMM(DC dc, int pixels) -> int RichTextObject_ConvertPixelsToTenthsMM(int ppi, int pixels, double scale=1.0) -> int """ return _richtext.RichTextObject_ConvertPixelsToTenthsMM(*args) def RichTextObject_DrawBoxAttributes(*args, **kwargs): """ RichTextObject_DrawBoxAttributes(DC dc, RichTextBuffer buffer, RichTextAttr attr, Rect boxRect, int flags=0) -> bool """ return _richtext.RichTextObject_DrawBoxAttributes(*args, **kwargs) def RichTextObject_DrawBorder(*args, **kwargs): """ RichTextObject_DrawBorder(DC dc, RichTextBuffer buffer, TextAttrBorders attr, Rect rect, int flags=0) -> bool """ return _richtext.RichTextObject_DrawBorder(*args, **kwargs) def RichTextObject_GetBoxRects(*args, **kwargs): """ RichTextObject_GetBoxRects(DC dc, RichTextBuffer buffer, RichTextAttr attr, Rect marginRect, Rect borderRect, Rect contentRect, Rect paddingRect, Rect outlineRect) -> bool """ return _richtext.RichTextObject_GetBoxRects(*args, **kwargs) def RichTextObject_GetTotalMargin(*args, **kwargs): """ RichTextObject_GetTotalMargin(DC dc, RichTextBuffer buffer, RichTextAttr attr, int leftMargin, int rightMargin, int topMargin, int bottomMargin) -> bool """ return _richtext.RichTextObject_GetTotalMargin(*args, **kwargs) def RichTextObject_AdjustAvailableSpace(*args, **kwargs): """ RichTextObject_AdjustAvailableSpace(DC dc, RichTextBuffer buffer, RichTextAttr parentAttr, RichTextAttr childAttr, Rect availableParentSpace, Rect availableContainerSpace) -> Rect """ return _richtext.RichTextObject_AdjustAvailableSpace(*args, **kwargs) class RichTextObjectList_iterator(object): """This class serves as an iterator for a wxRichTextObjectList object.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self): raise AttributeError, "No constructor defined" __repr__ = _swig_repr __swig_destroy__ = _richtext.delete_RichTextObjectList_iterator __del__ = lambda self : None; def next(*args, **kwargs): """next(self) -> RichTextObject""" return _richtext.RichTextObjectList_iterator_next(*args, **kwargs) _richtext.RichTextObjectList_iterator_swigregister(RichTextObjectList_iterator) class RichTextObjectList(object): """ This class wraps a wxList-based class and gives it a Python sequence-like interface. Sequence operations supported are length, index access and iteration. 
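    For instance (a sketch; ``box`` is assumed to be an existing
    `RichTextCompositeObject`)::

        children = box.GetChildren()
        count = len(children)       # length
        first = children[0]         # index access
        for child in children:      # iteration
            pass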
""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self): raise AttributeError, "No constructor defined" __repr__ = _swig_repr __swig_destroy__ = _richtext.delete_RichTextObjectList __del__ = lambda self : None; def __len__(*args, **kwargs): """__len__(self) -> size_t""" return _richtext.RichTextObjectList___len__(*args, **kwargs) def __getitem__(*args, **kwargs): """__getitem__(self, size_t index) -> RichTextObject""" return _richtext.RichTextObjectList___getitem__(*args, **kwargs) def __contains__(*args, **kwargs): """__contains__(self, RichTextObject obj) -> bool""" return _richtext.RichTextObjectList___contains__(*args, **kwargs) def __iter__(*args, **kwargs): """__iter__(self) -> RichTextObjectList_iterator""" return _richtext.RichTextObjectList___iter__(*args, **kwargs) def index(*args, **kwargs): """index(self, RichTextObject obj) -> int""" return _richtext.RichTextObjectList_index(*args, **kwargs) def __repr__(self): return "wxRichTextObjectList: " + repr(list(self)) _richtext.RichTextObjectList_swigregister(RichTextObjectList) class RichTextCompositeObject(RichTextObject): """Objects of this class can contain other rich text objects.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self): raise AttributeError, "No constructor defined" __repr__ = _swig_repr __swig_destroy__ = _richtext.delete_RichTextCompositeObject __del__ = lambda self : None; def GetChildren(*args, **kwargs): """GetChildren(self) -> RichTextObjectList""" return _richtext.RichTextCompositeObject_GetChildren(*args, **kwargs) def GetChildCount(*args, **kwargs): """GetChildCount(self) -> size_t""" return _richtext.RichTextCompositeObject_GetChildCount(*args, **kwargs) def GetChild(*args, **kwargs): """GetChild(self, size_t n) -> RichTextObject""" return _richtext.RichTextCompositeObject_GetChild(*args, **kwargs) def Copy(*args, **kwargs): """Copy(self, RichTextCompositeObject obj)""" return _richtext.RichTextCompositeObject_Copy(*args, **kwargs) def AppendChild(*args, **kwargs): """AppendChild(self, RichTextObject child) -> size_t""" return _richtext.RichTextCompositeObject_AppendChild(*args, **kwargs) def InsertChild(*args, **kwargs): """InsertChild(self, RichTextObject child, RichTextObject inFrontOf) -> bool""" return _richtext.RichTextCompositeObject_InsertChild(*args, **kwargs) def RemoveChild(self, child, deleteChild=False): val = _richtext.RichTextCompositeObject_RemoveChild(self, child, deleteChild) self.this.own(not deleteChild) return val def DeleteChildren(*args, **kwargs): """DeleteChildren(self) -> bool""" return _richtext.RichTextCompositeObject_DeleteChildren(*args, **kwargs) def Defragment(*args, **kwargs): """Defragment(self, RichTextDrawingContext context, RichTextRange range=wxRICHTEXT_ALL) -> bool""" return _richtext.RichTextCompositeObject_Defragment(*args, **kwargs) _richtext.RichTextCompositeObject_swigregister(RichTextCompositeObject) class RichTextParagraphLayoutBox(RichTextCompositeObject): """This box knows how to lay out paragraphs.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ __init__(self, RichTextObject parent=None) -> RichTextParagraphLayoutBox __init__(self, RichTextParagraphLayoutBox obj) -> RichTextParagraphLayoutBox This box knows how to lay out paragraphs. 
""" _richtext.RichTextParagraphLayoutBox_swiginit(self,_richtext.new_RichTextParagraphLayoutBox(*args)) __swig_destroy__ = _richtext.delete_RichTextParagraphLayoutBox __del__ = lambda self : None; def SetRichTextCtrl(*args, **kwargs): """SetRichTextCtrl(self, RichTextCtrl ctrl)""" return _richtext.RichTextParagraphLayoutBox_SetRichTextCtrl(*args, **kwargs) def GetRichTextCtrl(*args, **kwargs): """GetRichTextCtrl(self) -> RichTextCtrl""" return _richtext.RichTextParagraphLayoutBox_GetRichTextCtrl(*args, **kwargs) def SetPartialParagraph(*args, **kwargs): """SetPartialParagraph(self, bool partialPara)""" return _richtext.RichTextParagraphLayoutBox_SetPartialParagraph(*args, **kwargs) def GetPartialParagraph(*args, **kwargs): """GetPartialParagraph(self) -> bool""" return _richtext.RichTextParagraphLayoutBox_GetPartialParagraph(*args, **kwargs) def GetStyleSheet(*args, **kwargs): """GetStyleSheet(self) -> wxRichTextStyleSheet""" return _richtext.RichTextParagraphLayoutBox_GetStyleSheet(*args, **kwargs) def DrawFloats(*args, **kwargs): """ DrawFloats(self, DC dc, RichTextDrawingContext context, RichTextRange range, wxRichTextSelection selection, Rect rect, int descent, int style) """ return _richtext.RichTextParagraphLayoutBox_DrawFloats(*args, **kwargs) def MoveAnchoredObjectToParagraph(*args, **kwargs): """MoveAnchoredObjectToParagraph(self, RichTextParagraph from, RichTextParagraph to, RichTextObject obj)""" return _richtext.RichTextParagraphLayoutBox_MoveAnchoredObjectToParagraph(*args, **kwargs) def Init(*args, **kwargs): """Init(self)""" return _richtext.RichTextParagraphLayoutBox_Init(*args, **kwargs) def Clear(*args, **kwargs): """Clear(self)""" return _richtext.RichTextParagraphLayoutBox_Clear(*args, **kwargs) def Reset(*args, **kwargs): """Reset(self)""" return _richtext.RichTextParagraphLayoutBox_Reset(*args, **kwargs) def AddParagraph(*args, **kwargs): """AddParagraph(self, String text, RichTextAttr paraStyle=None) -> RichTextRange""" return _richtext.RichTextParagraphLayoutBox_AddParagraph(*args, **kwargs) def AddImage(*args, **kwargs): """AddImage(self, Image image, RichTextAttr paraStyle=None) -> RichTextRange""" return _richtext.RichTextParagraphLayoutBox_AddImage(*args, **kwargs) def AddParagraphs(*args, **kwargs): """AddParagraphs(self, String text, RichTextAttr paraStyle=None) -> RichTextRange""" return _richtext.RichTextParagraphLayoutBox_AddParagraphs(*args, **kwargs) def GetLineAtPosition(*args, **kwargs): """GetLineAtPosition(self, long pos, bool caretPosition=False) -> RichTextLine""" return _richtext.RichTextParagraphLayoutBox_GetLineAtPosition(*args, **kwargs) def GetLineAtYPosition(*args, **kwargs): """GetLineAtYPosition(self, int y) -> RichTextLine""" return _richtext.RichTextParagraphLayoutBox_GetLineAtYPosition(*args, **kwargs) def GetParagraphAtPosition(*args, **kwargs): """GetParagraphAtPosition(self, long pos, bool caretPosition=False) -> RichTextParagraph""" return _richtext.RichTextParagraphLayoutBox_GetParagraphAtPosition(*args, **kwargs) def GetLineSizeAtPosition(*args, **kwargs): """GetLineSizeAtPosition(self, long pos, bool caretPosition=False) -> Size""" return _richtext.RichTextParagraphLayoutBox_GetLineSizeAtPosition(*args, **kwargs) def GetVisibleLineNumber(*args, **kwargs): """GetVisibleLineNumber(self, long pos, bool caretPosition=False, bool startOfLine=False) -> long""" return _richtext.RichTextParagraphLayoutBox_GetVisibleLineNumber(*args, **kwargs) def GetLineForVisibleLineNumber(*args, **kwargs): """GetLineForVisibleLineNumber(self, long 
lineNumber) -> RichTextLine""" return _richtext.RichTextParagraphLayoutBox_GetLineForVisibleLineNumber(*args, **kwargs) def GetLeafObjectAtPosition(*args, **kwargs): """GetLeafObjectAtPosition(self, long position) -> RichTextObject""" return _richtext.RichTextParagraphLayoutBox_GetLeafObjectAtPosition(*args, **kwargs) def GetParagraphAtLine(*args, **kwargs): """GetParagraphAtLine(self, long paragraphNumber) -> RichTextParagraph""" return _richtext.RichTextParagraphLayoutBox_GetParagraphAtLine(*args, **kwargs) def GetParagraphForLine(*args, **kwargs): """GetParagraphForLine(self, RichTextLine line) -> RichTextParagraph""" return _richtext.RichTextParagraphLayoutBox_GetParagraphForLine(*args, **kwargs) def GetParagraphLength(*args, **kwargs): """GetParagraphLength(self, long paragraphNumber) -> int""" return _richtext.RichTextParagraphLayoutBox_GetParagraphLength(*args, **kwargs) def GetParagraphCount(*args, **kwargs): """GetParagraphCount(self) -> int""" return _richtext.RichTextParagraphLayoutBox_GetParagraphCount(*args, **kwargs) def GetLineCount(*args, **kwargs): """GetLineCount(self) -> int""" return _richtext.RichTextParagraphLayoutBox_GetLineCount(*args, **kwargs) def GetParagraphText(*args, **kwargs): """GetParagraphText(self, long paragraphNumber) -> String""" return _richtext.RichTextParagraphLayoutBox_GetParagraphText(*args, **kwargs) def XYToPosition(*args, **kwargs): """XYToPosition(self, long x, long y) -> long""" return _richtext.RichTextParagraphLayoutBox_XYToPosition(*args, **kwargs) def PositionToXY(*args, **kwargs): """PositionToXY(self, long pos, long x, long y) -> bool""" return _richtext.RichTextParagraphLayoutBox_PositionToXY(*args, **kwargs) def SetStyle(*args, **kwargs): """SetStyle(self, RichTextRange range, RichTextAttr style, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool""" return _richtext.RichTextParagraphLayoutBox_SetStyle(*args, **kwargs) def GetStyle(*args, **kwargs): """GetStyle(self, long position, RichTextAttr style) -> bool""" return _richtext.RichTextParagraphLayoutBox_GetStyle(*args, **kwargs) def GetUncombinedStyle(*args, **kwargs): """GetUncombinedStyle(self, long position, RichTextAttr style) -> bool""" return _richtext.RichTextParagraphLayoutBox_GetUncombinedStyle(*args, **kwargs) def DoGetStyle(*args, **kwargs): """DoGetStyle(self, long position, RichTextAttr style, bool combineStyles=True) -> bool""" return _richtext.RichTextParagraphLayoutBox_DoGetStyle(*args, **kwargs) def GetStyleForRange(*args, **kwargs): """GetStyleForRange(self, RichTextRange range, RichTextAttr style) -> bool""" return _richtext.RichTextParagraphLayoutBox_GetStyleForRange(*args, **kwargs) def CollectStyle(*args, **kwargs): """ CollectStyle(self, RichTextAttr currentStyle, RichTextAttr style, RichTextAttr clashingAttr, RichTextAttr absentAttr) -> bool """ return _richtext.RichTextParagraphLayoutBox_CollectStyle(*args, **kwargs) def SetListStyle(*args): """ SetListStyle(self, RichTextRange range, wxRichTextListStyleDefinition def, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1, int specifiedLevel=-1) -> bool SetListStyle(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1, int specifiedLevel=-1) -> bool """ return _richtext.RichTextParagraphLayoutBox_SetListStyle(*args) def ClearListStyle(*args, **kwargs): """ClearListStyle(self, RichTextRange range, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool""" return _richtext.RichTextParagraphLayoutBox_ClearListStyle(*args, **kwargs) def NumberList(*args): """ NumberList(self, 
RichTextRange range, wxRichTextListStyleDefinition def=None, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1, int specifiedLevel=-1) -> bool NumberList(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1, int specifiedLevel=-1) -> bool """ return _richtext.RichTextParagraphLayoutBox_NumberList(*args) def PromoteList(*args): """ PromoteList(self, int promoteBy, RichTextRange range, wxRichTextListStyleDefinition def=None, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool PromoteList(self, int promoteBy, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool """ return _richtext.RichTextParagraphLayoutBox_PromoteList(*args) def DoNumberList(*args, **kwargs): """ DoNumberList(self, RichTextRange range, RichTextRange promotionRange, int promoteBy, wxRichTextListStyleDefinition def, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1, int specifiedLevel=-1) -> bool """ return _richtext.RichTextParagraphLayoutBox_DoNumberList(*args, **kwargs) def FindNextParagraphNumber(*args, **kwargs): """FindNextParagraphNumber(self, RichTextParagraph previousParagraph, RichTextAttr attr) -> bool""" return _richtext.RichTextParagraphLayoutBox_FindNextParagraphNumber(*args, **kwargs) def HasCharacterAttributes(*args, **kwargs): """HasCharacterAttributes(self, RichTextRange range, RichTextAttr style) -> bool""" return _richtext.RichTextParagraphLayoutBox_HasCharacterAttributes(*args, **kwargs) def HasParagraphAttributes(*args, **kwargs): """HasParagraphAttributes(self, RichTextRange range, RichTextAttr style) -> bool""" return _richtext.RichTextParagraphLayoutBox_HasParagraphAttributes(*args, **kwargs) def InsertFragment(*args, **kwargs): """InsertFragment(self, long position, RichTextParagraphLayoutBox fragment) -> bool""" return _richtext.RichTextParagraphLayoutBox_InsertFragment(*args, **kwargs) def CopyFragment(*args, **kwargs): """CopyFragment(self, RichTextRange range, RichTextParagraphLayoutBox fragment) -> bool""" return _richtext.RichTextParagraphLayoutBox_CopyFragment(*args, **kwargs) def ApplyStyleSheet(*args, **kwargs): """ApplyStyleSheet(self, wxRichTextStyleSheet styleSheet) -> bool""" return _richtext.RichTextParagraphLayoutBox_ApplyStyleSheet(*args, **kwargs) def Copy(*args, **kwargs): """Copy(self, RichTextParagraphLayoutBox obj)""" return _richtext.RichTextParagraphLayoutBox_Copy(*args, **kwargs) def UpdateRanges(*args, **kwargs): """UpdateRanges(self)""" return _richtext.RichTextParagraphLayoutBox_UpdateRanges(*args, **kwargs) def GetText(*args, **kwargs): """GetText(self) -> String""" return _richtext.RichTextParagraphLayoutBox_GetText(*args, **kwargs) def SetDefaultStyle(*args, **kwargs): """SetDefaultStyle(self, RichTextAttr style) -> bool""" return _richtext.RichTextParagraphLayoutBox_SetDefaultStyle(*args, **kwargs) def GetDefaultStyle(*args, **kwargs): """GetDefaultStyle(self) -> RichTextAttr""" return _richtext.RichTextParagraphLayoutBox_GetDefaultStyle(*args, **kwargs) def SetBasicStyle(*args, **kwargs): """SetBasicStyle(self, RichTextAttr style)""" return _richtext.RichTextParagraphLayoutBox_SetBasicStyle(*args, **kwargs) def GetBasicStyle(*args, **kwargs): """GetBasicStyle(self) -> RichTextAttr""" return _richtext.RichTextParagraphLayoutBox_GetBasicStyle(*args, **kwargs) def Invalidate(*args, **kwargs): """Invalidate(self, RichTextRange invalidRange=wxRICHTEXT_ALL)""" return _richtext.RichTextParagraphLayoutBox_Invalidate(*args, **kwargs) def 
UpdateFloatingObjects(*args, **kwargs): """UpdateFloatingObjects(self, Rect availableRect, RichTextObject untilObj=None) -> bool""" return _richtext.RichTextParagraphLayoutBox_UpdateFloatingObjects(*args, **kwargs) def GetInvalidRange(*args, **kwargs): """GetInvalidRange(self, bool wholeParagraphs=False) -> RichTextRange""" return _richtext.RichTextParagraphLayoutBox_GetInvalidRange(*args, **kwargs) def GetFloatCollector(*args, **kwargs): """GetFloatCollector(self) -> wxRichTextFloatCollector""" return _richtext.RichTextParagraphLayoutBox_GetFloatCollector(*args, **kwargs) _richtext.RichTextParagraphLayoutBox_swigregister(RichTextParagraphLayoutBox) class RichTextBox(RichTextCompositeObject): """Proxy of C++ RichTextBox class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ __init__(self, RichTextObject parent=None) -> RichTextBox __init__(self, RichTextBox obj) -> RichTextBox """ _richtext.RichTextBox_swiginit(self,_richtext.new_RichTextBox(*args)) def Copy(*args, **kwargs): """Copy(self, RichTextBox obj)""" return _richtext.RichTextBox_Copy(*args, **kwargs) _richtext.RichTextBox_swigregister(RichTextBox) class RichTextLine(object): """ This object represents a line in a paragraph, and stores offsets from the start of the paragraph representing the start and end positions of the line. """ thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """ __init__(self, RichTextParagraph parent) -> RichTextLine This object represents a line in a paragraph, and stores offsets from the start of the paragraph representing the start and end positions of the line. 
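        Lines are created by the layout process rather than by user code;
        inspecting them is the common use (a sketch, assuming ``para`` is a
        laid-out `RichTextParagraph`)::

            for line in para.GetLines():
                r = line.GetAbsoluteRange()   # buffer-relative range
                sz = line.GetSize()           # size computed during layout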
""" _richtext.RichTextLine_swiginit(self,_richtext.new_RichTextLine(*args, **kwargs)) __swig_destroy__ = _richtext.delete_RichTextLine __del__ = lambda self : None; def SetRange(*args, **kwargs): """SetRange(self, RichTextRange range)""" return _richtext.RichTextLine_SetRange(*args, **kwargs) def GetParent(*args, **kwargs): """GetParent(self) -> RichTextParagraph""" return _richtext.RichTextLine_GetParent(*args, **kwargs) def GetRange(*args, **kwargs): """GetRange(self) -> RichTextRange""" return _richtext.RichTextLine_GetRange(*args, **kwargs) def GetAbsoluteRange(*args, **kwargs): """GetAbsoluteRange(self) -> RichTextRange""" return _richtext.RichTextLine_GetAbsoluteRange(*args, **kwargs) def GetSize(*args, **kwargs): """GetSize(self) -> Size""" return _richtext.RichTextLine_GetSize(*args, **kwargs) def SetSize(*args, **kwargs): """SetSize(self, Size sz)""" return _richtext.RichTextLine_SetSize(*args, **kwargs) def GetPosition(*args, **kwargs): """GetPosition(self) -> Point""" return _richtext.RichTextLine_GetPosition(*args, **kwargs) def SetPosition(*args, **kwargs): """SetPosition(self, Point pos)""" return _richtext.RichTextLine_SetPosition(*args, **kwargs) def GetAbsolutePosition(*args, **kwargs): """GetAbsolutePosition(self) -> Point""" return _richtext.RichTextLine_GetAbsolutePosition(*args, **kwargs) def GetRect(*args, **kwargs): """GetRect(self) -> Rect""" return _richtext.RichTextLine_GetRect(*args, **kwargs) def SetDescent(*args, **kwargs): """SetDescent(self, int descent)""" return _richtext.RichTextLine_SetDescent(*args, **kwargs) def GetDescent(*args, **kwargs): """GetDescent(self) -> int""" return _richtext.RichTextLine_GetDescent(*args, **kwargs) def Init(*args, **kwargs): """Init(self, RichTextParagraph parent)""" return _richtext.RichTextLine_Init(*args, **kwargs) def Copy(*args, **kwargs): """Copy(self, RichTextLine obj)""" return _richtext.RichTextLine_Copy(*args, **kwargs) def Clone(*args, **kwargs): """Clone(self) -> RichTextLine""" return _richtext.RichTextLine_Clone(*args, **kwargs) _richtext.RichTextLine_swigregister(RichTextLine) class RichTextParagraph(RichTextBox): """ This object represents a single paragraph (or in a straight text editor, a line). """ thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """ __init__(self, String text, RichTextObject parent=None, RichTextAttr paraStyle=None, RichTextAttr charStyle=None) -> RichTextParagraph This object represents a single paragraph (or in a straight text editor, a line). 
""" _richtext.RichTextParagraph_swiginit(self,_richtext.new_RichTextParagraph(*args, **kwargs)) __swig_destroy__ = _richtext.delete_RichTextParagraph __del__ = lambda self : None; def GetLines(*args, **kwargs): """GetLines(self) -> wxRichTextLineList""" return _richtext.RichTextParagraph_GetLines(*args, **kwargs) def Copy(*args, **kwargs): """Copy(self, RichTextParagraph obj)""" return _richtext.RichTextParagraph_Copy(*args, **kwargs) def ClearLines(*args, **kwargs): """ClearLines(self)""" return _richtext.RichTextParagraph_ClearLines(*args, **kwargs) def ApplyParagraphStyle(*args, **kwargs): """ApplyParagraphStyle(self, RichTextLine line, RichTextAttr attr, Rect rect, DC dc)""" return _richtext.RichTextParagraph_ApplyParagraphStyle(*args, **kwargs) def InsertText(*args, **kwargs): """InsertText(self, long pos, String text) -> bool""" return _richtext.RichTextParagraph_InsertText(*args, **kwargs) def SplitAt(*args, **kwargs): """SplitAt(self, long pos, RichTextObject previousObject=None) -> RichTextObject""" return _richtext.RichTextParagraph_SplitAt(*args, **kwargs) def MoveToList(*args, **kwargs): """MoveToList(self, RichTextObject obj, wxList list)""" return _richtext.RichTextParagraph_MoveToList(*args, **kwargs) def MoveFromList(*args, **kwargs): """MoveFromList(self, wxList list)""" return _richtext.RichTextParagraph_MoveFromList(*args, **kwargs) def GetContiguousPlainText(*args, **kwargs): """GetContiguousPlainText(self, String text, RichTextRange range, bool fromStart=True) -> bool""" return _richtext.RichTextParagraph_GetContiguousPlainText(*args, **kwargs) def FindWrapPosition(*args, **kwargs): """ FindWrapPosition(self, RichTextRange range, DC dc, RichTextDrawingContext context, int availableSpace, long wrapPosition, wxArrayInt partialExtents) -> bool """ return _richtext.RichTextParagraph_FindWrapPosition(*args, **kwargs) def FindObjectAtPosition(*args, **kwargs): """FindObjectAtPosition(self, long position) -> RichTextObject""" return _richtext.RichTextParagraph_FindObjectAtPosition(*args, **kwargs) def GetBulletText(*args, **kwargs): """GetBulletText(self) -> String""" return _richtext.RichTextParagraph_GetBulletText(*args, **kwargs) def AllocateLine(*args, **kwargs): """AllocateLine(self, int pos) -> RichTextLine""" return _richtext.RichTextParagraph_AllocateLine(*args, **kwargs) def ClearUnusedLines(*args, **kwargs): """ClearUnusedLines(self, int lineCount) -> bool""" return _richtext.RichTextParagraph_ClearUnusedLines(*args, **kwargs) def GetCombinedAttributes(*args, **kwargs): """GetCombinedAttributes(self, RichTextAttr contentStyle=None) -> RichTextAttr""" return _richtext.RichTextParagraph_GetCombinedAttributes(*args, **kwargs) def GetFirstLineBreakPosition(*args, **kwargs): """GetFirstLineBreakPosition(self, long pos) -> long""" return _richtext.RichTextParagraph_GetFirstLineBreakPosition(*args, **kwargs) def InitDefaultTabs(*args, **kwargs): """InitDefaultTabs()""" return _richtext.RichTextParagraph_InitDefaultTabs(*args, **kwargs) InitDefaultTabs = staticmethod(InitDefaultTabs) def ClearDefaultTabs(*args, **kwargs): """ClearDefaultTabs()""" return _richtext.RichTextParagraph_ClearDefaultTabs(*args, **kwargs) ClearDefaultTabs = staticmethod(ClearDefaultTabs) def GetDefaultTabs(*args, **kwargs): """GetDefaultTabs() -> wxArrayInt""" return _richtext.RichTextParagraph_GetDefaultTabs(*args, **kwargs) GetDefaultTabs = staticmethod(GetDefaultTabs) _richtext.RichTextParagraph_swigregister(RichTextParagraph) def RichTextParagraph_InitDefaultTabs(*args): 
"""RichTextParagraph_InitDefaultTabs()""" return _richtext.RichTextParagraph_InitDefaultTabs(*args) def RichTextParagraph_ClearDefaultTabs(*args): """RichTextParagraph_ClearDefaultTabs()""" return _richtext.RichTextParagraph_ClearDefaultTabs(*args) def RichTextParagraph_GetDefaultTabs(*args): """RichTextParagraph_GetDefaultTabs() -> wxArrayInt""" return _richtext.RichTextParagraph_GetDefaultTabs(*args) class RichTextPlainText(RichTextObject): """This object represents a single piece of text.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """ __init__(self, String text=wxEmptyString, RichTextObject parent=None, RichTextAttr style=None) -> RichTextPlainText This object represents a single piece of text. """ _richtext.RichTextPlainText_swiginit(self,_richtext.new_RichTextPlainText(*args, **kwargs)) def GetFirstLineBreakPosition(*args, **kwargs): """GetFirstLineBreakPosition(self, long pos) -> long""" return _richtext.RichTextPlainText_GetFirstLineBreakPosition(*args, **kwargs) def GetText(*args, **kwargs): """GetText(self) -> String""" return _richtext.RichTextPlainText_GetText(*args, **kwargs) def SetText(*args, **kwargs): """SetText(self, String text)""" return _richtext.RichTextPlainText_SetText(*args, **kwargs) def Copy(*args, **kwargs): """Copy(self, RichTextPlainText obj)""" return _richtext.RichTextPlainText_Copy(*args, **kwargs) _richtext.RichTextPlainText_swigregister(RichTextPlainText) class RichTextImage(RichTextObject): """This object represents an image.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ __init__(self, RichTextObject parent=None) -> RichTextImage __init__(self, Image image, RichTextObject parent=None, RichTextAttr charStyle=None) -> RichTextImage __init__(self, wxRichTextImageBlock imageBlock, RichTextObject parent=None, RichTextAttr charStyle=None) -> RichTextImage __init__(self, RichTextImage obj) -> RichTextImage This object represents an image. 
""" _richtext.RichTextImage_swiginit(self,_richtext.new_RichTextImage(*args)) def GetImageCache(*args, **kwargs): """GetImageCache(self) -> Bitmap""" return _richtext.RichTextImage_GetImageCache(*args, **kwargs) def SetImageCache(*args, **kwargs): """SetImageCache(self, Bitmap bitmap)""" return _richtext.RichTextImage_SetImageCache(*args, **kwargs) def ResetImageCache(*args, **kwargs): """ResetImageCache(self)""" return _richtext.RichTextImage_ResetImageCache(*args, **kwargs) def GetImageBlock(*args, **kwargs): """GetImageBlock(self) -> wxRichTextImageBlock""" return _richtext.RichTextImage_GetImageBlock(*args, **kwargs) def Copy(*args, **kwargs): """Copy(self, RichTextImage obj)""" return _richtext.RichTextImage_Copy(*args, **kwargs) def LoadImageCache(*args, **kwargs): """LoadImageCache(self, DC dc, bool resetCache=False) -> bool""" return _richtext.RichTextImage_LoadImageCache(*args, **kwargs) _richtext.RichTextImage_swigregister(RichTextImage) class RichTextFileHandlerList_iterator(object): """This class serves as an iterator for a wxRichTextFileHandlerList object.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self): raise AttributeError, "No constructor defined" __repr__ = _swig_repr __swig_destroy__ = _richtext.delete_RichTextFileHandlerList_iterator __del__ = lambda self : None; def next(*args, **kwargs): """next(self) -> RichTextFileHandler""" return _richtext.RichTextFileHandlerList_iterator_next(*args, **kwargs) _richtext.RichTextFileHandlerList_iterator_swigregister(RichTextFileHandlerList_iterator) class RichTextFileHandlerList(object): """ This class wraps a wxList-based class and gives it a Python sequence-like interface. Sequence operations supported are length, index access and iteration. """ thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') def __init__(self): raise AttributeError, "No constructor defined" __repr__ = _swig_repr __swig_destroy__ = _richtext.delete_RichTextFileHandlerList __del__ = lambda self : None; def __len__(*args, **kwargs): """__len__(self) -> size_t""" return _richtext.RichTextFileHandlerList___len__(*args, **kwargs) def __getitem__(*args, **kwargs): """__getitem__(self, size_t index) -> RichTextFileHandler""" return _richtext.RichTextFileHandlerList___getitem__(*args, **kwargs) def __contains__(*args, **kwargs): """__contains__(self, RichTextFileHandler obj) -> bool""" return _richtext.RichTextFileHandlerList___contains__(*args, **kwargs) def __iter__(*args, **kwargs): """__iter__(self) -> RichTextFileHandlerList_iterator""" return _richtext.RichTextFileHandlerList___iter__(*args, **kwargs) def __repr__(self): return "wxRichTextFileHandlerList: " + repr(list(self)) _richtext.RichTextFileHandlerList_swigregister(RichTextFileHandlerList) class RichTextBuffer(RichTextParagraphLayoutBox): """This is a kind of box, used to represent the whole buffer.""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """ __init__(self) -> RichTextBuffer This is a kind of box, used to represent the whole buffer. 
""" _richtext.RichTextBuffer_swiginit(self,_richtext.new_RichTextBuffer(*args, **kwargs)) __swig_destroy__ = _richtext.delete_RichTextBuffer __del__ = lambda self : None; def GetCommandProcessor(*args, **kwargs): """GetCommandProcessor(self) -> wxCommandProcessor""" return _richtext.RichTextBuffer_GetCommandProcessor(*args, **kwargs) def SetStyleSheet(*args, **kwargs): """SetStyleSheet(self, wxRichTextStyleSheet styleSheet)""" return _richtext.RichTextBuffer_SetStyleSheet(*args, **kwargs) def SetStyleSheetAndNotify(*args, **kwargs): """SetStyleSheetAndNotify(self, wxRichTextStyleSheet sheet) -> bool""" return _richtext.RichTextBuffer_SetStyleSheetAndNotify(*args, **kwargs) def PushStyleSheet(*args, **kwargs): """PushStyleSheet(self, wxRichTextStyleSheet styleSheet) -> bool""" return _richtext.RichTextBuffer_PushStyleSheet(*args, **kwargs) def PopStyleSheet(*args, **kwargs): """PopStyleSheet(self) -> wxRichTextStyleSheet""" return _richtext.RichTextBuffer_PopStyleSheet(*args, **kwargs) def GetFontTable(*args, **kwargs): """GetFontTable(self) -> RichTextFontTable""" return _richtext.RichTextBuffer_GetFontTable(*args, **kwargs) def SetFontTable(*args, **kwargs): """SetFontTable(self, RichTextFontTable table)""" return _richtext.RichTextBuffer_SetFontTable(*args, **kwargs) def Init(*args, **kwargs): """Init(self)""" return _richtext.RichTextBuffer_Init(*args, **kwargs) def ResetAndClearCommands(*args, **kwargs): """ResetAndClearCommands(self)""" return _richtext.RichTextBuffer_ResetAndClearCommands(*args, **kwargs) def LoadFile(*args, **kwargs): """LoadFile(self, String filename, int type=RICHTEXT_TYPE_ANY) -> bool""" return _richtext.RichTextBuffer_LoadFile(*args, **kwargs) def SaveFile(*args, **kwargs): """SaveFile(self, String filename, int type=RICHTEXT_TYPE_ANY) -> bool""" return _richtext.RichTextBuffer_SaveFile(*args, **kwargs) def LoadStream(*args, **kwargs): """LoadStream(self, InputStream stream, int type=RICHTEXT_TYPE_ANY) -> bool""" return _richtext.RichTextBuffer_LoadStream(*args, **kwargs) def SaveStream(*args, **kwargs): """SaveStream(self, wxOutputStream stream, int type=RICHTEXT_TYPE_ANY) -> bool""" return _richtext.RichTextBuffer_SaveStream(*args, **kwargs) def SetHandlerFlags(*args, **kwargs): """SetHandlerFlags(self, int flags)""" return _richtext.RichTextBuffer_SetHandlerFlags(*args, **kwargs) def GetHandlerFlags(*args, **kwargs): """GetHandlerFlags(self) -> int""" return _richtext.RichTextBuffer_GetHandlerFlags(*args, **kwargs) def BeginBatchUndo(*args, **kwargs): """BeginBatchUndo(self, String cmdName) -> bool""" return _richtext.RichTextBuffer_BeginBatchUndo(*args, **kwargs) def EndBatchUndo(*args, **kwargs): """EndBatchUndo(self) -> bool""" return _richtext.RichTextBuffer_EndBatchUndo(*args, **kwargs) def BatchingUndo(*args, **kwargs): """BatchingUndo(self) -> bool""" return _richtext.RichTextBuffer_BatchingUndo(*args, **kwargs) def SubmitAction(*args, **kwargs): """SubmitAction(self, RichTextAction action) -> bool""" return _richtext.RichTextBuffer_SubmitAction(*args, **kwargs) def GetBatchedCommand(*args, **kwargs): """GetBatchedCommand(self) -> RichTextCommand""" return _richtext.RichTextBuffer_GetBatchedCommand(*args, **kwargs) def BeginSuppressUndo(*args, **kwargs): """BeginSuppressUndo(self) -> bool""" return _richtext.RichTextBuffer_BeginSuppressUndo(*args, **kwargs) def EndSuppressUndo(*args, **kwargs): """EndSuppressUndo(self) -> bool""" return _richtext.RichTextBuffer_EndSuppressUndo(*args, **kwargs) def SuppressingUndo(*args, **kwargs): 
"""SuppressingUndo(self) -> bool""" return _richtext.RichTextBuffer_SuppressingUndo(*args, **kwargs) def CopyToClipboard(*args, **kwargs): """CopyToClipboard(self, RichTextRange range) -> bool""" return _richtext.RichTextBuffer_CopyToClipboard(*args, **kwargs) def PasteFromClipboard(*args, **kwargs): """PasteFromClipboard(self, long position) -> bool""" return _richtext.RichTextBuffer_PasteFromClipboard(*args, **kwargs) def CanPasteFromClipboard(*args, **kwargs): """CanPasteFromClipboard(self) -> bool""" return _richtext.RichTextBuffer_CanPasteFromClipboard(*args, **kwargs) def BeginStyle(*args, **kwargs): """BeginStyle(self, RichTextAttr style) -> bool""" return _richtext.RichTextBuffer_BeginStyle(*args, **kwargs) def EndStyle(*args, **kwargs): """EndStyle(self) -> bool""" return _richtext.RichTextBuffer_EndStyle(*args, **kwargs) def EndAllStyles(*args, **kwargs): """EndAllStyles(self) -> bool""" return _richtext.RichTextBuffer_EndAllStyles(*args, **kwargs) def ClearStyleStack(*args, **kwargs): """ClearStyleStack(self)""" return _richtext.RichTextBuffer_ClearStyleStack(*args, **kwargs) def GetStyleStackSize(*args, **kwargs): """GetStyleStackSize(self) -> size_t""" return _richtext.RichTextBuffer_GetStyleStackSize(*args, **kwargs) def BeginBold(*args, **kwargs): """BeginBold(self) -> bool""" return _richtext.RichTextBuffer_BeginBold(*args, **kwargs) def EndBold(*args, **kwargs): """EndBold(self) -> bool""" return _richtext.RichTextBuffer_EndBold(*args, **kwargs) def BeginItalic(*args, **kwargs): """BeginItalic(self) -> bool""" return _richtext.RichTextBuffer_BeginItalic(*args, **kwargs) def EndItalic(*args, **kwargs): """EndItalic(self) -> bool""" return _richtext.RichTextBuffer_EndItalic(*args, **kwargs) def BeginUnderline(*args, **kwargs): """BeginUnderline(self) -> bool""" return _richtext.RichTextBuffer_BeginUnderline(*args, **kwargs) def EndUnderline(*args, **kwargs): """EndUnderline(self) -> bool""" return _richtext.RichTextBuffer_EndUnderline(*args, **kwargs) def BeginFontSize(*args, **kwargs): """BeginFontSize(self, int pointSize) -> bool""" return _richtext.RichTextBuffer_BeginFontSize(*args, **kwargs) def EndFontSize(*args, **kwargs): """EndFontSize(self) -> bool""" return _richtext.RichTextBuffer_EndFontSize(*args, **kwargs) def BeginFont(*args, **kwargs): """BeginFont(self, Font font) -> bool""" return _richtext.RichTextBuffer_BeginFont(*args, **kwargs) def EndFont(*args, **kwargs): """EndFont(self) -> bool""" return _richtext.RichTextBuffer_EndFont(*args, **kwargs) def BeginTextColour(*args, **kwargs): """BeginTextColour(self, Colour colour) -> bool""" return _richtext.RichTextBuffer_BeginTextColour(*args, **kwargs) def EndTextColour(*args, **kwargs): """EndTextColour(self) -> bool""" return _richtext.RichTextBuffer_EndTextColour(*args, **kwargs) def BeginAlignment(*args, **kwargs): """BeginAlignment(self, int alignment) -> bool""" return _richtext.RichTextBuffer_BeginAlignment(*args, **kwargs) def EndAlignment(*args, **kwargs): """EndAlignment(self) -> bool""" return _richtext.RichTextBuffer_EndAlignment(*args, **kwargs) def BeginLeftIndent(*args, **kwargs): """BeginLeftIndent(self, int leftIndent, int leftSubIndent=0) -> bool""" return _richtext.RichTextBuffer_BeginLeftIndent(*args, **kwargs) def EndLeftIndent(*args, **kwargs): """EndLeftIndent(self) -> bool""" return _richtext.RichTextBuffer_EndLeftIndent(*args, **kwargs) def BeginRightIndent(*args, **kwargs): """BeginRightIndent(self, int rightIndent) -> bool""" return _richtext.RichTextBuffer_BeginRightIndent(*args, 
    def EndRightIndent(*args, **kwargs):
        """EndRightIndent(self) -> bool"""
        return _richtext.RichTextBuffer_EndRightIndent(*args, **kwargs)

    def BeginParagraphSpacing(*args, **kwargs):
        """BeginParagraphSpacing(self, int before, int after) -> bool"""
        return _richtext.RichTextBuffer_BeginParagraphSpacing(*args, **kwargs)

    def EndParagraphSpacing(*args, **kwargs):
        """EndParagraphSpacing(self) -> bool"""
        return _richtext.RichTextBuffer_EndParagraphSpacing(*args, **kwargs)

    def BeginLineSpacing(*args, **kwargs):
        """BeginLineSpacing(self, int lineSpacing) -> bool"""
        return _richtext.RichTextBuffer_BeginLineSpacing(*args, **kwargs)

    def EndLineSpacing(*args, **kwargs):
        """EndLineSpacing(self) -> bool"""
        return _richtext.RichTextBuffer_EndLineSpacing(*args, **kwargs)

    def BeginNumberedBullet(*args, **kwargs):
        """
        BeginNumberedBullet(self, int bulletNumber, int leftIndent, int leftSubIndent, 
            int bulletStyle=wxTEXT_ATTR_BULLET_STYLE_ARABIC|wxTEXT_ATTR_BULLET_STYLE_PERIOD) -> bool
        """
        return _richtext.RichTextBuffer_BeginNumberedBullet(*args, **kwargs)

    def EndNumberedBullet(*args, **kwargs):
        """EndNumberedBullet(self) -> bool"""
        return _richtext.RichTextBuffer_EndNumberedBullet(*args, **kwargs)

    def BeginSymbolBullet(*args, **kwargs):
        """BeginSymbolBullet(self, String symbol, int leftIndent, int leftSubIndent, int bulletStyle=TEXT_ATTR_BULLET_STYLE_SYMBOL) -> bool"""
        return _richtext.RichTextBuffer_BeginSymbolBullet(*args, **kwargs)

    def EndSymbolBullet(*args, **kwargs):
        """EndSymbolBullet(self) -> bool"""
        return _richtext.RichTextBuffer_EndSymbolBullet(*args, **kwargs)

    def BeginStandardBullet(*args, **kwargs):
        """
        BeginStandardBullet(self, String bulletName, int leftIndent, int leftSubIndent, 
            int bulletStyle=TEXT_ATTR_BULLET_STYLE_STANDARD) -> bool
        """
        return _richtext.RichTextBuffer_BeginStandardBullet(*args, **kwargs)

    def EndStandardBullet(*args, **kwargs):
        """EndStandardBullet(self) -> bool"""
        return _richtext.RichTextBuffer_EndStandardBullet(*args, **kwargs)

    def BeginCharacterStyle(*args, **kwargs):
        """BeginCharacterStyle(self, String characterStyle) -> bool"""
        return _richtext.RichTextBuffer_BeginCharacterStyle(*args, **kwargs)

    def EndCharacterStyle(*args, **kwargs):
        """EndCharacterStyle(self) -> bool"""
        return _richtext.RichTextBuffer_EndCharacterStyle(*args, **kwargs)

    def BeginParagraphStyle(*args, **kwargs):
        """BeginParagraphStyle(self, String paragraphStyle) -> bool"""
        return _richtext.RichTextBuffer_BeginParagraphStyle(*args, **kwargs)

    def EndParagraphStyle(*args, **kwargs):
        """EndParagraphStyle(self) -> bool"""
        return _richtext.RichTextBuffer_EndParagraphStyle(*args, **kwargs)

    def BeginListStyle(*args, **kwargs):
        """BeginListStyle(self, String listStyle, int level=1, int number=1) -> bool"""
        return _richtext.RichTextBuffer_BeginListStyle(*args, **kwargs)

    def EndListStyle(*args, **kwargs):
        """EndListStyle(self) -> bool"""
        return _richtext.RichTextBuffer_EndListStyle(*args, **kwargs)

    def BeginURL(*args, **kwargs):
        """BeginURL(self, String url, String characterStyle=wxEmptyString) -> bool"""
        return _richtext.RichTextBuffer_BeginURL(*args, **kwargs)

    def EndURL(*args, **kwargs):
        """EndURL(self) -> bool"""
        return _richtext.RichTextBuffer_EndURL(*args, **kwargs)
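
    # Illustrative sketch (not part of the generated bindings): BeginURL/EndURL
    # wrap subsequently written text in a URL attribute; pairing it with a
    # character style named in the style sheet (the "URL" style here is
    # hypothetical) gives the text its link appearance:
    #
    #     buffer.BeginURL("http://www.wxpython.org", "URL")
    #     buffer.InsertTextWithUndo(pos, "wxPython home page", rtc)
    #     buffer.EndURL()
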
    def AddEventHandler(*args, **kwargs):
        """AddEventHandler(self, EvtHandler handler) -> bool"""
        return _richtext.RichTextBuffer_AddEventHandler(*args, **kwargs)

    def RemoveEventHandler(*args, **kwargs):
        """RemoveEventHandler(self, EvtHandler handler, bool deleteHandler=False) -> bool"""
        return _richtext.RichTextBuffer_RemoveEventHandler(*args, **kwargs)

    def ClearEventHandlers(*args, **kwargs):
        """ClearEventHandlers(self)"""
        return _richtext.RichTextBuffer_ClearEventHandlers(*args, **kwargs)

    def SendEvent(*args, **kwargs):
        """SendEvent(self, Event event, bool sendToAll=True) -> bool"""
        return _richtext.RichTextBuffer_SendEvent(*args, **kwargs)

    def Copy(*args, **kwargs):
        """Copy(self, RichTextBuffer obj)"""
        return _richtext.RichTextBuffer_Copy(*args, **kwargs)

    def InsertParagraphsWithUndo(*args, **kwargs):
        """
        InsertParagraphsWithUndo(self, long pos, RichTextParagraphLayoutBox paragraphs, 
            RichTextCtrl ctrl, int flags=0) -> bool
        """
        return _richtext.RichTextBuffer_InsertParagraphsWithUndo(*args, **kwargs)

    def InsertTextWithUndo(*args, **kwargs):
        """InsertTextWithUndo(self, long pos, String text, RichTextCtrl ctrl, int flags=0) -> bool"""
        return _richtext.RichTextBuffer_InsertTextWithUndo(*args, **kwargs)

    def InsertNewlineWithUndo(*args, **kwargs):
        """InsertNewlineWithUndo(self, long pos, RichTextCtrl ctrl, int flags=0) -> bool"""
        return _richtext.RichTextBuffer_InsertNewlineWithUndo(*args, **kwargs)

    def InsertImageWithUndo(*args, **kwargs):
        """
        InsertImageWithUndo(self, long pos, wxRichTextImageBlock imageBlock, RichTextCtrl ctrl, 
            int flags=0) -> bool
        """
        return _richtext.RichTextBuffer_InsertImageWithUndo(*args, **kwargs)

    def DeleteRangeWithUndo(*args, **kwargs):
        """DeleteRangeWithUndo(self, RichTextRange range, RichTextCtrl ctrl) -> bool"""
        return _richtext.RichTextBuffer_DeleteRangeWithUndo(*args, **kwargs)

    def Modify(*args, **kwargs):
        """Modify(self, bool modify=True)"""
        return _richtext.RichTextBuffer_Modify(*args, **kwargs)

    def IsModified(*args, **kwargs):
        """IsModified(self) -> bool"""
        return _richtext.RichTextBuffer_IsModified(*args, **kwargs)

    def GetStyleForNewParagraph(*args, **kwargs):
        """
        GetStyleForNewParagraph(self, RichTextBuffer buffer, long pos, bool caretPosition=False, 
            bool lookUpNewParaStyle=False) -> RichTextAttr
        """
        return _richtext.RichTextBuffer_GetStyleForNewParagraph(*args, **kwargs)

    def GetHandlers(*args, **kwargs):
        """GetHandlers() -> wxRichTextFileHandlerList_t"""
        return _richtext.RichTextBuffer_GetHandlers(*args, **kwargs)

    GetHandlers = staticmethod(GetHandlers)
    def AddHandler(*args, **kwargs):
        """AddHandler(RichTextFileHandler handler)"""
        return _richtext.RichTextBuffer_AddHandler(*args, **kwargs)

    AddHandler = staticmethod(AddHandler)
    def InsertHandler(*args, **kwargs):
        """InsertHandler(RichTextFileHandler handler)"""
        return _richtext.RichTextBuffer_InsertHandler(*args, **kwargs)

    InsertHandler = staticmethod(InsertHandler)
    def RemoveHandler(*args, **kwargs):
        """RemoveHandler(String name) -> bool"""
        return _richtext.RichTextBuffer_RemoveHandler(*args, **kwargs)

    RemoveHandler = staticmethod(RemoveHandler)
    def FindHandlerByName(*args, **kwargs):
        """FindHandlerByName(String name) -> RichTextFileHandler"""
        return _richtext.RichTextBuffer_FindHandlerByName(*args, **kwargs)

    FindHandlerByName = staticmethod(FindHandlerByName)
    def FindHandlerByExtension(*args, **kwargs):
        """FindHandlerByExtension(String extension, int imageType) -> RichTextFileHandler"""
        return _richtext.RichTextBuffer_FindHandlerByExtension(*args, **kwargs)

    FindHandlerByExtension = staticmethod(FindHandlerByExtension)
    def FindHandlerByFilename(*args, **kwargs):
        """FindHandlerByFilename(String filename, int imageType) -> RichTextFileHandler"""
        return _richtext.RichTextBuffer_FindHandlerByFilename(*args, **kwargs)

    FindHandlerByFilename = staticmethod(FindHandlerByFilename)
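
    # Illustrative sketch (not part of the generated bindings): the static
    # FindHandlerBy* helpers look up a registered file handler, e.g. to check
    # whether a file can be loaded before calling LoadFile:
    #
    #     handler = wx.richtext.RichTextBuffer.FindHandlerByFilename(
    #         "notes.html", wx.richtext.RICHTEXT_TYPE_HTML)
    #     if handler is not None and handler.CanLoad():
    #         buffer.LoadFile("notes.html", wx.richtext.RICHTEXT_TYPE_HTML)
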
    def FindHandlerByType(*args, **kwargs):
        """FindHandlerByType(int imageType) -> RichTextFileHandler"""
        return _richtext.RichTextBuffer_FindHandlerByType(*args, **kwargs)

    FindHandlerByType = staticmethod(FindHandlerByType)
    def GetExtWildcard(*args, **kwargs):
        """
        GetExtWildcard(self, bool combine=False, bool save=False) --> (wildcards, types)

        Gets a wildcard string for the file dialog based on all the currently
        loaded richtext file handlers, and a list that can be used to map
        those filter types to the file handler type.
        """
        return _richtext.RichTextBuffer_GetExtWildcard(*args, **kwargs)

    GetExtWildcard = staticmethod(GetExtWildcard)
    def CleanUpHandlers(*args, **kwargs):
        """CleanUpHandlers()"""
        return _richtext.RichTextBuffer_CleanUpHandlers(*args, **kwargs)

    CleanUpHandlers = staticmethod(CleanUpHandlers)
    def InitStandardHandlers(*args, **kwargs):
        """InitStandardHandlers()"""
        return _richtext.RichTextBuffer_InitStandardHandlers(*args, **kwargs)

    InitStandardHandlers = staticmethod(InitStandardHandlers)
    def GetRenderer(*args, **kwargs):
        """GetRenderer() -> RichTextRenderer"""
        return _richtext.RichTextBuffer_GetRenderer(*args, **kwargs)

    GetRenderer = staticmethod(GetRenderer)
    def SetRenderer(*args, **kwargs):
        """SetRenderer(RichTextRenderer renderer)"""
        return _richtext.RichTextBuffer_SetRenderer(*args, **kwargs)

    SetRenderer = staticmethod(SetRenderer)
    def GetBulletRightMargin(*args, **kwargs):
        """GetBulletRightMargin() -> int"""
        return _richtext.RichTextBuffer_GetBulletRightMargin(*args, **kwargs)

    GetBulletRightMargin = staticmethod(GetBulletRightMargin)
    def SetBulletRightMargin(*args, **kwargs):
        """SetBulletRightMargin(int margin)"""
        return _richtext.RichTextBuffer_SetBulletRightMargin(*args, **kwargs)

    SetBulletRightMargin = staticmethod(SetBulletRightMargin)
    def GetBulletProportion(*args, **kwargs):
        """GetBulletProportion() -> float"""
        return _richtext.RichTextBuffer_GetBulletProportion(*args, **kwargs)

    GetBulletProportion = staticmethod(GetBulletProportion)
    def SetBulletProportion(*args, **kwargs):
        """SetBulletProportion(float prop)"""
        return _richtext.RichTextBuffer_SetBulletProportion(*args, **kwargs)

    SetBulletProportion = staticmethod(SetBulletProportion)
    def GetScale(*args, **kwargs):
        """GetScale(self) -> double"""
        return _richtext.RichTextBuffer_GetScale(*args, **kwargs)

    def SetScale(*args, **kwargs):
        """SetScale(self, double scale)"""
        return _richtext.RichTextBuffer_SetScale(*args, **kwargs)

_richtext.RichTextBuffer_swigregister(RichTextBuffer)
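
# Illustrative sketch (not part of the generated bindings): GetExtWildcard
# returns both a wildcard string for wx.FileDialog and a parallel list of
# handler types, so the chosen filter index maps straight back to a handler
# type ("parent" is a hypothetical window):
#
#     wildcard, types = wx.richtext.RichTextBuffer.GetExtWildcard(save=True)
#     dlg = wx.FileDialog(parent, "Save as", wildcard=wildcard,
#                         style=wx.FD_SAVE)
#     if dlg.ShowModal() == wx.ID_OK:
#         buffer.SaveFile(dlg.GetPath(), types[dlg.GetFilterIndex()])
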
def RichTextBuffer_GetHandlers(*args):
    """RichTextBuffer_GetHandlers() -> wxRichTextFileHandlerList_t"""
    return _richtext.RichTextBuffer_GetHandlers(*args)

def RichTextBuffer_AddHandler(*args, **kwargs):
    """RichTextBuffer_AddHandler(RichTextFileHandler handler)"""
    return _richtext.RichTextBuffer_AddHandler(*args, **kwargs)

def RichTextBuffer_InsertHandler(*args, **kwargs):
    """RichTextBuffer_InsertHandler(RichTextFileHandler handler)"""
    return _richtext.RichTextBuffer_InsertHandler(*args, **kwargs)

def RichTextBuffer_RemoveHandler(*args, **kwargs):
    """RichTextBuffer_RemoveHandler(String name) -> bool"""
    return _richtext.RichTextBuffer_RemoveHandler(*args, **kwargs)

def RichTextBuffer_FindHandlerByName(*args, **kwargs):
    """RichTextBuffer_FindHandlerByName(String name) -> RichTextFileHandler"""
    return _richtext.RichTextBuffer_FindHandlerByName(*args, **kwargs)

def RichTextBuffer_FindHandlerByExtension(*args, **kwargs):
    """RichTextBuffer_FindHandlerByExtension(String extension, int imageType) -> RichTextFileHandler"""
    return _richtext.RichTextBuffer_FindHandlerByExtension(*args, **kwargs)

def RichTextBuffer_FindHandlerByFilename(*args, **kwargs):
    """RichTextBuffer_FindHandlerByFilename(String filename, int imageType) -> RichTextFileHandler"""
    return _richtext.RichTextBuffer_FindHandlerByFilename(*args, **kwargs)

def RichTextBuffer_FindHandlerByType(*args, **kwargs):
    """RichTextBuffer_FindHandlerByType(int imageType) -> RichTextFileHandler"""
    return _richtext.RichTextBuffer_FindHandlerByType(*args, **kwargs)

def RichTextBuffer_GetExtWildcard(*args, **kwargs):
    """
    GetExtWildcard(self, bool combine=False, bool save=False) --> (wildcards, types)

    Gets a wildcard string for the file dialog based on all the currently
    loaded richtext file handlers, and a list that can be used to map those
    filter types to the file handler type.
    """
    return _richtext.RichTextBuffer_GetExtWildcard(*args, **kwargs)

def RichTextBuffer_CleanUpHandlers(*args):
    """RichTextBuffer_CleanUpHandlers()"""
    return _richtext.RichTextBuffer_CleanUpHandlers(*args)

def RichTextBuffer_InitStandardHandlers(*args):
    """RichTextBuffer_InitStandardHandlers()"""
    return _richtext.RichTextBuffer_InitStandardHandlers(*args)

def RichTextBuffer_GetRenderer(*args):
    """RichTextBuffer_GetRenderer() -> RichTextRenderer"""
    return _richtext.RichTextBuffer_GetRenderer(*args)

def RichTextBuffer_SetRenderer(*args, **kwargs):
    """RichTextBuffer_SetRenderer(RichTextRenderer renderer)"""
    return _richtext.RichTextBuffer_SetRenderer(*args, **kwargs)

def RichTextBuffer_GetBulletRightMargin(*args):
    """RichTextBuffer_GetBulletRightMargin() -> int"""
    return _richtext.RichTextBuffer_GetBulletRightMargin(*args)

def RichTextBuffer_SetBulletRightMargin(*args, **kwargs):
    """RichTextBuffer_SetBulletRightMargin(int margin)"""
    return _richtext.RichTextBuffer_SetBulletRightMargin(*args, **kwargs)

def RichTextBuffer_GetBulletProportion(*args):
    """RichTextBuffer_GetBulletProportion() -> float"""
    return _richtext.RichTextBuffer_GetBulletProportion(*args)

def RichTextBuffer_SetBulletProportion(*args, **kwargs):
    """RichTextBuffer_SetBulletProportion(float prop)"""
    return _richtext.RichTextBuffer_SetBulletProportion(*args, **kwargs)

#---------------------------------------------------------------------------

RICHTEXT_HANDLER_INCLUDE_STYLESHEET = _richtext.RICHTEXT_HANDLER_INCLUDE_STYLESHEET
RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY = _richtext.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY
RICHTEXT_HANDLER_SAVE_IMAGES_TO_FILES = _richtext.RICHTEXT_HANDLER_SAVE_IMAGES_TO_FILES
RICHTEXT_HANDLER_SAVE_IMAGES_TO_BASE64 = _richtext.RICHTEXT_HANDLER_SAVE_IMAGES_TO_BASE64
RICHTEXT_HANDLER_NO_HEADER_FOOTER = _richtext.RICHTEXT_HANDLER_NO_HEADER_FOOTER
RICHTEXT_HANDLER_CONVERT_FACENAMES = _richtext.RICHTEXT_HANDLER_CONVERT_FACENAMES
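
# Illustrative sketch (not part of the generated bindings): the
# RICHTEXT_HANDLER_* flags above tune how handlers load and save; for
# example, to make the HTML handler embed images as base64 rather than
# writing temporary image files ("rtc" is a hypothetical RichTextCtrl):
#
#     rtc.SetHandlerFlags(wx.richtext.RICHTEXT_HANDLER_SAVE_IMAGES_TO_BASE64)
#     rtc.SaveFile("out.html", wx.richtext.RICHTEXT_TYPE_HTML)
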
class RichTextFileHandler(_core.Object):
    """Base class for file handlers"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _richtext.delete_RichTextFileHandler
    __del__ = lambda self : None;
    def LoadStream(*args, **kwargs):
        """LoadStream(self, RichTextBuffer buffer, InputStream stream) -> bool"""
        return _richtext.RichTextFileHandler_LoadStream(*args, **kwargs)

    def SaveStream(*args, **kwargs):
        """SaveStream(self, RichTextBuffer buffer, wxOutputStream stream) -> bool"""
        return _richtext.RichTextFileHandler_SaveStream(*args, **kwargs)

    def LoadFile(*args, **kwargs):
        """LoadFile(self, RichTextBuffer buffer, String filename) -> bool"""
        return _richtext.RichTextFileHandler_LoadFile(*args, **kwargs)

    def SaveFile(*args, **kwargs):
        """SaveFile(self, RichTextBuffer buffer, String filename) -> bool"""
        return _richtext.RichTextFileHandler_SaveFile(*args, **kwargs)

    def CanHandle(*args, **kwargs):
        """CanHandle(self, String filename) -> bool"""
        return _richtext.RichTextFileHandler_CanHandle(*args, **kwargs)

    def CanSave(*args, **kwargs):
        """CanSave(self) -> bool"""
        return _richtext.RichTextFileHandler_CanSave(*args, **kwargs)

    def CanLoad(*args, **kwargs):
        """CanLoad(self) -> bool"""
        return _richtext.RichTextFileHandler_CanLoad(*args, **kwargs)

    def IsVisible(*args, **kwargs):
        """IsVisible(self) -> bool"""
        return _richtext.RichTextFileHandler_IsVisible(*args, **kwargs)

    def SetVisible(*args, **kwargs):
        """SetVisible(self, bool visible)"""
        return _richtext.RichTextFileHandler_SetVisible(*args, **kwargs)

    def SetName(*args, **kwargs):
        """SetName(self, String name)"""
        return _richtext.RichTextFileHandler_SetName(*args, **kwargs)

    def GetName(*args, **kwargs):
        """GetName(self) -> String"""
        return _richtext.RichTextFileHandler_GetName(*args, **kwargs)

    Name = property(GetName,SetName)
    def SetExtension(*args, **kwargs):
        """SetExtension(self, String ext)"""
        return _richtext.RichTextFileHandler_SetExtension(*args, **kwargs)

    def GetExtension(*args, **kwargs):
        """GetExtension(self) -> String"""
        return _richtext.RichTextFileHandler_GetExtension(*args, **kwargs)

    Extension = property(GetExtension,SetExtension)
    def SetType(*args, **kwargs):
        """SetType(self, int type)"""
        return _richtext.RichTextFileHandler_SetType(*args, **kwargs)

    def GetType(*args, **kwargs):
        """GetType(self) -> int"""
        return _richtext.RichTextFileHandler_GetType(*args, **kwargs)

    Type = property(GetType,SetType)
    def SetFlags(*args, **kwargs):
        """SetFlags(self, int flags)"""
        return _richtext.RichTextFileHandler_SetFlags(*args, **kwargs)

    def GetFlags(*args, **kwargs):
        """GetFlags(self) -> int"""
        return _richtext.RichTextFileHandler_GetFlags(*args, **kwargs)

    Flags = property(GetFlags,SetFlags)
    def SetEncoding(*args, **kwargs):
        """SetEncoding(self, String encoding)"""
        return _richtext.RichTextFileHandler_SetEncoding(*args, **kwargs)

    def GetEncoding(*args, **kwargs):
        """GetEncoding(self) -> String"""
        return _richtext.RichTextFileHandler_GetEncoding(*args, **kwargs)

    Encoding = property(GetEncoding,SetEncoding)

_richtext.RichTextFileHandler_swigregister(RichTextFileHandler)

class RichTextPlainTextHandler(RichTextFileHandler):
    """Proxy of C++ RichTextPlainTextHandler class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String name=TextName, String ext=TextExt, int type=RICHTEXT_TYPE_TEXT) -> RichTextPlainTextHandler"""
        _richtext.RichTextPlainTextHandler_swiginit(self,_richtext.new_RichTextPlainTextHandler(*args, **kwargs))

_richtext.RichTextPlainTextHandler_swigregister(RichTextPlainTextHandler)
TextName = cvar.TextName
TextExt = cvar.TextExt

#---------------------------------------------------------------------------
class RichTextRenderer(_core.Object):
    """Proxy of C++ RichTextRenderer class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _richtext.delete_RichTextRenderer
    __del__ = lambda self : None;
    def DrawStandardBullet(*args, **kwargs):
        """
        DrawStandardBullet(self, RichTextParagraph paragraph, DC dc, RichTextAttr attr, 
            Rect rect) -> bool
        """
        return _richtext.RichTextRenderer_DrawStandardBullet(*args, **kwargs)

    def DrawTextBullet(*args, **kwargs):
        """
        DrawTextBullet(self, RichTextParagraph paragraph, DC dc, RichTextAttr attr, 
            Rect rect, String text) -> bool
        """
        return _richtext.RichTextRenderer_DrawTextBullet(*args, **kwargs)

    def DrawBitmapBullet(*args, **kwargs):
        """
        DrawBitmapBullet(self, RichTextParagraph paragraph, DC dc, RichTextAttr attr, 
            Rect rect) -> bool
        """
        return _richtext.RichTextRenderer_DrawBitmapBullet(*args, **kwargs)

    def EnumerateStandardBulletNames(*args, **kwargs):
        """EnumerateStandardBulletNames(self, wxArrayString bulletNames) -> bool"""
        return _richtext.RichTextRenderer_EnumerateStandardBulletNames(*args, **kwargs)

_richtext.RichTextRenderer_swigregister(RichTextRenderer)

class RichTextStdRenderer(RichTextRenderer):
    """Proxy of C++ RichTextStdRenderer class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> RichTextStdRenderer"""
        _richtext.RichTextStdRenderer_swiginit(self,_richtext.new_RichTextStdRenderer(*args, **kwargs))

_richtext.RichTextStdRenderer_swigregister(RichTextStdRenderer)

#---------------------------------------------------------------------------

RE_READONLY = _richtext.RE_READONLY
RE_MULTILINE = _richtext.RE_MULTILINE
RE_CENTER_CARET = _richtext.RE_CENTER_CARET
RE_CENTRE_CARET = _richtext.RE_CENTRE_CARET
RICHTEXT_SHIFT_DOWN = _richtext.RICHTEXT_SHIFT_DOWN
RICHTEXT_CTRL_DOWN = _richtext.RICHTEXT_CTRL_DOWN
RICHTEXT_ALT_DOWN = _richtext.RICHTEXT_ALT_DOWN
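
# Illustrative sketch (not part of the generated bindings): a RichTextCtrl
# is created like any other control; "frame" is a hypothetical existing
# parent window inside a running wx.App:
#
#     rtc = wx.richtext.RichTextCtrl(frame,
#                                    style=wx.VSCROLL | wx.HSCROLL |
#                                          wx.richtext.RE_MULTILINE)
#     rtc.WriteText("Hello, rich text!")
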
""" return _richtext.RichTextCtrl_SetDelayedLayoutThreshold(*args, **kwargs) def GetDelayedLayoutThreshold(*args, **kwargs): """ GetDelayedLayoutThreshold(self) -> long Get the threshold in character positions for doing layout optimization during sizing. """ return _richtext.RichTextCtrl_GetDelayedLayoutThreshold(*args, **kwargs) def GetFullLayoutRequired(*args, **kwargs): """GetFullLayoutRequired(self) -> bool""" return _richtext.RichTextCtrl_GetFullLayoutRequired(*args, **kwargs) def SetFullLayoutRequired(*args, **kwargs): """SetFullLayoutRequired(self, bool b)""" return _richtext.RichTextCtrl_SetFullLayoutRequired(*args, **kwargs) def GetFullLayoutTime(*args, **kwargs): """GetFullLayoutTime(self) -> wxLongLong""" return _richtext.RichTextCtrl_GetFullLayoutTime(*args, **kwargs) def SetFullLayoutTime(*args, **kwargs): """SetFullLayoutTime(self, wxLongLong t)""" return _richtext.RichTextCtrl_SetFullLayoutTime(*args, **kwargs) def GetFullLayoutSavedPosition(*args, **kwargs): """GetFullLayoutSavedPosition(self) -> long""" return _richtext.RichTextCtrl_GetFullLayoutSavedPosition(*args, **kwargs) def SetFullLayoutSavedPosition(*args, **kwargs): """SetFullLayoutSavedPosition(self, long p)""" return _richtext.RichTextCtrl_SetFullLayoutSavedPosition(*args, **kwargs) def ForceDelayedLayout(*args, **kwargs): """ForceDelayedLayout(self)""" return _richtext.RichTextCtrl_ForceDelayedLayout(*args, **kwargs) def SetTextCursor(*args, **kwargs): """ SetTextCursor(self, Cursor cursor) Set text cursor """ return _richtext.RichTextCtrl_SetTextCursor(*args, **kwargs) def GetTextCursor(*args, **kwargs): """ GetTextCursor(self) -> Cursor Get text cursor """ return _richtext.RichTextCtrl_GetTextCursor(*args, **kwargs) def SetURLCursor(*args, **kwargs): """ SetURLCursor(self, Cursor cursor) Set URL cursor """ return _richtext.RichTextCtrl_SetURLCursor(*args, **kwargs) def GetURLCursor(*args, **kwargs): """ GetURLCursor(self) -> Cursor Get URL cursor """ return _richtext.RichTextCtrl_GetURLCursor(*args, **kwargs) def GetCaretAtLineStart(*args, **kwargs): """GetCaretAtLineStart(self) -> bool""" return _richtext.RichTextCtrl_GetCaretAtLineStart(*args, **kwargs) def SetCaretAtLineStart(*args, **kwargs): """SetCaretAtLineStart(self, bool atStart)""" return _richtext.RichTextCtrl_SetCaretAtLineStart(*args, **kwargs) def GetDragging(*args, **kwargs): """GetDragging(self) -> bool""" return _richtext.RichTextCtrl_GetDragging(*args, **kwargs) def SetDragging(*args, **kwargs): """SetDragging(self, bool dragging)""" return _richtext.RichTextCtrl_SetDragging(*args, **kwargs) def GetPreDrag(*args, **kwargs): """GetPreDrag(self) -> bool""" return _richtext.RichTextCtrl_GetPreDrag(*args, **kwargs) def SetPreDrag(*args, **kwargs): """SetPreDrag(self, bool pd)""" return _richtext.RichTextCtrl_SetPreDrag(*args, **kwargs) def GetDragStartPoint(*args, **kwargs): """GetDragStartPoint(self) -> Point""" return _richtext.RichTextCtrl_GetDragStartPoint(*args, **kwargs) def SetDragStartPoint(*args, **kwargs): """SetDragStartPoint(self, Point sp)""" return _richtext.RichTextCtrl_SetDragStartPoint(*args, **kwargs) def GetDragStartTime(*args, **kwargs): """GetDragStartTime(self) -> DateTime""" return _richtext.RichTextCtrl_GetDragStartTime(*args, **kwargs) def SetDragStartTime(*args, **kwargs): """SetDragStartTime(self, DateTime st)""" return _richtext.RichTextCtrl_SetDragStartTime(*args, **kwargs) def GetBufferBitmap(*args, **kwargs): """GetBufferBitmap(self) -> Bitmap""" return _richtext.RichTextCtrl_GetBufferBitmap(*args, **kwargs) def 
    def GetContextMenu(*args, **kwargs):
        """GetContextMenu(self) -> Menu"""
        return _richtext.RichTextCtrl_GetContextMenu(*args, **kwargs)

    def SetContextMenu(*args, **kwargs):
        """SetContextMenu(self, Menu menu)"""
        return _richtext.RichTextCtrl_SetContextMenu(*args, **kwargs)

    def GetSelectionAnchor(*args, **kwargs):
        """GetSelectionAnchor(self) -> long"""
        return _richtext.RichTextCtrl_GetSelectionAnchor(*args, **kwargs)

    def SetSelectionAnchor(*args, **kwargs):
        """SetSelectionAnchor(self, long anchor)"""
        return _richtext.RichTextCtrl_SetSelectionAnchor(*args, **kwargs)

    def LoadFile(*args, **kwargs):
        """
        LoadFile(self, String file, int type=RICHTEXT_TYPE_ANY) -> bool

        Load the contents of the document from the given filename.
        """
        return _richtext.RichTextCtrl_LoadFile(*args, **kwargs)

    def SaveFile(*args, **kwargs):
        """
        SaveFile(self, String file=EmptyString, int type=RICHTEXT_TYPE_ANY) -> bool

        Save the contents of the document to the given filename, or if the
        empty string is passed then to the filename set with `SetFilename`.
        """
        return _richtext.RichTextCtrl_SaveFile(*args, **kwargs)

    def SetHandlerFlags(*args, **kwargs):
        """
        SetHandlerFlags(self, int flags)

        Set the handler flags, controlling loading and saving.
        """
        return _richtext.RichTextCtrl_SetHandlerFlags(*args, **kwargs)

    def GetHandlerFlags(*args, **kwargs):
        """
        GetHandlerFlags(self) -> int

        Get the handler flags, controlling loading and saving.
        """
        return _richtext.RichTextCtrl_GetHandlerFlags(*args, **kwargs)

    def SetMaxLength(*args, **kwargs):
        """
        SetMaxLength(self, unsigned long len)

        Set the max number of characters which may be entered in a single
        line text control.
        """
        return _richtext.RichTextCtrl_SetMaxLength(*args, **kwargs)

    def SetStyle(*args, **kwargs):
        """
        SetStyle(self, RichTextRange range, RichTextAttr style) -> bool

        Set the style for the text in ``range`` to ``style``
        """
        return _richtext.RichTextCtrl_SetStyle(*args, **kwargs)

    def GetStyle(*args, **kwargs):
        """
        GetStyle(self, long position, RichTextAttr style) -> bool

        Retrieve the style used at the given position.  Copies the style
        values at ``position`` into the ``style`` parameter and returns
        ``True`` if successful.  Returns ``False`` otherwise.
        """
        return _richtext.RichTextCtrl_GetStyle(*args, **kwargs)

    def GetStyleForRange(*args, **kwargs):
        """
        GetStyleForRange(self, RichTextRange range, RichTextAttr style) -> bool

        Get the common set of styles for the range
        """
        return _richtext.RichTextCtrl_GetStyleForRange(*args, **kwargs)

    def SetStyleEx(*args, **kwargs):
        """
        SetStyleEx(self, RichTextRange range, RichTextAttr style, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool

        Extended style setting operation with flags including:
        RICHTEXT_SETSTYLE_WITH_UNDO, RICHTEXT_SETSTYLE_OPTIMIZE,
        RICHTEXT_SETSTYLE_PARAGRAPHS_ONLY, RICHTEXT_SETSTYLE_CHARACTERS_ONLY
        """
        return _richtext.RichTextCtrl_SetStyleEx(*args, **kwargs)

    def GetUncombinedStyle(*args, **kwargs):
        """
        GetUncombinedStyle(self, long position, RichTextAttr style) -> bool

        Get the content (uncombined) attributes for this position.  Copies
        the style values at ``position`` into the ``style`` parameter and
        returns ``True`` if successful.  Returns ``False`` otherwise.
        """
        return _richtext.RichTextCtrl_GetUncombinedStyle(*args, **kwargs)
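
    # Illustrative sketch (not part of the generated bindings): SetStyle
    # applies a RichTextAttr to a range of characters; here the first five
    # characters are made bold and red (RichTextRange takes start and end
    # positions):
    #
    #     attr = wx.richtext.RichTextAttr()
    #     attr.SetFontWeight(wx.FONTWEIGHT_BOLD)
    #     attr.SetTextColour(wx.RED)
    #     rtc.SetStyle(wx.richtext.RichTextRange(0, 5), attr)
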
""" return _richtext.RichTextCtrl_SetDefaultStyle(*args, **kwargs) def GetDefaultStyle(*args, **kwargs): """ GetDefaultStyle(self) -> RichTextAttr Retrieves a copy of the default style object. """ return _richtext.RichTextCtrl_GetDefaultStyle(*args, **kwargs) def SetListStyle(*args, **kwargs): """ SetListStyle(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1, int specifiedLevel=-1) -> bool """ return _richtext.RichTextCtrl_SetListStyle(*args, **kwargs) def ClearListStyle(*args, **kwargs): """ClearListStyle(self, RichTextRange range, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool""" return _richtext.RichTextCtrl_ClearListStyle(*args, **kwargs) def NumberList(*args, **kwargs): """ NumberList(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1, int specifiedLevel=-1) -> bool """ return _richtext.RichTextCtrl_NumberList(*args, **kwargs) def PromoteList(*args, **kwargs): """ PromoteList(self, int promoteBy, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool """ return _richtext.RichTextCtrl_PromoteList(*args, **kwargs) def Delete(*args, **kwargs): """Delete(self, RichTextRange range) -> bool""" return _richtext.RichTextCtrl_Delete(*args, **kwargs) def HitTestXY(*args, **kwargs): """ HitTestRC(self, Point pt) --> (result, col, row) Returns the column and row of the given point in pixels. Note that ``pt`` should be given in device coordinates, and not be adjusted for the client area origin nor for scrolling. The return value is a tuple of the hit test result and the column and row values. """ return _richtext.RichTextCtrl_HitTestXY(*args, **kwargs) def FindContainerAtPoint(*args, **kwargs): """ FindContainerAtPoint(self, Point pt, long position, int hit, RichTextObject hitObj, int flags=0) -> RichTextParagraphLayoutBox """ return _richtext.RichTextCtrl_FindContainerAtPoint(*args, **kwargs) def DeleteSelection(*args, **kwargs): """ DeleteSelection(self) Remove the current selection. """ return _richtext.RichTextCtrl_DeleteSelection(*args, **kwargs) def CanDeleteSelection(*args, **kwargs): """ CanDeleteSelection(self) -> bool Returns ``True`` if the selection can be removed from the document. """ return _richtext.RichTextCtrl_CanDeleteSelection(*args, **kwargs) def HasSelection(*args, **kwargs): """HasSelection(self) -> bool""" return _richtext.RichTextCtrl_HasSelection(*args, **kwargs) def WriteImage(*args, **kwargs): """ WriteImage(self, Image image, int bitmapType=BITMAP_TYPE_PNG) -> bool Write an image at the current insertion point. Supply optional type to use for internal and file storage of the raw data. """ return _richtext.RichTextCtrl_WriteImage(*args, **kwargs) def WriteBitmap(*args, **kwargs): """ WriteBitmap(self, Bitmap bitmap, int bitmapType=BITMAP_TYPE_PNG) -> bool Write a bitmap at the current insertion point. Supply optional type to use for internal and file storage of the raw data. """ return _richtext.RichTextCtrl_WriteBitmap(*args, **kwargs) def WriteImageFile(*args, **kwargs): """ WriteImageFile(self, String filename, int bitmapType) -> bool Load an image from file and write at the current insertion point. """ return _richtext.RichTextCtrl_WriteImageFile(*args, **kwargs) def WriteImageBlock(*args, **kwargs): """ WriteImageBlock(self, wxRichTextImageBlock imageBlock) -> bool Write an image block at the current insertion point. 
""" return _richtext.RichTextCtrl_WriteImageBlock(*args, **kwargs) def Newline(*args, **kwargs): """ Newline(self) -> bool Insert a newline (actually paragraph) at the current insertion point. """ return _richtext.RichTextCtrl_Newline(*args, **kwargs) def LineBreak(*args, **kwargs): """ LineBreak(self) -> bool Insert a line break at the current insertion point. """ return _richtext.RichTextCtrl_LineBreak(*args, **kwargs) def SetBasicStyle(*args, **kwargs): """SetBasicStyle(self, RichTextAttr style)""" return _richtext.RichTextCtrl_SetBasicStyle(*args, **kwargs) def GetBasicStyle(*args, **kwargs): """ GetBasicStyle(self) -> RichTextAttr Get basic (overall) style """ return _richtext.RichTextCtrl_GetBasicStyle(*args, **kwargs) def BeginStyle(*args, **kwargs): """ BeginStyle(self, RichTextAttr style) -> bool Begin using a style """ return _richtext.RichTextCtrl_BeginStyle(*args, **kwargs) def EndStyle(*args, **kwargs): """ EndStyle(self) -> bool End the style """ return _richtext.RichTextCtrl_EndStyle(*args, **kwargs) def EndAllStyles(*args, **kwargs): """ EndAllStyles(self) -> bool End all styles """ return _richtext.RichTextCtrl_EndAllStyles(*args, **kwargs) def BeginBold(*args, **kwargs): """ BeginBold(self) -> bool Begin using bold """ return _richtext.RichTextCtrl_BeginBold(*args, **kwargs) def EndBold(*args, **kwargs): """ EndBold(self) -> bool End using bold """ return _richtext.RichTextCtrl_EndBold(*args, **kwargs) def BeginItalic(*args, **kwargs): """ BeginItalic(self) -> bool Begin using italic """ return _richtext.RichTextCtrl_BeginItalic(*args, **kwargs) def EndItalic(*args, **kwargs): """ EndItalic(self) -> bool End using italic """ return _richtext.RichTextCtrl_EndItalic(*args, **kwargs) def BeginUnderline(*args, **kwargs): """ BeginUnderline(self) -> bool Begin using underline """ return _richtext.RichTextCtrl_BeginUnderline(*args, **kwargs) def EndUnderline(*args, **kwargs): """ EndUnderline(self) -> bool End using underline """ return _richtext.RichTextCtrl_EndUnderline(*args, **kwargs) def BeginFontSize(*args, **kwargs): """ BeginFontSize(self, int pointSize) -> bool Begin using point size """ return _richtext.RichTextCtrl_BeginFontSize(*args, **kwargs) def EndFontSize(*args, **kwargs): """ EndFontSize(self) -> bool End using point size """ return _richtext.RichTextCtrl_EndFontSize(*args, **kwargs) def BeginFont(*args, **kwargs): """ BeginFont(self, Font font) -> bool Begin using this font """ return _richtext.RichTextCtrl_BeginFont(*args, **kwargs) def EndFont(*args, **kwargs): """ EndFont(self) -> bool End using a font """ return _richtext.RichTextCtrl_EndFont(*args, **kwargs) def BeginTextColour(*args, **kwargs): """ BeginTextColour(self, Colour colour) -> bool Begin using this colour """ return _richtext.RichTextCtrl_BeginTextColour(*args, **kwargs) def EndTextColour(*args, **kwargs): """ EndTextColour(self) -> bool End using a colour """ return _richtext.RichTextCtrl_EndTextColour(*args, **kwargs) def BeginAlignment(*args, **kwargs): """ BeginAlignment(self, int alignment) -> bool Begin using alignment """ return _richtext.RichTextCtrl_BeginAlignment(*args, **kwargs) def EndAlignment(*args, **kwargs): """ EndAlignment(self) -> bool End alignment """ return _richtext.RichTextCtrl_EndAlignment(*args, **kwargs) def BeginLeftIndent(*args, **kwargs): """ BeginLeftIndent(self, int leftIndent, int leftSubIndent=0) -> bool Begin left indent """ return _richtext.RichTextCtrl_BeginLeftIndent(*args, **kwargs) def EndLeftIndent(*args, **kwargs): """ EndLeftIndent(self) -> bool End 
    def BeginRightIndent(*args, **kwargs):
        """
        BeginRightIndent(self, int rightIndent) -> bool

        Begin right indent
        """
        return _richtext.RichTextCtrl_BeginRightIndent(*args, **kwargs)

    def EndRightIndent(*args, **kwargs):
        """
        EndRightIndent(self) -> bool

        End right indent
        """
        return _richtext.RichTextCtrl_EndRightIndent(*args, **kwargs)

    def BeginParagraphSpacing(*args, **kwargs):
        """
        BeginParagraphSpacing(self, int before, int after) -> bool

        Begin paragraph spacing
        """
        return _richtext.RichTextCtrl_BeginParagraphSpacing(*args, **kwargs)

    def EndParagraphSpacing(*args, **kwargs):
        """
        EndParagraphSpacing(self) -> bool

        End paragraph spacing
        """
        return _richtext.RichTextCtrl_EndParagraphSpacing(*args, **kwargs)

    def BeginLineSpacing(*args, **kwargs):
        """
        BeginLineSpacing(self, int lineSpacing) -> bool

        Begin line spacing
        """
        return _richtext.RichTextCtrl_BeginLineSpacing(*args, **kwargs)

    def EndLineSpacing(*args, **kwargs):
        """
        EndLineSpacing(self) -> bool

        End line spacing
        """
        return _richtext.RichTextCtrl_EndLineSpacing(*args, **kwargs)

    def BeginNumberedBullet(*args, **kwargs):
        """
        BeginNumberedBullet(self, int bulletNumber, int leftIndent, int leftSubIndent, 
            int bulletStyle=wxTEXT_ATTR_BULLET_STYLE_ARABIC|wxTEXT_ATTR_BULLET_STYLE_PERIOD) -> bool

        Begin numbered bullet
        """
        return _richtext.RichTextCtrl_BeginNumberedBullet(*args, **kwargs)

    def EndNumberedBullet(*args, **kwargs):
        """
        EndNumberedBullet(self) -> bool

        End numbered bullet
        """
        return _richtext.RichTextCtrl_EndNumberedBullet(*args, **kwargs)

    def BeginSymbolBullet(*args, **kwargs):
        """
        BeginSymbolBullet(self, String symbol, int leftIndent, int leftSubIndent, 
            int bulletStyle=TEXT_ATTR_BULLET_STYLE_SYMBOL) -> bool

        Begin symbol bullet
        """
        return _richtext.RichTextCtrl_BeginSymbolBullet(*args, **kwargs)

    def EndSymbolBullet(*args, **kwargs):
        """
        EndSymbolBullet(self) -> bool

        End symbol bullet
        """
        return _richtext.RichTextCtrl_EndSymbolBullet(*args, **kwargs)

    def BeginStandardBullet(*args, **kwargs):
        """
        BeginStandardBullet(self, String bulletName, int leftIndent, int leftSubIndent, 
            int bulletStyle=TEXT_ATTR_BULLET_STYLE_STANDARD) -> bool

        Begin standard bullet
        """
        return _richtext.RichTextCtrl_BeginStandardBullet(*args, **kwargs)

    def EndStandardBullet(*args, **kwargs):
        """
        EndStandardBullet(self) -> bool

        End standard bullet
        """
        return _richtext.RichTextCtrl_EndStandardBullet(*args, **kwargs)

    def BeginCharacterStyle(*args, **kwargs):
        """
        BeginCharacterStyle(self, String characterStyle) -> bool

        Begin named character style
        """
        return _richtext.RichTextCtrl_BeginCharacterStyle(*args, **kwargs)

    def EndCharacterStyle(*args, **kwargs):
        """
        EndCharacterStyle(self) -> bool

        End named character style
        """
        return _richtext.RichTextCtrl_EndCharacterStyle(*args, **kwargs)

    def BeginParagraphStyle(*args, **kwargs):
        """
        BeginParagraphStyle(self, String paragraphStyle) -> bool

        Begin named paragraph style
        """
        return _richtext.RichTextCtrl_BeginParagraphStyle(*args, **kwargs)

    def EndParagraphStyle(*args, **kwargs):
        """
        EndParagraphStyle(self) -> bool

        End named paragraph style
        """
        return _richtext.RichTextCtrl_EndParagraphStyle(*args, **kwargs)

    def BeginListStyle(*args, **kwargs):
        """
        BeginListStyle(self, String listStyle, int level=1, int number=1) -> bool

        Begin named list style.
        """
        return _richtext.RichTextCtrl_BeginListStyle(*args, **kwargs)

    def EndListStyle(*args, **kwargs):
        """
        EndListStyle(self) -> bool

        End named list style.
        """
        return _richtext.RichTextCtrl_EndListStyle(*args, **kwargs)
""" return _richtext.RichTextCtrl_EndListStyle(*args, **kwargs) def BeginURL(*args, **kwargs): """ BeginURL(self, String url, String characterStyle=wxEmptyString) -> bool Begin URL. """ return _richtext.RichTextCtrl_BeginURL(*args, **kwargs) def EndURL(*args, **kwargs): """ EndURL(self) -> bool End URL. """ return _richtext.RichTextCtrl_EndURL(*args, **kwargs) def SetDefaultStyleToCursorStyle(*args, **kwargs): """ SetDefaultStyleToCursorStyle(self) -> bool Sets the default style to the style under the cursor """ return _richtext.RichTextCtrl_SetDefaultStyleToCursorStyle(*args, **kwargs) def SelectNone(*args, **kwargs): """ SelectNone(self) Clear the selection """ return _richtext.RichTextCtrl_SelectNone(*args, **kwargs) def SelectWord(*args, **kwargs): """ SelectWord(self, long position) -> bool Select the word at the given character position """ return _richtext.RichTextCtrl_SelectWord(*args, **kwargs) def GetSelectionRange(*args, **kwargs): """ GetSelectionRange(self) -> RichTextRange Get the selection range in character positions. """ return _richtext.RichTextCtrl_GetSelectionRange(*args, **kwargs) def SetSelectionRange(*args, **kwargs): """ SetSelectionRange(self, RichTextRange range) Set the selection range in character positions. The end point of range is specified as the last character position of the span of text, plus one. So, for example, to set the selection for a character at position 5, use the range (5,6). """ return _richtext.RichTextCtrl_SetSelectionRange(*args, **kwargs) def GetInternalSelectionRange(*args, **kwargs): """ GetInternalSelectionRange(self) -> RichTextRange Get the selection range in character positions. The range is in internal format, i.e. a single character selection is denoted by (n,n). """ return _richtext.RichTextCtrl_GetInternalSelectionRange(*args, **kwargs) def SetInternalSelectionRange(*args, **kwargs): """ SetInternalSelectionRange(self, RichTextRange range) Set the selection range in character positions. The range is in internal format, i.e. a single character selection is denoted by (n,n). """ return _richtext.RichTextCtrl_SetInternalSelectionRange(*args, **kwargs) def AddParagraph(*args, **kwargs): """ AddParagraph(self, String text) -> RichTextRange Add a new paragraph of text to the end of the buffer """ return _richtext.RichTextCtrl_AddParagraph(*args, **kwargs) def AddImage(*args, **kwargs): """ AddImage(self, Image image) -> RichTextRange Add an image """ return _richtext.RichTextCtrl_AddImage(*args, **kwargs) def LayoutContent(*args, **kwargs): """ LayoutContent(self, bool onlyVisibleRect=False) -> bool Layout the buffer: which we must do before certain operations, such as setting the caret position. 
""" return _richtext.RichTextCtrl_LayoutContent(*args, **kwargs) def MoveCaret(*args, **kwargs): """ MoveCaret(self, long pos, bool showAtLineStart=False) -> bool Move the caret to the given character position """ return _richtext.RichTextCtrl_MoveCaret(*args, **kwargs) def MoveRight(*args, **kwargs): """ MoveRight(self, int noPositions=1, int flags=0) -> bool Move right """ return _richtext.RichTextCtrl_MoveRight(*args, **kwargs) def MoveLeft(*args, **kwargs): """ MoveLeft(self, int noPositions=1, int flags=0) -> bool Move left """ return _richtext.RichTextCtrl_MoveLeft(*args, **kwargs) def MoveUp(*args, **kwargs): """ MoveUp(self, int noLines=1, int flags=0) -> bool Move up """ return _richtext.RichTextCtrl_MoveUp(*args, **kwargs) def MoveDown(*args, **kwargs): """ MoveDown(self, int noLines=1, int flags=0) -> bool Move down """ return _richtext.RichTextCtrl_MoveDown(*args, **kwargs) def MoveToLineEnd(*args, **kwargs): """ MoveToLineEnd(self, int flags=0) -> bool Move to the end of the line """ return _richtext.RichTextCtrl_MoveToLineEnd(*args, **kwargs) def MoveToLineStart(*args, **kwargs): """ MoveToLineStart(self, int flags=0) -> bool Move to the start of the line """ return _richtext.RichTextCtrl_MoveToLineStart(*args, **kwargs) def MoveToParagraphEnd(*args, **kwargs): """ MoveToParagraphEnd(self, int flags=0) -> bool Move to the end of the paragraph """ return _richtext.RichTextCtrl_MoveToParagraphEnd(*args, **kwargs) def MoveToParagraphStart(*args, **kwargs): """ MoveToParagraphStart(self, int flags=0) -> bool Move to the start of the paragraph """ return _richtext.RichTextCtrl_MoveToParagraphStart(*args, **kwargs) def MoveHome(*args, **kwargs): """ MoveHome(self, int flags=0) -> bool Move to the start of the buffer """ return _richtext.RichTextCtrl_MoveHome(*args, **kwargs) def MoveEnd(*args, **kwargs): """ MoveEnd(self, int flags=0) -> bool Move to the end of the buffer """ return _richtext.RichTextCtrl_MoveEnd(*args, **kwargs) def PageUp(*args, **kwargs): """ PageUp(self, int noPages=1, int flags=0) -> bool Move n pages up """ return _richtext.RichTextCtrl_PageUp(*args, **kwargs) def PageDown(*args, **kwargs): """ PageDown(self, int noPages=1, int flags=0) -> bool Move n pages down """ return _richtext.RichTextCtrl_PageDown(*args, **kwargs) def WordLeft(*args, **kwargs): """ WordLeft(self, int noPages=1, int flags=0) -> bool Move n words left """ return _richtext.RichTextCtrl_WordLeft(*args, **kwargs) def WordRight(*args, **kwargs): """ WordRight(self, int noPages=1, int flags=0) -> bool Move n words right """ return _richtext.RichTextCtrl_WordRight(*args, **kwargs) def GetBuffer(*args, **kwargs): """ GetBuffer(self) -> RichTextBuffer Returns the buffer associated with the control. """ return _richtext.RichTextCtrl_GetBuffer(*args, **kwargs) def BeginBatchUndo(*args, **kwargs): """ BeginBatchUndo(self, String cmdName) -> bool Start batching undo history for commands """ return _richtext.RichTextCtrl_BeginBatchUndo(*args, **kwargs) def EndBatchUndo(*args, **kwargs): """ EndBatchUndo(self) -> bool End batching undo history for commands. """ return _richtext.RichTextCtrl_EndBatchUndo(*args, **kwargs) def BatchingUndo(*args, **kwargs): """ BatchingUndo(self) -> bool Are we batching undo history for commands? """ return _richtext.RichTextCtrl_BatchingUndo(*args, **kwargs) def BeginSuppressUndo(*args, **kwargs): """ BeginSuppressUndo(self) -> bool Start suppressing undo history for commands. 
""" return _richtext.RichTextCtrl_BeginSuppressUndo(*args, **kwargs) def EndSuppressUndo(*args, **kwargs): """ EndSuppressUndo(self) -> bool End suppressing undo history for commands. """ return _richtext.RichTextCtrl_EndSuppressUndo(*args, **kwargs) def SuppressingUndo(*args, **kwargs): """ SuppressingUndo(self) -> bool Are we suppressing undo history for commands? """ return _richtext.RichTextCtrl_SuppressingUndo(*args, **kwargs) def HasCharacterAttributes(*args, **kwargs): """ HasCharacterAttributes(self, RichTextRange range, RichTextAttr style) -> bool Test if this whole range has character attributes of the specified kind. If any of the attributes are different within the range, the test fails. You can use this to implement, for example, bold button updating. ``style`` must have flags indicating which attributes are of interest. """ return _richtext.RichTextCtrl_HasCharacterAttributes(*args, **kwargs) def HasParagraphAttributes(*args, **kwargs): """ HasParagraphAttributes(self, RichTextRange range, RichTextAttr style) -> bool Test if this whole range has paragraph attributes of the specified kind. If any of the attributes are different within the range, the test fails. You can use this to implement, for example, centering button updating. style must have flags indicating which attributes are of interest. """ return _richtext.RichTextCtrl_HasParagraphAttributes(*args, **kwargs) def IsSelectionBold(*args, **kwargs): """ IsSelectionBold(self) -> bool Is all of the selection bold? """ return _richtext.RichTextCtrl_IsSelectionBold(*args, **kwargs) def IsSelectionItalics(*args, **kwargs): """ IsSelectionItalics(self) -> bool Is all of the selection italics? """ return _richtext.RichTextCtrl_IsSelectionItalics(*args, **kwargs) def IsSelectionUnderlined(*args, **kwargs): """ IsSelectionUnderlined(self) -> bool Is all of the selection underlined? """ return _richtext.RichTextCtrl_IsSelectionUnderlined(*args, **kwargs) def DoesSelectionHaveTextEffectFlag(*args, **kwargs): """DoesSelectionHaveTextEffectFlag(self, int flag) -> bool""" return _richtext.RichTextCtrl_DoesSelectionHaveTextEffectFlag(*args, **kwargs) def IsSelectionAligned(*args, **kwargs): """ IsSelectionAligned(self, int alignment) -> bool Is all of the selection aligned according to the specified flag? 
""" return _richtext.RichTextCtrl_IsSelectionAligned(*args, **kwargs) def ApplyBoldToSelection(*args, **kwargs): """ ApplyBoldToSelection(self) -> bool Apply bold to the selection """ return _richtext.RichTextCtrl_ApplyBoldToSelection(*args, **kwargs) def ApplyItalicToSelection(*args, **kwargs): """ ApplyItalicToSelection(self) -> bool Apply italic to the selection """ return _richtext.RichTextCtrl_ApplyItalicToSelection(*args, **kwargs) def ApplyUnderlineToSelection(*args, **kwargs): """ ApplyUnderlineToSelection(self) -> bool Apply underline to the selection """ return _richtext.RichTextCtrl_ApplyUnderlineToSelection(*args, **kwargs) def ApplyTextEffectToSelection(*args, **kwargs): """ApplyTextEffectToSelection(self, int flags) -> bool""" return _richtext.RichTextCtrl_ApplyTextEffectToSelection(*args, **kwargs) def ApplyAlignmentToSelection(*args, **kwargs): """ ApplyAlignmentToSelection(self, int alignment) -> bool Apply alignment to the selection """ return _richtext.RichTextCtrl_ApplyAlignmentToSelection(*args, **kwargs) def ApplyStyle(*args, **kwargs): """ ApplyStyle(self, wxRichTextStyleDefinition def) -> bool Apply a named style to the selection """ return _richtext.RichTextCtrl_ApplyStyle(*args, **kwargs) def SetStyleSheet(*args, **kwargs): """ SetStyleSheet(self, wxRichTextStyleSheet styleSheet) Set style sheet, if any. """ return _richtext.RichTextCtrl_SetStyleSheet(*args, **kwargs) def GetStyleSheet(*args, **kwargs): """GetStyleSheet(self) -> wxRichTextStyleSheet""" return _richtext.RichTextCtrl_GetStyleSheet(*args, **kwargs) def PushStyleSheet(*args, **kwargs): """ PushStyleSheet(self, wxRichTextStyleSheet styleSheet) -> bool Push style sheet to top of stack """ return _richtext.RichTextCtrl_PushStyleSheet(*args, **kwargs) def PopStyleSheet(*args, **kwargs): """ PopStyleSheet(self) -> wxRichTextStyleSheet Pop style sheet from top of stack """ return _richtext.RichTextCtrl_PopStyleSheet(*args, **kwargs) def ApplyStyleSheet(*args, **kwargs): """ ApplyStyleSheet(self, wxRichTextStyleSheet styleSheet=None) -> bool Apply the style sheet to the buffer, for example if the styles have changed. 
""" return _richtext.RichTextCtrl_ApplyStyleSheet(*args, **kwargs) def ShowContextMenu(*args, **kwargs): """ShowContextMenu(self, Menu menu, Point pt, bool addPropertyCommands=True) -> bool""" return _richtext.RichTextCtrl_ShowContextMenu(*args, **kwargs) def PrepareContextMenu(*args, **kwargs): """PrepareContextMenu(self, Menu menu, Point pt, bool addPropertyCommands=True) -> int""" return _richtext.RichTextCtrl_PrepareContextMenu(*args, **kwargs) Buffer = property(GetBuffer) DelayedLayoutThreshold = property(GetDelayedLayoutThreshold,SetDelayedLayoutThreshold) Filename = property(GetFilename,SetFilename) InternalSelectionRange = property(GetInternalSelectionRange,SetInternalSelectionRange) SelectionRange = property(GetSelectionRange,SetSelectionRange) StyleSheet = property(GetStyleSheet,SetStyleSheet) TextCursor = property(GetTextCursor,SetTextCursor) URLCursor = property(GetURLCursor,SetURLCursor) def SetupScrollbars(*args, **kwargs): """SetupScrollbars(self, bool atTop=False)""" return _richtext.RichTextCtrl_SetupScrollbars(*args, **kwargs) def KeyboardNavigate(*args, **kwargs): """KeyboardNavigate(self, int keyCode, int flags) -> bool""" return _richtext.RichTextCtrl_KeyboardNavigate(*args, **kwargs) def PositionCaret(*args, **kwargs): """PositionCaret(self)""" return _richtext.RichTextCtrl_PositionCaret(*args, **kwargs) def ExtendSelection(*args, **kwargs): """ExtendSelection(self, long oldPosition, long newPosition, int flags) -> bool""" return _richtext.RichTextCtrl_ExtendSelection(*args, **kwargs) def ScrollIntoView(*args, **kwargs): """ScrollIntoView(self, long position, int keyCode) -> bool""" return _richtext.RichTextCtrl_ScrollIntoView(*args, **kwargs) def SetCaretPosition(*args, **kwargs): """SetCaretPosition(self, long position, bool showAtLineStart=False)""" return _richtext.RichTextCtrl_SetCaretPosition(*args, **kwargs) def GetCaretPosition(*args, **kwargs): """GetCaretPosition(self) -> long""" return _richtext.RichTextCtrl_GetCaretPosition(*args, **kwargs) def GetAdjustedCaretPosition(*args, **kwargs): """GetAdjustedCaretPosition(self, long caretPos) -> long""" return _richtext.RichTextCtrl_GetAdjustedCaretPosition(*args, **kwargs) def MoveCaretForward(*args, **kwargs): """MoveCaretForward(self, long oldPosition)""" return _richtext.RichTextCtrl_MoveCaretForward(*args, **kwargs) def MoveCaretBack(*args, **kwargs): """MoveCaretBack(self, long oldPosition)""" return _richtext.RichTextCtrl_MoveCaretBack(*args, **kwargs) def GetCaretPositionForIndex(*args, **kwargs): """GetCaretPositionForIndex(self, long position, Rect rect) -> bool""" return _richtext.RichTextCtrl_GetCaretPositionForIndex(*args, **kwargs) def GetVisibleLineForCaretPosition(*args, **kwargs): """GetVisibleLineForCaretPosition(self, long caretPosition) -> RichTextLine""" return _richtext.RichTextCtrl_GetVisibleLineForCaretPosition(*args, **kwargs) def GetCommandProcessor(*args, **kwargs): """GetCommandProcessor(self) -> wxCommandProcessor""" return _richtext.RichTextCtrl_GetCommandProcessor(*args, **kwargs) def DeleteSelectedContent(*args, **kwargs): """DeleteSelectedContent(self, long OUTPUT) -> bool""" return _richtext.RichTextCtrl_DeleteSelectedContent(*args, **kwargs) def GetPhysicalPoint(*args, **kwargs): """GetPhysicalPoint(self, Point ptLogical) -> Point""" return _richtext.RichTextCtrl_GetPhysicalPoint(*args, **kwargs) def GetLogicalPoint(*args, **kwargs): """GetLogicalPoint(self, Point ptPhysical) -> Point""" return _richtext.RichTextCtrl_GetLogicalPoint(*args, **kwargs) def 
    def FindNextWordPosition(*args, **kwargs):
        """FindNextWordPosition(self, int direction=1) -> long"""
        return _richtext.RichTextCtrl_FindNextWordPosition(*args, **kwargs)

    def IsPositionVisible(*args, **kwargs):
        """IsPositionVisible(self, long pos) -> bool"""
        return _richtext.RichTextCtrl_IsPositionVisible(*args, **kwargs)

    def GetFirstVisiblePosition(*args, **kwargs):
        """GetFirstVisiblePosition(self) -> long"""
        return _richtext.RichTextCtrl_GetFirstVisiblePosition(*args, **kwargs)

    def GetCaretPositionForDefaultStyle(*args, **kwargs):
        """GetCaretPositionForDefaultStyle(self) -> long"""
        return _richtext.RichTextCtrl_GetCaretPositionForDefaultStyle(*args, **kwargs)

    def SetCaretPositionForDefaultStyle(*args, **kwargs):
        """SetCaretPositionForDefaultStyle(self, long pos)"""
        return _richtext.RichTextCtrl_SetCaretPositionForDefaultStyle(*args, **kwargs)

    def IsDefaultStyleShowing(*args, **kwargs):
        """IsDefaultStyleShowing(self) -> bool"""
        return _richtext.RichTextCtrl_IsDefaultStyleShowing(*args, **kwargs)

    def SetAndShowDefaultStyle(*args, **kwargs):
        """SetAndShowDefaultStyle(self, RichTextAttr attr)"""
        return _richtext.RichTextCtrl_SetAndShowDefaultStyle(*args, **kwargs)

    def GetFirstVisiblePoint(*args, **kwargs):
        """GetFirstVisiblePoint(self) -> Point"""
        return _richtext.RichTextCtrl_GetFirstVisiblePoint(*args, **kwargs)

    def GetScrollPageSize(*args, **kwargs):
        """GetScrollPageSize(self, int orient) -> int"""
        return _richtext.RichTextCtrl_GetScrollPageSize(*args, **kwargs)

    def SetScrollPageSize(*args, **kwargs):
        """SetScrollPageSize(self, int orient, int pageSize)"""
        return _richtext.RichTextCtrl_SetScrollPageSize(*args, **kwargs)

    def SetScrollRate(*args, **kwargs):
        """SetScrollRate(self, int xstep, int ystep)"""
        return _richtext.RichTextCtrl_SetScrollRate(*args, **kwargs)

    def GetViewStart(*args, **kwargs):
        """
        GetViewStart() -> (x,y)

        Get the view start
        """
        return _richtext.RichTextCtrl_GetViewStart(*args, **kwargs)

    def SetScale(*args, **kwargs):
        """SetScale(self, double xs, double ys)"""
        return _richtext.RichTextCtrl_SetScale(*args, **kwargs)

    def GetScaleX(*args, **kwargs):
        """GetScaleX(self) -> double"""
        return _richtext.RichTextCtrl_GetScaleX(*args, **kwargs)

    def GetScaleY(*args, **kwargs):
        """GetScaleY(self) -> double"""
        return _richtext.RichTextCtrl_GetScaleY(*args, **kwargs)

    def CalcScrolledPosition(*args):
        """
        CalcScrolledPosition(self, Point pt) -> Point
        CalcScrolledPosition(int x, int y) -> (sx, sy)

        Translate between scrolled and unscrolled coordinates.
        """
        return _richtext.RichTextCtrl_CalcScrolledPosition(*args)

    def CalcUnscrolledPosition(*args):
        """
        CalcUnscrolledPosition(self, Point pt) -> Point
        CalcUnscrolledPosition(int x, int y) -> (ux, uy)

        Translate between scrolled and unscrolled coordinates.
        """
        return _richtext.RichTextCtrl_CalcUnscrolledPosition(*args)

    def SetTargetRect(*args, **kwargs):
        """SetTargetRect(self, Rect rect)"""
        return _richtext.RichTextCtrl_SetTargetRect(*args, **kwargs)

    def GetTargetRect(*args, **kwargs):
        """GetTargetRect(self) -> Rect"""
        return _richtext.RichTextCtrl_GetTargetRect(*args, **kwargs)

    def IsEmpty(*args, **kwargs):
        """
        IsEmpty(self) -> bool

        Returns True if the value in the text field is empty.
        """
        return _richtext.RichTextCtrl_IsEmpty(*args, **kwargs)
""" return _richtext.RichTextCtrl_IsEmpty(*args, **kwargs) def SetModified(*args, **kwargs): """SetModified(self, bool modified)""" return _richtext.RichTextCtrl_SetModified(*args, **kwargs) _richtext.RichTextCtrl_swigregister(RichTextCtrl) RichTextCtrlNameStr = cvar.RichTextCtrlNameStr def PreRichTextCtrl(*args, **kwargs): """PreRichTextCtrl() -> RichTextCtrl""" val = _richtext.new_PreRichTextCtrl(*args, **kwargs) return val #--------------------------------------------------------------------------- wxEVT_COMMAND_RICHTEXT_LEFT_CLICK = _richtext.wxEVT_COMMAND_RICHTEXT_LEFT_CLICK wxEVT_COMMAND_RICHTEXT_RIGHT_CLICK = _richtext.wxEVT_COMMAND_RICHTEXT_RIGHT_CLICK wxEVT_COMMAND_RICHTEXT_MIDDLE_CLICK = _richtext.wxEVT_COMMAND_RICHTEXT_MIDDLE_CLICK wxEVT_COMMAND_RICHTEXT_LEFT_DCLICK = _richtext.wxEVT_COMMAND_RICHTEXT_LEFT_DCLICK wxEVT_COMMAND_RICHTEXT_RETURN = _richtext.wxEVT_COMMAND_RICHTEXT_RETURN wxEVT_COMMAND_RICHTEXT_CHARACTER = _richtext.wxEVT_COMMAND_RICHTEXT_CHARACTER wxEVT_COMMAND_RICHTEXT_DELETE = _richtext.wxEVT_COMMAND_RICHTEXT_DELETE wxEVT_COMMAND_RICHTEXT_STYLESHEET_CHANGING = _richtext.wxEVT_COMMAND_RICHTEXT_STYLESHEET_CHANGING wxEVT_COMMAND_RICHTEXT_STYLESHEET_CHANGED = _richtext.wxEVT_COMMAND_RICHTEXT_STYLESHEET_CHANGED wxEVT_COMMAND_RICHTEXT_STYLESHEET_REPLACING = _richtext.wxEVT_COMMAND_RICHTEXT_STYLESHEET_REPLACING wxEVT_COMMAND_RICHTEXT_STYLESHEET_REPLACED = _richtext.wxEVT_COMMAND_RICHTEXT_STYLESHEET_REPLACED wxEVT_COMMAND_RICHTEXT_CONTENT_INSERTED = _richtext.wxEVT_COMMAND_RICHTEXT_CONTENT_INSERTED wxEVT_COMMAND_RICHTEXT_CONTENT_DELETED = _richtext.wxEVT_COMMAND_RICHTEXT_CONTENT_DELETED wxEVT_COMMAND_RICHTEXT_STYLE_CHANGED = _richtext.wxEVT_COMMAND_RICHTEXT_STYLE_CHANGED wxEVT_COMMAND_RICHTEXT_SELECTION_CHANGED = _richtext.wxEVT_COMMAND_RICHTEXT_SELECTION_CHANGED wxEVT_COMMAND_RICHTEXT_BUFFER_RESET = _richtext.wxEVT_COMMAND_RICHTEXT_BUFFER_RESET EVT_RICHTEXT_LEFT_CLICK = wx.PyEventBinder(wxEVT_COMMAND_RICHTEXT_LEFT_CLICK, 1) EVT_RICHTEXT_RIGHT_CLICK = wx.PyEventBinder(wxEVT_COMMAND_RICHTEXT_RIGHT_CLICK, 1) EVT_RICHTEXT_MIDDLE_CLICK = wx.PyEventBinder(wxEVT_COMMAND_RICHTEXT_MIDDLE_CLICK, 1) EVT_RICHTEXT_LEFT_DCLICK = wx.PyEventBinder(wxEVT_COMMAND_RICHTEXT_LEFT_DCLICK, 1) EVT_RICHTEXT_RETURN = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_RETURN, 1) EVT_RICHTEXT_CHARACTER = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_CHARACTER, 1) EVT_RICHTEXT_DELETE = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_DELETE, 1) EVT_RICHTEXT_STYLESHEET_CHANGING = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_STYLESHEET_CHANGING, 1) EVT_RICHTEXT_STYLESHEET_CHANGED = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_STYLESHEET_CHANGED, 1) EVT_RICHTEXT_STYLESHEET_REPLACING = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_STYLESHEET_REPLACING, 1) EVT_RICHTEXT_STYLESHEET_REPLACED = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_STYLESHEET_REPLACED, 1) EVT_RICHTEXT_CONTENT_INSERTED = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_CONTENT_INSERTED, 1) EVT_RICHTEXT_CONTENT_DELETED = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_CONTENT_DELETED, 1) EVT_RICHTEXT_STYLE_CHANGED = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_STYLE_CHANGED, 1) EVT_RICHTEXT_SELECTION_CHANGED = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_SELECTION_CHANGED, 1) EVT_RICHTEXT_BUFFER_RESET = wx.PyEventBinder( wxEVT_COMMAND_RICHTEXT_BUFFER_RESET, 1) class RichTextEvent(_core.NotifyEvent): """Proxy of C++ RichTextEvent class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): 
"""__init__(self, EventType commandType=wxEVT_NULL, int winid=0) -> RichTextEvent""" _richtext.RichTextEvent_swiginit(self,_richtext.new_RichTextEvent(*args, **kwargs)) def GetPosition(*args, **kwargs): """GetPosition(self) -> int""" return _richtext.RichTextEvent_GetPosition(*args, **kwargs) def SetPosition(*args, **kwargs): """SetPosition(self, int n)""" return _richtext.RichTextEvent_SetPosition(*args, **kwargs) def GetFlags(*args, **kwargs): """GetFlags(self) -> int""" return _richtext.RichTextEvent_GetFlags(*args, **kwargs) def SetFlags(*args, **kwargs): """SetFlags(self, int flags)""" return _richtext.RichTextEvent_SetFlags(*args, **kwargs) def GetOldStyleSheet(*args, **kwargs): """GetOldStyleSheet(self) -> wxRichTextStyleSheet""" return _richtext.RichTextEvent_GetOldStyleSheet(*args, **kwargs) def SetOldStyleSheet(*args, **kwargs): """SetOldStyleSheet(self, wxRichTextStyleSheet sheet)""" return _richtext.RichTextEvent_SetOldStyleSheet(*args, **kwargs) def GetNewStyleSheet(*args, **kwargs): """GetNewStyleSheet(self) -> wxRichTextStyleSheet""" return _richtext.RichTextEvent_GetNewStyleSheet(*args, **kwargs) def SetNewStyleSheet(*args, **kwargs): """SetNewStyleSheet(self, wxRichTextStyleSheet sheet)""" return _richtext.RichTextEvent_SetNewStyleSheet(*args, **kwargs) def GetRange(*args, **kwargs): """GetRange(self) -> RichTextRange""" return _richtext.RichTextEvent_GetRange(*args, **kwargs) def SetRange(*args, **kwargs): """SetRange(self, RichTextRange range)""" return _richtext.RichTextEvent_SetRange(*args, **kwargs) def GetCharacter(*args, **kwargs): """GetCharacter(self) -> wxChar""" return _richtext.RichTextEvent_GetCharacter(*args, **kwargs) def SetCharacter(*args, **kwargs): """SetCharacter(self, wxChar ch)""" return _richtext.RichTextEvent_SetCharacter(*args, **kwargs) Flags = property(GetFlags,SetFlags) Index = property(GetPosition,SetPosition) OldStyleSheet = property(GetOldStyleSheet,SetOldStyleSheet) NewStyleSheet = property(GetNewStyleSheet,SetNewStyleSheet) Range = property(GetRange,SetRange) Character = property(GetCharacter,SetCharacter) _richtext.RichTextEvent_swigregister(RichTextEvent) #--------------------------------------------------------------------------- class RichTextHTMLHandler(RichTextFileHandler): """Proxy of C++ RichTextHTMLHandler class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self, String name=HtmlName, String ext=HtmlExt, int type=RICHTEXT_TYPE_HTML) -> RichTextHTMLHandler""" _richtext.RichTextHTMLHandler_swiginit(self,_richtext.new_RichTextHTMLHandler(*args, **kwargs)) def SetTemporaryImageLocations(*args, **kwargs): """ SetTemporaryImageLocations(self, wxArrayString locations) Set the list of image locations generated by the last operation """ return _richtext.RichTextHTMLHandler_SetTemporaryImageLocations(*args, **kwargs) def GetTemporaryImageLocations(*args, **kwargs): """ GetTemporaryImageLocations(self) -> wxArrayString Get the list of image locations generated by the last operation """ return _richtext.RichTextHTMLHandler_GetTemporaryImageLocations(*args, **kwargs) TemporaryImageLocations = property(GetTemporaryImageLocations,SetTemporaryImageLocations) def ClearTemporaryImageLocations(*args, **kwargs): """ ClearTemporaryImageLocations(self) Clear the image locations generated by the last operation """ return _richtext.RichTextHTMLHandler_ClearTemporaryImageLocations(*args, **kwargs) def DeleteTemporaryImages(*args, 
**kwargs): """ DeleteTemporaryImages(self) -> bool Delete the in-memory or temporary files generated by the last operation """ return _richtext.RichTextHTMLHandler_DeleteTemporaryImages(*args, **kwargs) def SetFileCounter(*args, **kwargs): """ SetFileCounter(int counter) Reset the file counter, in case, for example, the same names are required each time """ return _richtext.RichTextHTMLHandler_SetFileCounter(*args, **kwargs) SetFileCounter = staticmethod(SetFileCounter) def SetTempDir(*args, **kwargs): """ SetTempDir(self, String tempDir) Set the directory for storing temporary files. If empty, the system temporary directory will be used. """ return _richtext.RichTextHTMLHandler_SetTempDir(*args, **kwargs) def GetTempDir(*args, **kwargs): """ GetTempDir(self) -> String Get the directory for storing temporary files. If empty, the system temporary directory will be used. """ return _richtext.RichTextHTMLHandler_GetTempDir(*args, **kwargs) TempDir = property(GetTempDir,SetTempDir) def SetFontSizeMapping(*args, **kwargs): """ SetFontSizeMapping(self, wxArrayInt fontSizeMapping) Set mapping from point size to HTML font size. There should be 7 elements, one for each HTML font size, each element specifying the maximum point size for that HTML font size. E.g. 8, 10, 13, 17, 22, 29, 100 """ return _richtext.RichTextHTMLHandler_SetFontSizeMapping(*args, **kwargs) def GetFontSizeMapping(*args, **kwargs): """ GetFontSizeMapping(self) -> wxArrayInt Get mapping deom point size to HTML font size. """ return _richtext.RichTextHTMLHandler_GetFontSizeMapping(*args, **kwargs) FontSizeMapping = property(GetFontSizeMapping,SetFontSizeMapping) _richtext.RichTextHTMLHandler_swigregister(RichTextHTMLHandler) HtmlName = cvar.HtmlName HtmlExt = cvar.HtmlExt def RichTextHTMLHandler_SetFileCounter(*args, **kwargs): """ RichTextHTMLHandler_SetFileCounter(int counter) Reset the file counter, in case, for example, the same names are required each time """ return _richtext.RichTextHTMLHandler_SetFileCounter(*args, **kwargs) #--------------------------------------------------------------------------- class RichTextXMLHandler(RichTextFileHandler): """Proxy of C++ RichTextXMLHandler class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self, String name=XmlName, String ext=XmlExt, int type=RICHTEXT_TYPE_XML) -> RichTextXMLHandler""" _richtext.RichTextXMLHandler_swiginit(self,_richtext.new_RichTextXMLHandler(*args, **kwargs)) _richtext.RichTextXMLHandler_swigregister(RichTextXMLHandler) XmlName = cvar.XmlName XmlExt = cvar.XmlExt #--------------------------------------------------------------------------- RICHTEXT_PRINT_MAX_PAGES = _richtext.RICHTEXT_PRINT_MAX_PAGES RICHTEXT_PAGE_ODD = _richtext.RICHTEXT_PAGE_ODD RICHTEXT_PAGE_EVEN = _richtext.RICHTEXT_PAGE_EVEN RICHTEXT_PAGE_ALL = _richtext.RICHTEXT_PAGE_ALL RICHTEXT_PAGE_LEFT = _richtext.RICHTEXT_PAGE_LEFT RICHTEXT_PAGE_CENTRE = _richtext.RICHTEXT_PAGE_CENTRE RICHTEXT_PAGE_RIGHT = _richtext.RICHTEXT_PAGE_RIGHT class RichTextPrintout(_windows.Printout): """Proxy of C++ RichTextPrintout class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self, String title=wxT("Printout")) -> RichTextPrintout""" _richtext.RichTextPrintout_swiginit(self,_richtext.new_RichTextPrintout(*args, **kwargs)) __swig_destroy__ = 
_richtext.delete_RichTextPrintout __del__ = lambda self : None; def SetRichTextBuffer(*args, **kwargs): """SetRichTextBuffer(self, RichTextBuffer buffer)""" return _richtext.RichTextPrintout_SetRichTextBuffer(*args, **kwargs) def GetRichTextBuffer(*args, **kwargs): """GetRichTextBuffer(self) -> RichTextBuffer""" return _richtext.RichTextPrintout_GetRichTextBuffer(*args, **kwargs) def SetHeaderFooterData(*args, **kwargs): """SetHeaderFooterData(self, wxRichTextHeaderFooterData data)""" return _richtext.RichTextPrintout_SetHeaderFooterData(*args, **kwargs) def GetHeaderFooterData(*args, **kwargs): """GetHeaderFooterData(self) -> wxRichTextHeaderFooterData""" return _richtext.RichTextPrintout_GetHeaderFooterData(*args, **kwargs) def SetMargins(*args, **kwargs): """SetMargins(self, int top=254, int bottom=254, int left=254, int right=254)""" return _richtext.RichTextPrintout_SetMargins(*args, **kwargs) def CalculateScaling(*args, **kwargs): """CalculateScaling(self, DC dc, Rect textRect, Rect headerRect, Rect footerRect)""" return _richtext.RichTextPrintout_CalculateScaling(*args, **kwargs) _richtext.RichTextPrintout_swigregister(RichTextPrintout) class RichTextPrinting(_core.Object): """Proxy of C++ RichTextPrinting class""" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args, **kwargs): """__init__(self, String name=wxT("Printing"), Window parentWindow=None) -> RichTextPrinting""" _richtext.RichTextPrinting_swiginit(self,_richtext.new_RichTextPrinting(*args, **kwargs)) __swig_destroy__ = _richtext.delete_RichTextPrinting __del__ = lambda self : None; def PreviewFile(*args, **kwargs): """PreviewFile(self, String richTextFile) -> bool""" return _richtext.RichTextPrinting_PreviewFile(*args, **kwargs) def PreviewBuffer(*args, **kwargs): """PreviewBuffer(self, RichTextBuffer buffer) -> bool""" return _richtext.RichTextPrinting_PreviewBuffer(*args, **kwargs) def PrintFile(*args, **kwargs): """PrintFile(self, String richTextFile) -> bool""" return _richtext.RichTextPrinting_PrintFile(*args, **kwargs) def PrintBuffer(*args, **kwargs): """PrintBuffer(self, RichTextBuffer buffer) -> bool""" return _richtext.RichTextPrinting_PrintBuffer(*args, **kwargs) def PageSetup(*args, **kwargs): """PageSetup(self)""" return _richtext.RichTextPrinting_PageSetup(*args, **kwargs) def SetHeaderFooterData(*args, **kwargs): """SetHeaderFooterData(self, wxRichTextHeaderFooterData data)""" return _richtext.RichTextPrinting_SetHeaderFooterData(*args, **kwargs) def GetHeaderFooterData(*args, **kwargs): """GetHeaderFooterData(self) -> wxRichTextHeaderFooterData""" return _richtext.RichTextPrinting_GetHeaderFooterData(*args, **kwargs) def SetHeaderText(*args, **kwargs): """SetHeaderText(self, String text, int page=RICHTEXT_PAGE_ALL, int location=RICHTEXT_PAGE_CENTRE)""" return _richtext.RichTextPrinting_SetHeaderText(*args, **kwargs) def GetHeaderText(*args, **kwargs): """GetHeaderText(self, int page=RICHTEXT_PAGE_EVEN, int location=RICHTEXT_PAGE_CENTRE) -> String""" return _richtext.RichTextPrinting_GetHeaderText(*args, **kwargs) def SetFooterText(*args, **kwargs): """SetFooterText(self, String text, int page=RICHTEXT_PAGE_ALL, int location=RICHTEXT_PAGE_CENTRE)""" return _richtext.RichTextPrinting_SetFooterText(*args, **kwargs) def GetFooterText(*args, **kwargs): """GetFooterText(self, int page=RICHTEXT_PAGE_EVEN, int location=RICHTEXT_PAGE_CENTRE) -> String""" return _richtext.RichTextPrinting_GetFooterText(*args, **kwargs) def 
SetShowOnFirstPage(*args, **kwargs): """SetShowOnFirstPage(self, bool show)""" return _richtext.RichTextPrinting_SetShowOnFirstPage(*args, **kwargs) def SetHeaderFooterFont(*args, **kwargs): """SetHeaderFooterFont(self, Font font)""" return _richtext.RichTextPrinting_SetHeaderFooterFont(*args, **kwargs) def SetHeaderFooterTextColour(*args, **kwargs): """SetHeaderFooterTextColour(self, Colour font)""" return _richtext.RichTextPrinting_SetHeaderFooterTextColour(*args, **kwargs) def GetPrintData(*args, **kwargs): """GetPrintData(self) -> PrintData""" return _richtext.RichTextPrinting_GetPrintData(*args, **kwargs) def GetPageSetupData(*args, **kwargs): """GetPageSetupData(self) -> PageSetupDialogData""" return _richtext.RichTextPrinting_GetPageSetupData(*args, **kwargs) def SetPrintData(*args, **kwargs): """SetPrintData(self, PrintData printData)""" return _richtext.RichTextPrinting_SetPrintData(*args, **kwargs) def SetPageSetupData(*args, **kwargs): """SetPageSetupData(self, wxPageSetupData pageSetupData)""" return _richtext.RichTextPrinting_SetPageSetupData(*args, **kwargs) def SetRichTextBufferPreview(*args, **kwargs): """SetRichTextBufferPreview(self, RichTextBuffer buf)""" return _richtext.RichTextPrinting_SetRichTextBufferPreview(*args, **kwargs) def GetRichTextBufferPreview(*args, **kwargs): """GetRichTextBufferPreview(self) -> RichTextBuffer""" return _richtext.RichTextPrinting_GetRichTextBufferPreview(*args, **kwargs) def SetRichTextBufferPrinting(*args, **kwargs): """SetRichTextBufferPrinting(self, RichTextBuffer buf)""" return _richtext.RichTextPrinting_SetRichTextBufferPrinting(*args, **kwargs) def GetRichTextBufferPrinting(*args, **kwargs): """GetRichTextBufferPrinting(self) -> RichTextBuffer""" return _richtext.RichTextPrinting_GetRichTextBufferPrinting(*args, **kwargs) def SetParentWindow(*args, **kwargs): """SetParentWindow(self, Window parent)""" return _richtext.RichTextPrinting_SetParentWindow(*args, **kwargs) def GetParentWindow(*args, **kwargs): """GetParentWindow(self) -> Window""" return _richtext.RichTextPrinting_GetParentWindow(*args, **kwargs) def SetTitle(*args, **kwargs): """SetTitle(self, String title)""" return _richtext.RichTextPrinting_SetTitle(*args, **kwargs) def GetTitle(*args, **kwargs): """GetTitle(self) -> String""" return _richtext.RichTextPrinting_GetTitle(*args, **kwargs) def SetPreviewRect(*args, **kwargs): """SetPreviewRect(self, Rect rect)""" return _richtext.RichTextPrinting_SetPreviewRect(*args, **kwargs) def GetPreviewRect(*args, **kwargs): """GetPreviewRect(self) -> Rect""" return _richtext.RichTextPrinting_GetPreviewRect(*args, **kwargs) _richtext.RichTextPrinting_swigregister(RichTextPrinting)
ktan2020/legacy-automation
win/Lib/site-packages/wx-3.0-msw/wx/richtext.py
Python
mit
189,832
#!/usr/bin/python import re, csv, sys from urlparse import urlparse from nltk.corpus import stopwords from nltk.tokenize import word_tokenize, sent_tokenize from nltk.text import TextCollection #process command line arguments if len(sys.argv) < 2: print "ERROR: arg1: must specify the input file" print " arg2: specify -t to generate test ARFF" sys.exit(1) test = False if len(sys.argv) > 2: test = (sys.argv[2] == '-t') # initialize some variables stoplist = stopwords.words('english') stoplist.extend(['.', ',', ':', '?', '!' ';', '"', "'", '-', '--', '(', ')', '/', '\\', '[', ']', '{', '}', '|', '+', '*', '^']) emots_pos = [':)', ':D', ':-)', ':-D', '=)', '=D', ':]', ':-]', '=]', 'X)', 'XD', 'X]', 'X-)', 'X-D', 'X-]', 'C:', ';)', ';D', ';]', ';-)', ';-D', ';-]', '<3', ':P', ':-P', '=P', 'XP', 'X-P', ':o)', ':3', ':>', '8)', ':^)', '8-D', '8D', '=3', 'B^D', '\\o/', '<:', '(:', '(-:', '(=', '[:', '[-:', '[=', '(X', '[X', '(-X', '[-X', ':\')', ':\'-)', ':\']', ':\'-]', '=\')', '=\']', ';^)', '>:P', ':-b', ':b'] emots_pos = [emot.lower() for emot in emots_pos] emots_neg = [':(', ':[', ':-(', ':-[', 'D:', '=(', '=[', 'D=', 'DX', ':C', '</3', '>:[', ':-c', ':-<', ':<', '>:', ':{', ':\'-(', ':\'(', ':\'[', '=\'(', '=\'[', 'D;', 'D\':', 'D:<', 'D8', 'D-\':', '):', ']:', ')-:', ']-:', ')=', ']=', ']:<', '>-:'] emots_neg = [emot.lower() for emot in emots_neg] gaz_pos = [] gaz_neg = [] tweets = [] sentiments = [] emots_count = [] punct_count = [] gaz_count = [] words = [] #will contain all non-stop words that occur >1 times words1 = [] #will contain all non-stop words that occur 1 time # generate the gazetteers gaz_file = open('positive-words.txt', 'r') for line in gaz_file: line = line.strip() if line != '' and line[0] != ';': gaz_pos.append(line) gaz_file.close() gaz_file = open('negative-words.txt', 'r') for line in gaz_file: line = line.strip() if line != '' and line[0] != ';': gaz_neg.append(line) gaz_file.close() # print some information print 'Number of positive emoticons: ' + str(len(emots_pos)) print 'Number of negative emoticons: ' + str(len(emots_neg)) print '\nNumber of positive gazetteer words: ' + str(len(gaz_pos)) print 'Number of negative gazetteer words: ' + str(len(gaz_neg)) # extract all tweets and words (IN TRAINING) words_file = [] if not test: words_file = open('words-list.txt', 'w') # COMMENT OUT FOR TESTING tweet_file = open(sys.argv[1], 'rb') reader = csv.reader(tweet_file, delimiter=',', quotechar='"', escapechar='\\', quoting=csv.QUOTE_ALL) for line in reader: # save tweet data tweet = line[4].lower() sent = line[1] # REMOVE THIS SECTION FOR TESTING if not test: if sent == 'positive': sent = 'POS' elif sent == 'negative': sent = 'NEG' else: sent = 'OTHER' sentiments.append(sent) # standardize URLs w = tweet.split() for i in range(len(w)): r = urlparse(w[i]) if r[0] != '' and r[1] != '': w[i] = 'URL' tweet = ' '.join(w) tweets.append(tweet) # count emoticons count_pos = 0 for emot in emots_pos: count_pos += tweet.count(emot) count_neg = 0 for emot in emots_neg: count_neg += tweet.count(emot) emots_count.append( (count_pos, count_neg) ) # count punctuation punct_count.append( (tweet.count('?'), tweet.count('!')) ) # count gazetteer words count_pos = 0 for gw in gaz_pos: count_pos += tweet.count(gw) count_neg = 0 for gw in gaz_neg: count_neg += tweet.count(gw) gaz_count.append( (count_pos, count_neg) ) # USE THIS SECTION FOR TRAINING # extract only words used >1 times, and ignore stopwords if not test : tweet_sents = sent_tokenize(tweet) for sent in tweet_sents: sw = 
word_tokenize(sent) for word in sw: if word not in stoplist: if word not in words: if word in words1: words.append(word) words_file.write(word + '\n') else: words1.append(word) tweet_file.close() if not test: words_file.close() # COMMENT OUT FOR TESTING # USE THIS SECTION FOR TESTING # extract all words (IN TESTING) if test: wfile = open('words-list.txt', 'r') for line in wfile: words.append(line.strip()) wfile.close() # print some more information print '\nNumber of tweets: ' + str(len(tweets)) print 'Number of words occuring >1 time: ' + str(len(words)) print 'Number of words occuring 1 time: ' + str(len(words1)) # create .arff file for Weka texts = TextCollection(tweets) arff = open('tweets_sentiment.arff', "w") wc = 0 # header arff.write("@relation sentiment_analysis\n\n") arff.write("@attribute numPosEmots numeric\n") arff.write("@attribute numNegEmots numeric\n") arff.write("@attribute numQuest numeric\n") arff.write("@attribute numExclam numeric\n") arff.write("@attribute numPosGaz numeric\n") arff.write("@attribute numNegGaz numeric\n") for word in words: arff.write("@attribute word_") sub_w = re.subn('[^a-zA-Z]', 'X', word) arff.write(sub_w[0]) if sub_w[1] > 0: arff.write('_' + str(wc)) wc += 1 arff.write(" numeric\n") arff.write("@attribute class {POS, NEG, OTHER}\n\n") arff.write("@data\n") # data for i in xrange(len(tweets)): arff.write(str(emots_count[i][0]) + ',' + str(emots_count[i][1]) + ',') arff.write(str(punct_count[i][0]) + ',' + str(punct_count[i][1]) + ',') arff.write(str(gaz_count[i][0]) + ',' + str(gaz_count[i][1]) + ',') for j in xrange(len(words)): #loop through unigrams arff.write(str(texts.tf_idf(words[j], tweets[i])) + ',') arff.write(sentiments[i] + '\n') arff.close() print '\nFinished pre-processing! The ARFF file for Weka has been created.'
satybald/twitter-modeling-lda
source code/preprocess.py
Python
mit
5,802
import scipy.stats as meas def evaluation(predict, gold): """ pearsonr of predict and gold :param predict: list :param gold: list :return: mape """ pearsonr = meas.pearsonr(predict, gold)[0] return pearsonr def eval_file(predict_file, gold_feature_file): predict = open(predict_file).readlines() gold = open(gold_feature_file).readlines() predict = [float(x.strip().split()[0])for x in predict] gold = [float(x.strip().split()[0]) for x in gold] pearsonr = evaluation(predict, gold) return pearsonr def eval_output_file(predict_file): predict, gold = [], [] with open(predict_file) as f: for line in f: line = line.strip().split('\t#\t') predict.append(float(line[0])) gold.append(float(line[1].split('\t')[0])) pearsonr = evaluation(predict, gold) return pearsonr def eval_file_corpus(predict_file_list, gold_file_list): predicts, golds = [], [] for predict_file, gold_file in zip(predict_file_list, gold_file_list): predict = open(predict_file).readlines() gold = open(gold_file).readlines() predicts += predict golds += gold predicts = [float(x.strip().split()[0]) for x in predicts] golds = [float(x.strip().split()[0]) for x in golds] pearsonr = evaluation(predicts, golds) return pearsonr ######classification############################################################### # coding: utf-8 from stst.confusion_matrix import Alphabet, ConfusionMatrix from stst.config import DICT_INDEX_TO_LABEL def Evaluation(gold_file_path, predict_file_path): with open(gold_file_path) as gold_file, open(predict_file_path) as predict_file: gold_list = [ line.strip().split('\t')[0] for line in gold_file] predicted_list = [line.strip().split("\t#\t")[0] for line in predict_file] binary_alphabet = Alphabet() for i in range(18): binary_alphabet.add(DICT_INDEX_TO_LABEL[i]) cm = ConfusionMatrix(binary_alphabet) cm.add_list(predicted_list, gold_list) cm.print_out() macro_p, macro_r, macro_f1 = cm.get_average_prf() overall_accuracy = cm.get_accuracy() return overall_accuracy, macro_p, macro_r, macro_f1 if __name__ == '__main__': pass
fssqawj/classification_task
stst/evaluation.py
Python
mit
2,319
# Copyright 2017 MakeMyTrip (Kunal Aggarwal, Avinash Jain) # # This file is part of WebGuard. # # WebGuard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # WebGuard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with WebGuard. If not, see <http://www.gnu.org/licenses/>. from django.conf.urls import url from . import views urlpatterns = [ url(r'^login$', views.login, name = 'login'), url(r'^logout$', views.logout, name = 'logout'), url(r'^register$', views.register, name = 'register'), url(r'^unregister$', views.unregister, name = 'unregister'), # ZAP INSTANCE API URLS url(r'^zap/list$', views.zap_list, name = 'zap_list'), url(r'^zap/start$', views.zap_start, name = 'zap_start'), url(r'^zap/stop/(?P<ip>[0-9.]+)/(?P<port>[0-9]+)$', views.zap_stop, name = 'zap_stop'), url(r'^zap/get/logs/(?P<ip>[0-9.]+)/(?P<port>[0-9]+)$', views.zap_get_logs, name = 'zap_get_logs'), # ZAP SCAN URLS url(r'^zap/scan/start$', views.zap_scan_start, name = 'zap_scan_start'), url(r'^zap/scan/stop$', views.zap_scan_stop, name = 'zap_scan_stop'), url(r'^zap/scan/list$', views.zap_scan_list, name = 'zap_scan_list'), url(r'^zap/scan/report$', views.zap_scan_report, name = 'zap_scan_report'), url(r'^zap/scan/report/email$', views.zap_scan_report_email, name = 'zap_scan_report_email'), url(r'^zap/scan/report/save$', views.zap_scan_report_save, name = 'zap_scan_report_save'), url(r'^zap/scan/url$', views.zap_scan_url, name = 'zap_scan_url'), # ZAP HOST FILE URLS url(r'^zap/hosts/list$', views.zap_hosts_list, name = 'zap_hosts_list'), url(r'^zap/hosts/write$', views.zap_hosts_write, name = 'zap_hosts_write'), ]
makemytrip/webGuard-Server
api/urls.py
Python
gpl-3.0
2,078
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import types so that we can reference ListType in sphinx param declarations. # We can't just use list, because sphinx gets confused by # openstack.resource.Resource.list and openstack.resource2.Resource.list # import jsonpatch import types # noqa from openstack.cloud import exc from openstack.cloud import _normalize from openstack.cloud import _utils from openstack import exceptions from openstack import proxy class SecurityGroupCloudMixin(_normalize.Normalizer): def __init__(self): self.secgroup_source = self.config.config['secgroup_source'] def search_security_groups(self, name_or_id=None, filters=None): # `filters` could be a dict or a jmespath (str) groups = self.list_security_groups( filters=filters if isinstance(filters, dict) else None ) return _utils._filter_list(groups, name_or_id, filters) def list_security_groups(self, filters=None): """List all available security groups. :param filters: (optional) dict of filter conditions to push down :returns: A list of security group ``munch.Munch``. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) if not filters: filters = {} data = [] # Handle neutron security groups if self._use_neutron_secgroups(): # pass filters dict to the list to filter as much as possible on # the server side return list( self.network.security_groups(allow_unknown_params=True, **filters)) # Handle nova security groups else: data = proxy._json_response(self.compute.get( '/os-security-groups', params=filters)) return self._normalize_secgroups( self._get_and_munchify('security_groups', data)) def get_security_group(self, name_or_id, filters=None): """Get a security group by name or ID. :param name_or_id: Name or ID of the security group. :param filters: A dictionary of meta data to use for further filtering. Elements of this dictionary may, themselves, be dictionaries. Example:: { 'last_name': 'Smith', 'other': { 'gender': 'Female' } } OR A string containing a jmespath expression for further filtering. Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" :returns: A security group ``munch.Munch`` or None if no matching security group is found. """ return _utils._get_entity( self, 'security_group', name_or_id, filters) def get_security_group_by_id(self, id): """ Get a security group by ID :param id: ID of the security group. :returns: A security group ``munch.Munch``. 
""" if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) error_message = ("Error getting security group with" " ID {id}".format(id=id)) if self._use_neutron_secgroups(): return self.network.get_security_group(id) else: data = proxy._json_response( self.compute.get( '/os-security-groups/{id}'.format(id=id)), error_message=error_message) return self._normalize_secgroup( self._get_and_munchify('security_group', data)) def create_security_group(self, name, description, project_id=None): """Create a new security group :param string name: A name for the security group. :param string description: Describes the security group. :param string project_id: Specify the project ID this security group will be created on (admin-only). :returns: A ``munch.Munch`` representing the new security group. :raises: OpenStackCloudException on operation error. :raises: OpenStackCloudUnavailableFeature if security groups are not supported on this cloud. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) data = [] security_group_json = { 'name': name, 'description': description } if project_id is not None: security_group_json['tenant_id'] = project_id if self._use_neutron_secgroups(): return self.network.create_security_group( **security_group_json) else: data = proxy._json_response(self.compute.post( '/os-security-groups', json={'security_group': security_group_json})) return self._normalize_secgroup( self._get_and_munchify('security_group', data)) def delete_security_group(self, name_or_id): """Delete a security group :param string name_or_id: The name or unique ID of the security group. :returns: True if delete succeeded, False otherwise. :raises: OpenStackCloudException on operation error. :raises: OpenStackCloudUnavailableFeature if security groups are not supported on this cloud. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) # TODO(mordred): Let's come back and stop doing a GET before we do # the delete. secgroup = self.get_security_group(name_or_id) if secgroup is None: self.log.debug('Security group %s not found for deleting', name_or_id) return False if self._use_neutron_secgroups(): self.network.delete_security_group( secgroup['id'], ignore_missing=False) return True else: proxy._json_response(self.compute.delete( '/os-security-groups/{id}'.format(id=secgroup['id']))) return True @_utils.valid_kwargs('name', 'description') def update_security_group(self, name_or_id, **kwargs): """Update a security group :param string name_or_id: Name or ID of the security group to update. :param string name: New name for the security group. :param string description: New description for the security group. :returns: A ``munch.Munch`` describing the updated security group. :raises: OpenStackCloudException on operation error. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) group = self.get_security_group(name_or_id) if group is None: raise exc.OpenStackCloudException( "Security group %s not found." 
% name_or_id) if self._use_neutron_secgroups(): return self.network.update_security_group( group['id'], **kwargs ) else: for key in ('name', 'description'): kwargs.setdefault(key, group[key]) data = proxy._json_response( self.compute.put( '/os-security-groups/{id}'.format(id=group['id']), json={'security_group': kwargs})) return self._normalize_secgroup( self._get_and_munchify('security_group', data)) def create_security_group_rule(self, secgroup_name_or_id, port_range_min=None, port_range_max=None, protocol=None, remote_ip_prefix=None, remote_group_id=None, direction='ingress', ethertype='IPv4', project_id=None): """Create a new security group rule :param string secgroup_name_or_id: The security group name or ID to associate with this security group rule. If a non-unique group name is given, an exception is raised. :param int port_range_min: The minimum port number in the range that is matched by the security group rule. If the protocol is TCP or UDP, this value must be less than or equal to the port_range_max attribute value. If nova is used by the cloud provider for security groups, then a value of None will be transformed to -1. :param int port_range_max: The maximum port number in the range that is matched by the security group rule. The port_range_min attribute constrains the port_range_max attribute. If nova is used by the cloud provider for security groups, then a value of None will be transformed to -1. :param string protocol: The protocol that is matched by the security group rule. Valid values are None, tcp, udp, and icmp. :param string remote_ip_prefix: The remote IP prefix to be associated with this security group rule. This attribute matches the specified IP prefix as the source IP address of the IP packet. :param string remote_group_id: The remote group ID to be associated with this security group rule. :param string direction: Ingress or egress: The direction in which the security group rule is applied. For a compute instance, an ingress security group rule is applied to incoming (ingress) traffic for that instance. An egress rule is applied to traffic leaving the instance. :param string ethertype: Must be IPv4 or IPv6, and addresses represented in CIDR must match the ingress or egress rules. :param string project_id: Specify the project ID this security group will be created on (admin-only). :returns: A ``munch.Munch`` representing the new security group rule. :raises: OpenStackCloudException on operation error. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) secgroup = self.get_security_group(secgroup_name_or_id) if not secgroup: raise exc.OpenStackCloudException( "Security group %s not found." % secgroup_name_or_id) if self._use_neutron_secgroups(): # NOTE: Nova accepts -1 port numbers, but Neutron accepts None # as the equivalent value. rule_def = { 'security_group_id': secgroup['id'], 'port_range_min': None if port_range_min == -1 else port_range_min, 'port_range_max': None if port_range_max == -1 else port_range_max, 'protocol': protocol, 'remote_ip_prefix': remote_ip_prefix, 'remote_group_id': remote_group_id, 'direction': direction, 'ethertype': ethertype } if project_id is not None: rule_def['tenant_id'] = project_id return self.network.create_security_group_rule( **rule_def ) else: # NOTE: Neutron accepts None for protocol. Nova does not. 
if protocol is None: raise exc.OpenStackCloudException('Protocol must be specified') if direction == 'egress': self.log.debug( 'Rule creation failed: Nova does not support egress rules' ) raise exc.OpenStackCloudException( 'No support for egress rules') # NOTE: Neutron accepts None for ports, but Nova requires -1 # as the equivalent value for ICMP. # # For TCP/UDP, if both are None, Neutron allows this and Nova # represents this as all ports (1-65535). Nova does not accept # None values, so to hide this difference, we will automatically # convert to the full port range. If only a single port value is # specified, it will error as normal. if protocol == 'icmp': if port_range_min is None: port_range_min = -1 if port_range_max is None: port_range_max = -1 elif protocol in ['tcp', 'udp']: if port_range_min is None and port_range_max is None: port_range_min = 1 port_range_max = 65535 security_group_rule_dict = dict(security_group_rule=dict( parent_group_id=secgroup['id'], ip_protocol=protocol, from_port=port_range_min, to_port=port_range_max, cidr=remote_ip_prefix, group_id=remote_group_id )) if project_id is not None: security_group_rule_dict[ 'security_group_rule']['tenant_id'] = project_id data = proxy._json_response( self.compute.post( '/os-security-group-rules', json=security_group_rule_dict )) return self._normalize_secgroup_rule( self._get_and_munchify('security_group_rule', data)) def delete_security_group_rule(self, rule_id): """Delete a security group rule :param string rule_id: The unique ID of the security group rule. :returns: True if delete succeeded, False otherwise. :raises: OpenStackCloudException on operation error. :raises: OpenStackCloudUnavailableFeature if security groups are not supported on this cloud. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) if self._use_neutron_secgroups(): self.network.delete_security_group_rule( rule_id, ignore_missing=False ) return True else: try: exceptions.raise_from_response( self.compute.delete( '/os-security-group-rules/{id}'.format(id=rule_id))) except exc.OpenStackCloudResourceNotFound: return False return True def _has_secgroups(self): if not self.secgroup_source: return False else: return self.secgroup_source.lower() in ('nova', 'neutron') def _use_neutron_secgroups(self): return (self.has_service('network') and self.secgroup_source == 'neutron')
dtroyer/python-openstacksdk
openstack/cloud/_security_group.py
Python
apache-2.0
16,266
# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo.config import cfg from neutron.common import topics from neutron.openstack.common import importutils from neutron.openstack.common import log as logging LOG = logging.getLogger(__name__) SG_RPC_VERSION = "1.1" security_group_opts = [ cfg.StrOpt( 'firewall_driver', help=_('Driver for security groups firewall in the L2 agent')), cfg.BoolOpt( 'enable_security_group', default=True, help=_( 'Controls whether the neutron security group API is enabled ' 'in the server. It should be false when using no security ' 'groups or using the nova security group API.')) ] cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP') #This is backward compatibility check for Havana def _is_valid_driver_combination(): return ((cfg.CONF.SECURITYGROUP.enable_security_group and (cfg.CONF.SECURITYGROUP.firewall_driver and cfg.CONF.SECURITYGROUP.firewall_driver != 'neutron.agent.firewall.NoopFirewallDriver')) or (not cfg.CONF.SECURITYGROUP.enable_security_group and (cfg.CONF.SECURITYGROUP.firewall_driver == 'neutron.agent.firewall.NoopFirewallDriver' or cfg.CONF.SECURITYGROUP.firewall_driver is None) )) def is_firewall_enabled(): if not _is_valid_driver_combination(): LOG.warn(_("Driver configuration doesn't match with " "enable_security_group")) return cfg.CONF.SECURITYGROUP.enable_security_group def _disable_extension(extension, aliases): if extension in aliases: aliases.remove(extension) def disable_security_group_extension_by_config(aliases): if not is_firewall_enabled(): LOG.info(_('Disabled security-group extension.')) _disable_extension('security-group', aliases) LOG.info(_('Disabled allowed-address-pairs extension.')) _disable_extension('allowed-address-pairs', aliases) class SecurityGroupServerRpcApiMixin(object): """A mix-in that enable SecurityGroup support in plugin rpc.""" def security_group_rules_for_devices(self, context, devices): LOG.debug(_("Get security group rules " "for devices via rpc %r"), devices) return self.call(context, self.make_msg('security_group_rules_for_devices', devices=devices), version=SG_RPC_VERSION, topic=self.topic) class SecurityGroupAgentRpcCallbackMixin(object): """A mix-in that enable SecurityGroup agent support in agent implementations. """ #mix-in object should be have sg_agent sg_agent = None def _security_groups_agent_not_set(self): LOG.warning(_("Security group agent binding currently not set. " "This should be set by the end of the init " "process.")) def security_groups_rule_updated(self, context, **kwargs): """Callback for security group rule update. 
:param security_groups: list of updated security_groups """ security_groups = kwargs.get('security_groups', []) LOG.debug( _("Security group rule updated on remote: %s"), security_groups) if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_rule_updated(security_groups) def security_groups_member_updated(self, context, **kwargs): """Callback for security group member update. :param security_groups: list of updated security_groups """ security_groups = kwargs.get('security_groups', []) LOG.debug( _("Security group member updated on remote: %s"), security_groups) if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_member_updated(security_groups) def security_groups_provider_updated(self, context, **kwargs): """Callback for security group provider update.""" LOG.debug(_("Provider rule updated")) if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_provider_updated() class SecurityGroupAgentRpcMixin(object): """A mix-in that enable SecurityGroup agent support in agent implementations. """ def init_firewall(self, defer_refresh_firewall=False): firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver LOG.debug(_("Init firewall settings (driver=%s)"), firewall_driver) if not _is_valid_driver_combination(): LOG.warn(_("Driver configuration doesn't match " "with enable_security_group")) if not firewall_driver: firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver' self.firewall = importutils.import_object(firewall_driver) # The following flag will be set to true if port filter must not be # applied as soon as a rule or membership notification is received self.defer_refresh_firewall = defer_refresh_firewall # Stores devices for which firewall should be refreshed when # deferred refresh is enabled. 
self.devices_to_refilter = set() # Flag raised when a global refresh is needed self.global_refresh_firewall = False def prepare_devices_filter(self, device_ids): if not device_ids: return LOG.info(_("Preparing filters for devices %s"), device_ids) devices = self.plugin_rpc.security_group_rules_for_devices( self.context, list(device_ids)) with self.firewall.defer_apply(): for device in devices.values(): self.firewall.prepare_port_filter(device) def security_groups_rule_updated(self, security_groups): LOG.info(_("Security group " "rule updated %r"), security_groups) self._security_group_updated( security_groups, 'security_groups') def security_groups_member_updated(self, security_groups): LOG.info(_("Security group " "member updated %r"), security_groups) self._security_group_updated( security_groups, 'security_group_source_groups') def _security_group_updated(self, security_groups, attribute): devices = [] sec_grp_set = set(security_groups) for device in self.firewall.ports.values(): if sec_grp_set & set(device.get(attribute, [])): devices.append(device['device']) if devices: if self.defer_refresh_firewall: LOG.debug(_("Adding %s devices to the list of devices " "for which firewall needs to be refreshed"), devices) self.devices_to_refilter |= set(devices) else: self.refresh_firewall(devices) def security_groups_provider_updated(self): LOG.info(_("Provider rule updated")) if self.defer_refresh_firewall: # NOTE(salv-orlando): A 'global refresh' might not be # necessary if the subnet for which the provider rules # were updated is known self.global_refresh_firewall = True else: self.refresh_firewall() def remove_devices_filter(self, device_ids): if not device_ids: return LOG.info(_("Remove device filter for %r"), device_ids) with self.firewall.defer_apply(): for device_id in device_ids: device = self.firewall.ports.get(device_id) if not device: continue self.firewall.remove_port_filter(device) def refresh_firewall(self, device_ids=None): LOG.info(_("Refresh firewall rules")) if not device_ids: device_ids = self.firewall.ports.keys() if not device_ids: LOG.info(_("No ports here to refresh firewall")) return devices = self.plugin_rpc.security_group_rules_for_devices( self.context, device_ids) with self.firewall.defer_apply(): for device in devices.values(): LOG.debug(_("Update port filter for %s"), device['device']) self.firewall.update_port_filter(device) def firewall_refresh_needed(self): return self.global_refresh_firewall or self.devices_to_refilter def setup_port_filters(self, new_devices, updated_devices): """Configure port filters for devices. This routine applies filters for new devices and refreshes firewall rules when devices have been updated, or when there are changes in security group membership or rules. 
:param new_devices: set containing identifiers for new devices :param updated_devices: set containing identifiers for updated devices """ if new_devices: LOG.debug(_("Preparing device filters for %d new devices"), len(new_devices)) self.prepare_devices_filter(new_devices) # These data structures are cleared here in order to avoid # losing updates occurring during firewall refresh devices_to_refilter = self.devices_to_refilter global_refresh_firewall = self.global_refresh_firewall self.devices_to_refilter = set() self.global_refresh_firewall = False # TODO(salv-orlando): Avoid if possible ever performing the global # refresh providing a precise list of devices for which firewall # should be refreshed if global_refresh_firewall: LOG.debug(_("Refreshing firewall for all filtered devices")) self.refresh_firewall() else: # If a device is both in new and updated devices # avoid reprocessing it updated_devices = ((updated_devices | devices_to_refilter) - new_devices) if updated_devices: LOG.debug(_("Refreshing firewall for %d devices"), len(updated_devices)) self.refresh_firewall(updated_devices) class SecurityGroupAgentRpcApiMixin(object): def _get_security_group_topic(self): return topics.get_topic_name(self.topic, topics.SECURITY_GROUP, topics.UPDATE) def security_groups_rule_updated(self, context, security_groups): """Notify rule updated security groups.""" if not security_groups: return self.fanout_cast(context, self.make_msg('security_groups_rule_updated', security_groups=security_groups), version=SG_RPC_VERSION, topic=self._get_security_group_topic()) def security_groups_member_updated(self, context, security_groups): """Notify member updated security groups.""" if not security_groups: return self.fanout_cast(context, self.make_msg('security_groups_member_updated', security_groups=security_groups), version=SG_RPC_VERSION, topic=self._get_security_group_topic()) def security_groups_provider_updated(self, context): """Notify provider updated security groups.""" self.fanout_cast(context, self.make_msg('security_groups_provider_updated'), version=SG_RPC_VERSION, topic=self._get_security_group_topic())
shakamunyi/neutron-dvr
neutron/agent/securitygroups_rpc.py
Python
apache-2.0
12,495
from __future__ import unicode_literals from django.contrib.auth.models import User from django.db import models from datetime import date, time, datetime from django.core.validators import RegexValidator # Create your models here. #list of all choices for colleges College = ( ('CAMD', 'CAMD'), ('CCIS', 'CCIS'), ('COS', 'COS'), ('CSSH', 'CSSH'), ('BOUVE', 'BOUVE'), ('DMSB', 'DMSB'), ('COE', 'COE'), ('LAW', 'LAW'), ('CPS', 'CPS'), ('PROVOST', 'PROVOST'), ) ApprovalStatus = ( ('PENDING', 'PENDING'), ('APPROVED', 'APPROVED'), ('REJECTED', 'REJECTED'), ) ServiceType = ( ('DIRECT_SERVICE', 'Direct Service'), ('TRAINING', 'Trainings & Orientations'), ('IND_RESEARCH', 'Individual Research & Planning'), ('TEAM_RESEARCH', 'Team Research & Planning'), ) # User Classes #################################################### ######################################################## class StudentManager(models.Manager): def create_student_without_user(self, first_name, last_name, grad_year): student = self.create() student.first_name=first_name student.last_name=last_name student.grad_year=grad_year return student def create_student(self, user, grad_year): student = self.create_student_without_user(first_name=user.first_name, last_name=user.last_name, grad_year=grad_year) student.user = user return student class Student(models.Model): #objects= StudentManager() numeric = RegexValidator(r'^[0-9]*$', 'only numbers allowed') user = models.OneToOneField(User, null=True, unique=True, on_delete=models.SET_NULL) courses = models.ManyToManyField('Course', related_name='students') grad_year = models.CharField(validators=[numeric], max_length=4, null=True) objects= StudentManager() college = models.CharField(choices=College, default='NONE', max_length=7) def __unicode__(self): if self.user is not None: return self.user.first_name + " " + self.user.last_name + "(" + self.user.email + ")" return "No user associated with this student" ############################################################## class FacultyManager(models.Manager): def create_faculty(self, user): faculty = self.create(user=user) faculty.user = user return faculty class Faculty(models.Model): class Meta: verbose_name = 'Faculty Member' verbose_name_plural = 'Faculty Members' objects = FacultyManager() user = models.OneToOneField(User, null=True) def __unicode__(self): return self.user.first_name + " " + self.user.last_name ############################################################### class StaffManager(models.Manager): def create_student(self, user, nuid, grad_year): staff = self.create(user=user) staff.user = user return student class Staff(models.Model): class Meta: verbose_name = 'Teaching Assistant' verbose_name_plural = 'Teaching Assistants' user = models.OneToOneField(User, null=True) courses = models.ManyToManyField('Course') objects = StaffManager() user = models.OneToOneField(User, null=True) def __unicode__(self): return self.user.first_name + " " + self.user.last_name ################################################################ # Data Classes ###################################################### class SubmitReportManager(models.Manager): def query_pending_reports(request): return SubmitReport.objects.all().filter(status="PENDING") class SubmitReport(models.Model): class Meta: verbose_name = 'Submitted Time Sheet' verbose_name_plural = 'Submitted Time Sheets' first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) start_date = models.DateField(auto_now_add=False, auto_now=False, default=None) end_date 
= models.DateField(auto_now_add=False, auto_now=False, default=None) start_time = models.TimeField(auto_now_add=False, auto_now=False, default=None) end_time = models.TimeField(auto_now_add=False, auto_now=False, default=None) courses = models.ManyToManyField('Course', blank=True) service_type = models.CharField(max_length=14, null=True, blank=False, choices=ServiceType, default='default') status = models.CharField(max_length=8, choices=ApprovalStatus, default='PENDING', null=False, blank=False) summary = models.CharField(max_length=150, null=True, blank=True) submitter = models.ForeignKey(Student, null=True, on_delete=models.PROTECT) objects = SubmitReportManager() partner = models.ForeignKey('Partner', null=True, blank=False) def __unicode__(self): return (self.submitter.__unicode__()) class Course(models.Model): numeric = RegexValidator(r'^[0-9]*$', 'only numbers allowed') instructor = models.ForeignKey(Faculty, null=True, blank=False) college = models.CharField(choices=College, default='NONE', max_length=7) course_number = models.CharField(max_length=10, null=True) CRN = models.CharField(validators=[numeric], max_length=5, unique=True) section = models.IntegerField(null=True, blank=False) def __unicode__(self): return self.course_number + ": " + self.CRN class Partner(models.Model): name = models.CharField(max_length=100, null=True, default='New Partner Organization', unique=True) is_active = models.BooleanField(default=True, null=False) courses = models.ManyToManyField(Course) def __unicode__(self): return self.name
ServiceLearningB/ServiceLearningNew
submit_reports/models.py
Python
mit
5,288
import urllib2 from pyosm.parsing import iter_osm_file, iter_osm_change_file class Api(object): def __init__(self, base_url='https://api.openstreetmap.org/api'): self._base = base_url self.USER_AGENT = 'pyosm/1.0 (http://github.com/iandees/pyosm)' def _get(self, path, params={}): headers = { 'User-Agent': self.USER_AGENT } req = urllib2.Request(self._base + path, headers=headers) return urllib2.urlopen(req) def _get_as_osm(self, path, params={}): return [t for t in iter_osm_file(self._get(path, params))] def _get_object_revision_as_osm(self, kind, thing_id, version=None): path = '/0.6/{}/{}'.format(kind, thing_id) if version: path += '/' + str(version) everything = self._get_as_osm(path) single = None if everything: single = next(iter(everything)) return single def get_node(self, node_id, version=None): return self._get_object_revision_as_osm('node', node_id, version) def get_way(self, way_id, version=None): return self._get_object_revision_as_osm('way', way_id, version) def get_relation(self, relation_id, version=None): return self._get_object_revision_as_osm('relation', relation_id, version) def _get_objects_as_osm(self, kind, thing_ids): plural_kind = kind + 's' path = '/0.6/{}'.format(plural_kind) everything = self._get_as_osm(path, params={plural_kind: thing_ids}) return everything def get_nodes(self, node_ids): return self._get_objects_as_osm('node', node_ids) def get_ways(self, way_ids): return self._get_objects_as_osm('way', way_ids) def get_relations(self, relation_ids): return self._get_objects_as_osm('relation', relation_ids) def _get_object_history_as_osm(self, kind, thing_id): path = '/0.6/{}/{}/history'.format(kind, thing_id) everything = self._get_as_osm(path) return everything def get_node_history(self, node_id): return self._get_object_history_as_osm('node', node_id) def get_way_history(self, way_id): return self._get_object_history_as_osm('way', way_id) def get_relation_history(self, relation_id): return self._get_object_history_as_osm('relation', relation_id) def get_changeset_download(self, changeset_id): return [t for t in iter_osm_change_file(self._get('/0.6/changeset/{}/download'.format(changeset_id)))] def get_changeset_metadata(self, changeset_id): return next(iter(self._get_as_osm('/0.6/changeset/{}'.format(changeset_id))))
iandees/pyosm
pyosm/api.py
Python
mit
2,668
""" A set of common tools to be used in pilot commands """ __RCSID__ = '$Id$' import sys import time import os import pickle import getopt import imp import urllib2 import signal def printVersion( log ): log.info( "Running %s" % " ".join( sys.argv ) ) try: with open( "%s.run" % sys.argv[0], "w" ) as fd: pickle.dump( sys.argv[1:], fd ) except OSError: pass log.info( "Version %s" % __RCSID__ ) def pythonPathCheck(): try: os.umask( 18 ) # 022 pythonpath = os.getenv( 'PYTHONPATH', '' ).split( ':' ) print 'Directories in PYTHONPATH:', pythonpath for p in pythonpath: if p == '': continue try: if os.path.normpath( p ) in sys.path: # In case a given directory is twice in PYTHONPATH it has to removed only once sys.path.remove( os.path.normpath( p ) ) except Exception, x: print x print "[EXCEPTION-info] Failing path:", p, os.path.normpath( p ) print "[EXCEPTION-info] sys.path:", sys.path raise x except Exception, x: print x print "[EXCEPTION-info] sys.executable:", sys.executable print "[EXCEPTION-info] sys.version:", sys.version print "[EXCEPTION-info] os.uname():", os.uname() raise x def alarmTimeoutHandler( *args ): raise Exception( 'Timeout' ) def retrieveUrlTimeout( url, fileName, log, timeout = 0 ): """ Retrieve remote url to local file, with timeout wrapper """ urlData = '' if timeout: signal.signal( signal.SIGALRM, alarmTimeoutHandler ) # set timeout alarm signal.alarm( timeout + 5 ) try: remoteFD = urllib2.urlopen( url ) expectedBytes = 0 # Sometimes repositories do not return Content-Length parameter try: expectedBytes = long( remoteFD.info()[ 'Content-Length' ] ) except Exception as x: expectedBytes = 0 data = remoteFD.read() if fileName: with open( fileName + '-local', "wb" ) as localFD: localFD.write( data ) else: urlData += data remoteFD.close() if len( data ) != expectedBytes and expectedBytes > 0: log.error( 'URL retrieve: expected size does not match the received one' ) return False if timeout: signal.alarm( 0 ) if fileName: return True else: return urlData except urllib2.HTTPError, x: if x.code == 404: log.error( "URL retrieve: %s does not exist" % url ) if timeout: signal.alarm( 0 ) return False except urllib2.URLError: log.error( 'Timeout after %s seconds on transfer request for "%s"' % ( str( timeout ), url ) ) return False except Exception, x: if x == 'Timeout': log.error( 'Timeout after %s seconds on transfer request for "%s"' % ( str( timeout ), url ) ) if timeout: signal.alarm( 0 ) raise x class ObjectLoader( object ): """ Simplified class for loading objects from a DIRAC installation. Example: ol = ObjectLoader() object, modulePath = ol.loadObject( 'pilot', 'LaunchAgent' ) """ def __init__( self, baseModules, log ): """ init """ self.__rootModules = baseModules self.log = log def loadModule( self, modName, hideExceptions = False ): """ Auto search which root module has to be used """ for rootModule in self.__rootModules: impName = modName if rootModule: impName = "%s.%s" % ( rootModule, impName ) self.log.debug( "Trying to load %s" % impName ) module, parentPath = self.__recurseImport( impName, hideExceptions = hideExceptions ) #Error. Something cannot be imported. Return error if module is None: return None, None #Huge success! else: return module, parentPath #Nothing found, continue #Return nothing found return None, None def __recurseImport( self, modName, parentModule = None, hideExceptions = False ): """ Internal function to load modules """ if isinstance(modName, basestring): modName = modName.split( '.' 
) try: if parentModule: impData = imp.find_module( modName[0], parentModule.__path__ ) else: impData = imp.find_module( modName[0] ) impModule = imp.load_module( modName[0], *impData ) if impData[0]: impData[0].close() except ImportError, excp: if str( excp ).find( "No module named %s" % modName[0] ) == 0: return None, None errMsg = "Can't load %s in %s" % ( ".".join( modName ), parentModule.__path__[0] ) if not hideExceptions: self.log.exception( errMsg ) return None, None if len( modName ) == 1: return impModule, parentModule.__path__[0] return self.__recurseImport( modName[1:], impModule, hideExceptions = hideExceptions ) def loadObject( self, package, moduleName, command ): """ Load an object from inside a module """ loadModuleName = '%s.%s' % ( package, moduleName ) module, parentPath = self.loadModule( loadModuleName ) if module is None: return None, None try: commandObj = getattr( module, command ) return commandObj, os.path.join( parentPath, moduleName ) except AttributeError, e: self.log.error( 'Exception: %s' % str(e) ) return None, None def getCommand( params, commandName, log ): """ Get an instantiated command object for execution. Commands are looked in the following modules in the order: 1. <CommandExtension>Commands 2. pilotCommands 3. <Extension>.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands 4. <Extension>.WorkloadManagementSystem.PilotAgent.pilotCommands 5. DIRAC.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands 6. DIRAC.WorkloadManagementSystem.PilotAgent.pilotCommands Note that commands in 3.-6. can only be used of the the DIRAC installation has been done. DIRAC extensions are taken from -e ( --extraPackages ) option of the pilot script. """ extensions = params.commandExtensions modules = [ m + 'Commands' for m in extensions + ['pilot'] ] commandObject = None # Look for commands in the modules in the current directory first for module in modules: try: impData = imp.find_module( module ) commandModule = imp.load_module( module, *impData ) commandObject = getattr( commandModule, commandName ) except Exception, _e: pass if commandObject: return commandObject( params ), module if params.diracInstalled: diracExtensions = [] for ext in params.extensions: if not ext.endswith( 'DIRAC' ): diracExtensions.append( ext + 'DIRAC' ) else: diracExtensions.append( ext ) diracExtensions += ['DIRAC'] ol = ObjectLoader( diracExtensions, log ) for module in modules: commandObject, modulePath = ol.loadObject( 'WorkloadManagementSystem.PilotAgent', module, commandName ) if commandObject: return commandObject( params ), modulePath # No command could be instantitated return None, None class Logger( object ): """ Basic logger object, for use inside the pilot. Just using print. 
""" def __init__( self, name = 'Pilot', debugFlag = False, pilotOutput = 'pilot.out' ): self.debugFlag = debugFlag self.name = name self.out = pilotOutput def __outputMessage( self, msg, level, header ): if self.out: with open( self.out, 'a' ) as outputFile: for _line in msg.split( "\n" ): if header: outLine = "%s UTC %s [%s] %s" % ( time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime() ), level, self.name, _line ) print outLine if self.out: outputFile.write( outLine + '\n' ) else: print _line outputFile.write( _line + '\n' ) sys.stdout.flush() def setDebug( self ): self.debugFlag = True def debug( self, msg, header = True ): if self.debugFlag: self.__outputMessage( msg, "DEBUG", header ) def error( self, msg, header = True ): self.__outputMessage( msg, "ERROR", header ) def warn( self, msg, header = True ): self.__outputMessage( msg, "WARN", header ) def info( self, msg, header = True ): self.__outputMessage( msg, "INFO", header ) class CommandBase( object ): """ CommandBase is the base class for every command in the pilot commands toolbox """ def __init__( self, pilotParams, dummy='' ): """ c'tor Defines the logger and the pilot parameters """ self.pp = pilotParams self.log = Logger( self.__class__.__name__ ) self.debugFlag = False for o, _ in self.pp.optList: if o == '-d' or o == '--debug': self.log.setDebug() self.debugFlag = True self.log.debug( "\n\n Initialized command %s" % self.__class__ ) def executeAndGetOutput( self, cmd, environDict = None ): """ Execute a command on the worker node and get the output """ self.log.info( "Executing command %s" % cmd ) try: import subprocess # spawn new processes, connect to their input/output/error pipes, and obtain their return codes. _p = subprocess.Popen( "%s" % cmd, shell = True, env=environDict, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = False ) # standard output outData = _p.stdout.read().strip() for line in outData: sys.stdout.write( line ) sys.stdout.write( '\n' ) for line in _p.stderr: sys.stdout.write( line ) sys.stdout.write( '\n' ) # return code returnCode = _p.wait() self.log.debug( "Return code of %s: %d" % ( cmd, returnCode ) ) return (returnCode, outData) except ImportError: self.log.error( "Error importing subprocess" ) def exitWithError( self, errorCode ): """ Wrapper around sys.exit() """ self.log.info( "List of child processes of current PID:" ) retCode, _outData = self.executeAndGetOutput( "ps --forest -o pid,%%cpu,%%mem,tty,stat,time,cmd -g %d" % os.getpid() ) if retCode: self.log.error( "Failed to issue ps [ERROR %d] " % retCode ) sys.exit( errorCode ) class PilotParams( object ): """ Class that holds the structure with all the parameters to be used across all the commands """ MAX_CYCLES = 10 def __init__( self ): """ c'tor param names and defaults are defined here """ self.rootPath = os.getcwd() self.originalRootPath = os.getcwd() self.pilotRootPath = os.getcwd() self.workingDir = os.getcwd() self.optList = {} self.keepPythonPath = False self.debugFlag = False self.local = False self.commandExtensions = [] self.commands = ['GetPilotVersion', 'CheckWorkerNode', 'InstallDIRAC', 'ConfigureBasics', 'CheckCECapabilities', 'CheckWNCapabilities', 'ConfigureSite', 'ConfigureArchitecture', 'ConfigureCPURequirements', 'LaunchAgent'] self.extensions = [] self.tags = [] self.reqtags = [] self.site = "" self.setup = "" self.configServer = "" self.installation = "" self.ceName = "" self.ceType = '' self.queueName = "" self.platform = "" # in case users want to specify the max number of processors requested, per pilot 
self.maxNumberOfProcessors = 0 self.minDiskSpace = 2560 #MB self.pythonVersion = '27' self.userGroup = "" self.userDN = "" self.maxCycles = self.MAX_CYCLES self.flavour = 'DIRAC' self.gridVersion = '' self.pilotReference = '' self.releaseVersion = '' self.releaseProject = '' self.gateway = "" self.useServerCertificate = False self.pilotScriptName = '' self.genericOption = '' # DIRAC client installation environment self.diracInstalled = False self.diracExtensions = [] # Some commands can define environment necessary to execute subsequent commands self.installEnv = os.environ # If DIRAC is preinstalled this file will receive the updates of the local configuration self.localConfigFile = '' self.executeCmd = False self.configureScript = 'dirac-configure' self.architectureScript = 'dirac-platform' self.certsLocation = '%s/etc/grid-security' % self.workingDir self.pilotCFGFile = 'pilot.json' self.pilotCFGFileLocation = 'http://diracproject.web.cern.ch/diracproject/configs/' # Parameters that can be determined at runtime only self.queueParameters = {} # from CE description self.jobCPUReq = 900 # HS06s, here just a random value # Pilot command options self.cmdOpts = ( ( 'b', 'build', 'Force local compilation' ), ( 'd', 'debug', 'Set debug flag' ), ( 'e:', 'extraPackages=', 'Extra packages to install (comma separated)' ), ( 'E:', 'commandExtensions=', 'Python module with extra commands' ), ( 'X:', 'commands=', 'Pilot commands to execute commands' ), ( 'g:', 'grid=', 'lcg tools package version' ), ( 'h', 'help', 'Show this help' ), ( 'i:', 'python=', 'Use python<26|27> interpreter' ), ( 'k', 'keepPP', 'Do not clear PYTHONPATH on start' ), ( 'l:', 'project=', 'Project to install' ), ( 'p:', 'platform=', 'Use <platform> instead of local one' ), ( 'm:', 'maxNumberOfProcessors=', 'specify a max number of processors to use'), ( 'u:', 'url=', 'Use <url> to download tarballs' ), ( 'r:', 'release=', 'DIRAC release to install' ), ( 'n:', 'name=', 'Set <Site> as Site Name' ), ( 'D:', 'disk=', 'Require at least <space> MB available' ), ( 'M:', 'MaxCycles=', 'Maximum Number of JobAgent cycles to run' ), ( 'N:', 'Name=', 'CE Name' ), ( 'Q:', 'Queue=', 'Queue name' ), ( 'y:', 'CEType=', 'CE Type (normally InProcess)' ), ( 'S:', 'setup=', 'DIRAC Setup to use' ), ( 'C:', 'configurationServer=', 'Configuration servers to use' ), ( 'G:', 'Group=', 'DIRAC Group to use' ), ( 'O:', 'OwnerDN', 'Pilot OwnerDN (for private pilots)' ), ( 'U', 'Upload', 'Upload compiled distribution (if built)' ), ( 'V:', 'installation=', 'Installation configuration file' ), ( 'W:', 'gateway=', 'Configure <gateway> as DIRAC Gateway during installation' ), ( 's:', 'section=', 'Set base section for relative parsed options' ), ( 'o:', 'option=', 'Option=value to add' ), ( 'c', 'cert', 'Use server certificate instead of proxy' ), ( 'C:', 'certLocation=', 'Specify server certificate location' ), ( 'L:', 'pilotCFGLocation=', 'Specify pilot CFG location' ), ( 'F:', 'pilotCFGFile=', 'Specify pilot CFG file' ), ( 'R:', 'reference=', 'Use this pilot reference' ), ( 'x:', 'execute=', 'Execute instead of JobAgent' ), ( 't:', 'tag=', 'extra tags for resource description' ), ( '', 'requiredTag=', 'extra required tags for resource description') ) self.__initOptions() def __initOptions( self ): """ Parses and interpret options on the command line """ self.optList, __args__ = getopt.getopt( sys.argv[1:], "".join( [ opt[0] for opt in self.cmdOpts ] ), [ opt[1] for opt in self.cmdOpts ] ) for o, v in self.optList: if o == '-E' or o == '--commandExtensions': 
self.commandExtensions = v.split( ',' ) elif o == '-X' or o == '--commands': self.commands = v.split( ',' ) elif o == '-e' or o == '--extraPackages': self.extensions = v.split( ',' ) elif o == '-n' or o == '--name': self.site = v elif o == '-N' or o == '--Name': self.ceName = v elif o == '-y' or o == '--CEType': self.ceType = v elif o == '-Q' or o == '--Queue': self.queueName = v elif o == '-R' or o == '--reference': self.pilotReference = v elif o == '-k' or o == '--keepPP': self.keepPythonPath = True elif o == '-d' or o == '--debug': self.debugFlag = True elif o in ( '-S', '--setup' ): self.setup = v elif o in ( '-C', '--configurationServer' ): self.configServer = v elif o in ( '-G', '--Group' ): self.userGroup = v elif o in ( '-x', '--execute' ): self.executeCmd = v elif o in ( '-O', '--OwnerDN' ): self.userDN = v elif o in ( '-V', '--installation' ): self.installation = v elif o == '-p' or o == '--platform': self.platform = v elif o == '-m' or o == '--maxNumberOfProcessors': self.maxNumberOfProcessors = v elif o == '-D' or o == '--disk': try: self.minDiskSpace = int( v ) except ValueError: pass elif o == '-r' or o == '--release': self.releaseVersion = v.split(',',1)[0] elif o in ( '-l', '--project' ): self.releaseProject = v elif o in ( '-W', '--gateway' ): self.gateway = v elif o == '-c' or o == '--cert': self.useServerCertificate = True elif o == '-C' or o == '--certLocation': self.certsLocation = v elif o == '-L' or o == '--pilotCFGLocation': self.pilotCFGFileLocation = v elif o == '-F' or o == '--pilotCFGFile': self.pilotCFGFile = v elif o == '-M' or o == '--MaxCycles': try: self.maxCycles = min( self.MAX_CYCLES, int( v ) ) except ValueError: pass elif o in ( '-o', '--option' ): self.genericOption = v elif o in ( '-t', '--tag' ): self.tags.append(v) elif o == '--requiredTag': self.reqtags.append(v)
petricm/DIRAC
WorkloadManagementSystem/PilotAgent/pilotTools.py
Python
gpl-3.0
18,473
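A sketch of how the helpers above are meant to compose inside a pilot script (assuming the file is importable as pilotTools; pilot command classes are expected to subclass CommandBase and provide an execute() method, which this file itself does not define):

from pilotTools import Logger, PilotParams, getCommand

log = Logger( 'PilotWrapper' )   # echoes to stdout and appends to pilot.out
pp = PilotParams()               # parses sys.argv via getopt
for commandName in pp.commands:
  command, module = getCommand( pp, commandName, log )
  if command is None:
    log.error( 'Command %s could not be instantiated' % commandName )
    continue
  log.info( 'Running %s (from %s)' % ( commandName, module ) )
  command.execute()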
LISTSHINE_API_BASE = 'https://send.listshine.com/api/v1/'
sircco/pylistshine
pylistshine/constants.py
Python
apache-2.0
58
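This module's single constant is the versioned base URL of the ListShine API. A hedged sketch of how a client might build on it (the 'lists' endpoint and token header are hypothetical, shown only to illustrate joining paths onto the base; requests is assumed to be available):

import requests

from pylistshine.constants import LISTSHINE_API_BASE

# hypothetical endpoint name; consult the ListShine API docs for real ones
resp = requests.get(LISTSHINE_API_BASE + 'lists',
                    headers={'Authorization': 'Token YOUR-API-TOKEN'})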
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import datetime
import decimal
import unittest

from django.template.defaultfilters import (
    add, addslashes, capfirst, center, cut, date, default, default_if_none,
    dictsort, dictsortreversed, divisibleby, escape, escapejs_filter,
    filesizeformat, first, fix_ampersands_filter, floatformat, force_escape,
    get_digit, iriencode, join, length, length_is, linebreaksbr,
    linebreaks_filter, linenumbers, ljust, lower, make_list,
    phone2numeric_filter, pluralize, removetags, rjust, slice_filter, slugify,
    stringformat, striptags, time, timesince_filter, timeuntil_filter, title,
    truncatewords, truncatewords_html, unordered_list, upper, urlencode,
    urlize, urlizetrunc, wordcount, wordwrap, yesno,
)
from django.test import TestCase
from django.utils import six
from django.utils import translation
from django.utils.safestring import SafeData
from django.utils.encoding import python_2_unicode_compatible


class DefaultFiltersTests(TestCase):

    def test_floatformat(self):
        self.assertEqual(floatformat(7.7), '7.7')
        self.assertEqual(floatformat(7.0), '7')
        self.assertEqual(floatformat(0.7), '0.7')
        self.assertEqual(floatformat(0.07), '0.1')
        self.assertEqual(floatformat(0.007), '0.0')
        self.assertEqual(floatformat(0.0), '0')
        self.assertEqual(floatformat(7.7, 3), '7.700')
        self.assertEqual(floatformat(6.000000, 3), '6.000')
        self.assertEqual(floatformat(6.200000, 3), '6.200')
        self.assertEqual(floatformat(6.200000, -3), '6.200')
        self.assertEqual(floatformat(13.1031, -3), '13.103')
        self.assertEqual(floatformat(11.1197, -2), '11.12')
        self.assertEqual(floatformat(11.0000, -2), '11')
        self.assertEqual(floatformat(11.000001, -2), '11.00')
        self.assertEqual(floatformat(8.2798, 3), '8.280')
        self.assertEqual(floatformat(5555.555, 2), '5555.56')
        self.assertEqual(floatformat(001.3000, 2), '1.30')
        self.assertEqual(floatformat(0.12345, 2), '0.12')
        self.assertEqual(floatformat(decimal.Decimal('555.555'), 2), '555.56')
        self.assertEqual(floatformat(decimal.Decimal('09.000')), '9')
        self.assertEqual(floatformat('foo'), '')
        self.assertEqual(floatformat(13.1031, 'bar'), '13.1031')
        self.assertEqual(floatformat(18.125, 2), '18.13')
        self.assertEqual(floatformat('foo', 'bar'), '')
        self.assertEqual(floatformat('¿Cómo esta usted?'), '')
        self.assertEqual(floatformat(None), '')

        # Check that we're not converting to scientific notation.
        self.assertEqual(floatformat(0, 6), '0.000000')
        self.assertEqual(floatformat(0, 7), '0.0000000')
        self.assertEqual(floatformat(0, 10), '0.0000000000')
        self.assertEqual(floatformat(0.000000000000000000015, 20),
                         '0.00000000000000000002')

        pos_inf = float(1e30000)
        self.assertEqual(floatformat(pos_inf), six.text_type(pos_inf))

        neg_inf = float(-1e30000)
        self.assertEqual(floatformat(neg_inf), six.text_type(neg_inf))

        nan = pos_inf / pos_inf
        self.assertEqual(floatformat(nan), six.text_type(nan))

        class FloatWrapper(object):
            def __init__(self, value):
                self.value = value

            def __float__(self):
                return self.value

        self.assertEqual(floatformat(FloatWrapper(11.000001), -2), '11.00')

        # Regression for #15789
        decimal_ctx = decimal.getcontext()
        old_prec, decimal_ctx.prec = decimal_ctx.prec, 2
        try:
            self.assertEqual(floatformat(1.2345, 2), '1.23')
            self.assertEqual(floatformat(15.2042, -3), '15.204')
            self.assertEqual(floatformat(1.2345, '2'), '1.23')
            self.assertEqual(floatformat(15.2042, '-3'), '15.204')
            self.assertEqual(floatformat(decimal.Decimal('1.2345'), 2), '1.23')
            self.assertEqual(floatformat(decimal.Decimal('15.2042'), -3), '15.204')
        finally:
            decimal_ctx.prec = old_prec

    def test_floatformat_py2_fail(self):
        self.assertEqual(floatformat(1.00000000000000015, 16),
                         '1.0000000000000002')

    # The test above fails because of Python 2's float handling. Floats with
    # many zeroes after the decimal point should be passed in as another type
    # such as unicode or Decimal.
    if six.PY2:
        test_floatformat_py2_fail = unittest.expectedFailure(test_floatformat_py2_fail)

    def test_addslashes(self):
        self.assertEqual(addslashes('"double quotes" and \'single quotes\''),
                         '\\"double quotes\\" and \\\'single quotes\\\'')
        self.assertEqual(addslashes(r'\ : backslashes, too'),
                         '\\\\ : backslashes, too')

    def test_capfirst(self):
        self.assertEqual(capfirst('hello world'), 'Hello world')

    def test_escapejs(self):
        self.assertEqual(escapejs_filter('"double quotes" and \'single quotes\''),
                         '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')
        self.assertEqual(escapejs_filter(r'\ : backslashes, too'),
                         '\\u005C : backslashes, too')
        self.assertEqual(escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'),
                         'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')
        self.assertEqual(escapejs_filter(r'<script>and this</script>'),
                         '\\u003Cscript\\u003Eand this\\u003C/script\\u003E')
        self.assertEqual(
            escapejs_filter('paragraph separator:\u2029and line separator:\u2028'),
            'paragraph separator:\\u2029and line separator:\\u2028')

    def test_fix_ampersands(self):
        self.assertEqual(fix_ampersands_filter('Jack & Jill & Jeroboam'),
                         'Jack &amp; Jill &amp; Jeroboam')

    def test_linenumbers(self):
        self.assertEqual(linenumbers('line 1\nline 2'),
                         '1. line 1\n2. line 2')
        self.assertEqual(linenumbers('\n'.join(['x'] * 10)),
                         '01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. '
                         'x\n08. x\n09. x\n10. x')

    def test_lower(self):
        self.assertEqual(lower('TEST'), 'test')

        # uppercase E umlaut
        self.assertEqual(lower('\xcb'), '\xeb')

    def test_make_list(self):
        self.assertEqual(make_list('abc'), ['a', 'b', 'c'])
        self.assertEqual(make_list(1234), ['1', '2', '3', '4'])

    def test_slugify(self):
        self.assertEqual(slugify(' Jack & Jill like numbers 1,2,3 and 4 and'
                                 ' silly characters ?%.$!/'),
                         'jack-jill-like-numbers-123-and-4-and-silly-characters')
        self.assertEqual(slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
                         'un-elephant-a-loree-du-bois')

    def test_stringformat(self):
        self.assertEqual(stringformat(1, '03d'), '001')
        self.assertEqual(stringformat(1, 'z'), '')

    def test_title(self):
        self.assertEqual(title('a nice title, isn\'t it?'),
                         "A Nice Title, Isn't It?")
        self.assertEqual(title('discoth\xe8que'), 'Discoth\xe8que')

    def test_truncatewords(self):
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 1), 'A ...')
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 5),
            'A sentence with a few ...')
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 100),
            'A sentence with a few words in it')
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 'not a number'),
            'A sentence with a few words in it')

    def test_truncatewords_html(self):
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 0), '')
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 2),
            '<p>one <a href="#">two ...</a></p>')
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 4),
            '<p>one <a href="#">two - three <br>four ...</a></p>')
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 5),
            '<p>one <a href="#">two - three <br>four</a> five</p>')
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 100),
            '<p>one <a href="#">two - three <br>four</a> five</p>')
        self.assertEqual(truncatewords_html(
            '\xc5ngstr\xf6m was here', 1), '\xc5ngstr\xf6m ...')
        self.assertEqual(truncatewords_html(
            '<i>Buenos d&iacute;as! &#x00bf;C&oacute;mo est&aacute;?</i>', 3),
            '<i>Buenos d&iacute;as! &#x00bf;C&oacute;mo ...</i>')

    def test_upper(self):
        self.assertEqual(upper('Mixed case input'), 'MIXED CASE INPUT')
        # lowercase e umlaut
        self.assertEqual(upper('\xeb'), '\xcb')

    def test_urlencode(self):
        self.assertEqual(urlencode('fran\xe7ois & jill'),
                         'fran%C3%A7ois%20%26%20jill')
        self.assertEqual(urlencode(1), '1')

    def test_iriencode(self):
        self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'),
                         'S%C3%B8r-Tr%C3%B8ndelag')
        self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')),
                         'fran%C3%A7ois%20%26%20jill')

    def test_urlizetrunc(self):
        self.assertEqual(urlizetrunc('http://short.com/', 20), '<a href='
            '"http://short.com/" rel="nofollow">http://short.com/</a>')

        self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'
            '&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'
            'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'
            'meta=" rel="nofollow">http://www.google...</a>')

        self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'
            '&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'
            'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search'
            '&meta=" rel="nofollow">http://www.google...</a>')

        # Check truncating of URIs which are the exact length
        uri = 'http://31characteruri.com/test/'
        self.assertEqual(len(uri), 31)

        self.assertEqual(urlizetrunc(uri, 31),
            '<a href="http://31characteruri.com/test/" rel="nofollow">'
            'http://31characteruri.com/test/</a>')

        self.assertEqual(urlizetrunc(uri, 30),
            '<a href="http://31characteruri.com/test/" rel="nofollow">'
            'http://31characteruri.com/t...</a>')

        self.assertEqual(urlizetrunc(uri, 2),
            '<a href="http://31characteruri.com/test/"'
            ' rel="nofollow">...</a>')

    def test_urlize(self):
        # Check normal urlize
        self.assertEqual(urlize('http://google.com'),
            '<a href="http://google.com" rel="nofollow">http://google.com</a>')
        self.assertEqual(urlize('http://google.com/'),
            '<a href="http://google.com/" rel="nofollow">http://google.com/</a>')
        self.assertEqual(urlize('www.google.com'),
            '<a href="http://www.google.com" rel="nofollow">www.google.com</a>')
        self.assertEqual(urlize('djangoproject.org'),
            '<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>')
        self.assertEqual(urlize('info@djangoproject.org'),
            '<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>')

        # Check urlize with https addresses
        self.assertEqual(urlize('https://google.com'),
            '<a href="https://google.com" rel="nofollow">https://google.com</a>')

        # Check urlize doesn't overquote already quoted urls - see #9655
        # The teststring is the urlquoted version of 'http://hi.baidu.com/重新开始'
        self.assertEqual(urlize('http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B'),
            '<a href="http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B" rel="nofollow">'
            'http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B</a>')
        self.assertEqual(urlize('www.mystore.com/30%OffCoupons!'),
            '<a href="http://www.mystore.com/30%25OffCoupons!" rel="nofollow">'
            'www.mystore.com/30%OffCoupons!</a>')
        self.assertEqual(urlize('http://en.wikipedia.org/wiki/Caf%C3%A9'),
            '<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Caf%C3%A9</a>')
        self.assertEqual(urlize('http://en.wikipedia.org/wiki/Café'),
            '<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Café</a>')

        # Check urlize keeps balanced parentheses - see #11911
        self.assertEqual(urlize('http://en.wikipedia.org/wiki/Django_(web_framework)'),
            '<a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Django_(web_framework)</a>')
        self.assertEqual(urlize('(see http://en.wikipedia.org/wiki/Django_(web_framework))'),
            '(see <a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Django_(web_framework)</a>)')

        # Check urlize adds nofollow properly - see #12183
        self.assertEqual(urlize('foo@bar.com or www.bar.com'),
            '<a href="mailto:foo@bar.com">foo@bar.com</a> or '
            '<a href="http://www.bar.com" rel="nofollow">www.bar.com</a>')

        # Check urlize handles IDN correctly - see #13704
        self.assertEqual(urlize('http://c✶.ws'),
            '<a href="http://xn--c-lgq.ws" rel="nofollow">http://c✶.ws</a>')
        self.assertEqual(urlize('www.c✶.ws'),
            '<a href="http://www.xn--c-lgq.ws" rel="nofollow">www.c✶.ws</a>')
        self.assertEqual(urlize('c✶.org'),
            '<a href="http://xn--c-lgq.org" rel="nofollow">c✶.org</a>')
        self.assertEqual(urlize('info@c✶.org'),
            '<a href="mailto:info@xn--c-lgq.org">info@c✶.org</a>')

        # Check urlize doesn't highlight malformed URIs - see #16395
        self.assertEqual(urlize('http:///www.google.com'), 'http:///www.google.com')
        self.assertEqual(urlize('http://.google.com'), 'http://.google.com')
        self.assertEqual(urlize('http://@foo.com'), 'http://@foo.com')

        # Check urlize accepts more TLDs - see #16656
        self.assertEqual(urlize('usa.gov'),
            '<a href="http://usa.gov" rel="nofollow">usa.gov</a>')

        # Check urlize don't crash on invalid email with dot-starting domain - see #17592
        self.assertEqual(urlize('email@.stream.ru'), 'email@.stream.ru')

        # Check urlize accepts uppercased URL schemes - see #18071
        self.assertEqual(urlize('HTTPS://github.com/'),
            '<a href="https://github.com/" rel="nofollow">HTTPS://github.com/</a>')

        # Check urlize trims trailing period when followed by parenthesis - see #18644
        self.assertEqual(urlize('(Go to http://www.example.com/foo.)'),
            '(Go to <a href="http://www.example.com/foo" rel="nofollow">http://www.example.com/foo</a>.)')

        # Check urlize handles brackets properly (#19070)
        self.assertEqual(urlize('[see www.example.com]'),
            '[see <a href="http://www.example.com" rel="nofollow">www.example.com</a>]')
        self.assertEqual(urlize('see test[at[example.com'),
            'see <a href="http://test[at[example.com" rel="nofollow">test[at[example.com</a>')
        self.assertEqual(urlize('[http://168.192.0.1](http://168.192.0.1)'),
            '[<a href="http://168.192.0.1](http://168.192.0.1)" rel="nofollow">'
            'http://168.192.0.1](http://168.192.0.1)</a>')

        # Check urlize works with IPv4/IPv6 addresses
        self.assertEqual(urlize('http://192.168.0.15/api/9'),
            '<a href="http://192.168.0.15/api/9" rel="nofollow">http://192.168.0.15/api/9</a>')
        self.assertEqual(urlize('http://[2001:db8:cafe::2]/api/9'),
            '<a href="http://[2001:db8:cafe::2]/api/9" rel="nofollow">http://[2001:db8:cafe::2]/api/9</a>')

        # Check urlize correctly include quotation marks in links - #20364
        self.assertEqual(urlize('before "hi@example.com" afterwards'),
            'before "<a href="mailto:hi@example.com">hi@example.com</a>" afterwards')
        self.assertEqual(urlize('before hi@example.com" afterwards'),
            'before <a href="mailto:hi@example.com">hi@example.com</a>" afterwards')
        self.assertEqual(urlize('before "hi@example.com afterwards'),
            'before "<a href="mailto:hi@example.com">hi@example.com</a> afterwards')
        self.assertEqual(urlize('before \'hi@example.com\' afterwards'),
            'before \'<a href="mailto:hi@example.com">hi@example.com</a>\' afterwards')
        self.assertEqual(urlize('before hi@example.com\' afterwards'),
            'before <a href="mailto:hi@example.com">hi@example.com</a>\' afterwards')
        self.assertEqual(urlize('before \'hi@example.com afterwards'),
            'before \'<a href="mailto:hi@example.com">hi@example.com</a> afterwards')

        # Check urlize copes with commas following URLs in quotes - see #20364
        self.assertEqual(urlize('Email us at "hi@example.com", or phone us at +xx.yy'),
            'Email us at "<a href="mailto:hi@example.com">hi@example.com</a>", or phone us at +xx.yy')

    def test_wordcount(self):
        self.assertEqual(wordcount(''), 0)
        self.assertEqual(wordcount('oneword'), 1)
        self.assertEqual(wordcount('lots of words'), 3)

        self.assertEqual(wordwrap('this is a long paragraph of text that '
            'really needs to be wrapped I\'m afraid', 14),
            "this is a long\nparagraph of\ntext that\nreally needs\nto be "
            "wrapped\nI'm afraid")

        self.assertEqual(wordwrap('this is a short paragraph of text.\n  '
            'But this line should be indented', 14),
            'this is a\nshort\nparagraph of\ntext.\n  But this\nline '
            'should be\nindented')

        self.assertEqual(wordwrap('this is a short paragraph of text.\n  '
            'But this line should be indented', 15), 'this is a short\n'
            'paragraph of\ntext.\n  But this line\nshould be\nindented')

    def test_rjust(self):
        self.assertEqual(ljust('test', 10), 'test      ')
        self.assertEqual(ljust('test', 3), 'test')
        self.assertEqual(rjust('test', 10), '      test')
        self.assertEqual(rjust('test', 3), 'test')

    def test_center(self):
        self.assertEqual(center('test', 6), ' test ')

    def test_cut(self):
        self.assertEqual(cut('a string to be mangled', 'a'),
                         ' string to be mngled')
        self.assertEqual(cut('a string to be mangled', 'ng'),
                         'a stri to be maled')
        self.assertEqual(cut('a string to be mangled', 'strings'),
                         'a string to be mangled')

    def test_force_escape(self):
        escaped = force_escape('<some html & special characters > here')
        self.assertEqual(
            escaped, '&lt;some html &amp; special characters &gt; here')
        self.assertIsInstance(escaped, SafeData)
        self.assertEqual(
            force_escape('<some html & special characters > here ĐÅ€£'),
            '&lt;some html &amp; special characters &gt; here'
            ' \u0110\xc5\u20ac\xa3')

    def test_linebreaks(self):
        self.assertEqual(linebreaks_filter('line 1'), '<p>line 1</p>')
        self.assertEqual(linebreaks_filter('line 1\nline 2'),
                         '<p>line 1<br />line 2</p>')
        self.assertEqual(linebreaks_filter('line 1\rline 2'),
                         '<p>line 1<br />line 2</p>')
        self.assertEqual(linebreaks_filter('line 1\r\nline 2'),
                         '<p>line 1<br />line 2</p>')

    def test_linebreaksbr(self):
        self.assertEqual(linebreaksbr('line 1\nline 2'),
                         'line 1<br />line 2')
        self.assertEqual(linebreaksbr('line 1\rline 2'),
                         'line 1<br />line 2')
        self.assertEqual(linebreaksbr('line 1\r\nline 2'),
                         'line 1<br />line 2')

    def test_removetags(self):
        self.assertEqual(removetags('some <b>html</b> with <script>alert'
            '("You smell")</script> disallowed <img /> tags', 'script img'),
            'some <b>html</b> with alert("You smell") disallowed  tags')
        self.assertEqual(striptags('some <b>html</b> with <script>alert'
            '("You smell")</script> disallowed <img /> tags'),
            'some html with alert("You smell") disallowed  tags')

    def test_dictsort(self):
        sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},
                                 {'age': 63, 'name': 'Ra Ra Rasputin'},
                                 {'name': 'Jonny B Goode', 'age': 18}],
                                'age')

        self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
            [[('age', 18), ('name', 'Jonny B Goode')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 63), ('name', 'Ra Ra Rasputin')]])

        # If it gets passed a list of something else different from
        # dictionaries it should fail silently
        self.assertEqual(dictsort([1, 2, 3], 'age'), '')
        self.assertEqual(dictsort('Hello!', 'age'), '')
        self.assertEqual(dictsort({'a': 1}, 'age'), '')
        self.assertEqual(dictsort(1, 'age'), '')

    def test_dictsort_complex_sorting_key(self):
        """
        Since dictsort uses template.Variable under the hood, it can sort
        on keys like 'foo.bar'.
        """
        data = [
            {'foo': {'bar': 1, 'baz': 'c'}},
            {'foo': {'bar': 2, 'baz': 'b'}},
            {'foo': {'bar': 3, 'baz': 'a'}},
        ]
        sorted_data = dictsort(data, 'foo.baz')

        self.assertEqual([d['foo']['bar'] for d in sorted_data], [3, 2, 1])

    def test_dictsortreversed(self):
        sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},
                                         {'age': 63, 'name': 'Ra Ra Rasputin'},
                                         {'name': 'Jonny B Goode', 'age': 18}],
                                        'age')

        self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
            [[('age', 63), ('name', 'Ra Ra Rasputin')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 18), ('name', 'Jonny B Goode')]])

        # If it gets passed a list of something else different from
        # dictionaries it should fail silently
        self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
        self.assertEqual(dictsortreversed('Hello!', 'age'), '')
        self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
        self.assertEqual(dictsortreversed(1, 'age'), '')

    def test_first(self):
        self.assertEqual(first([0, 1, 2]), 0)
        self.assertEqual(first(''), '')
        self.assertEqual(first('test'), 't')

    def test_join(self):
        self.assertEqual(join([0, 1, 2], 'glue'), '0glue1glue2')

    def test_length(self):
        self.assertEqual(length('1234'), 4)
        self.assertEqual(length([1, 2, 3, 4]), 4)
        self.assertEqual(length_is([], 0), True)
        self.assertEqual(length_is([], 1), False)
        self.assertEqual(length_is('a', 1), True)
        self.assertEqual(length_is('a', 10), False)

    def test_slice(self):
        self.assertEqual(slice_filter('abcdefg', '0'), '')
        self.assertEqual(slice_filter('abcdefg', '1'), 'a')
        self.assertEqual(slice_filter('abcdefg', '-1'), 'abcdef')
        self.assertEqual(slice_filter('abcdefg', '1:2'), 'b')
        self.assertEqual(slice_filter('abcdefg', '1:3'), 'bc')
        self.assertEqual(slice_filter('abcdefg', '0::2'), 'aceg')

    def test_unordered_list(self):
        self.assertEqual(unordered_list(['item 1', 'item 2']),
                         '\t<li>item 1</li>\n\t<li>item 2</li>')
        self.assertEqual(unordered_list(['item 1', ['item 1.1']]),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')

        self.assertEqual(
            unordered_list(['item 1', ['item 1.1', 'item1.2'], 'item 2']),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'
            '</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>')

        self.assertEqual(
            unordered_list(['item 1', ['item 1.1', ['item 1.1.1', ['item 1.1.1.1']]]]),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'
            'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'
            '</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>')

        self.assertEqual(unordered_list(
            ['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
            '\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'
            'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'
            '\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>')

        @python_2_unicode_compatible
        class ULItem(object):
            def __init__(self, title):
                self.title = title

            def __str__(self):
                return 'ulitem-%s' % str(self.title)

        a = ULItem('a')
        b = ULItem('b')
        self.assertEqual(unordered_list([a, b]),
                         '\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')

        # Old format for unordered lists should still work
        self.assertEqual(unordered_list(['item 1', []]), '\t<li>item 1</li>')

        self.assertEqual(unordered_list(['item 1', [['item 1.1', []]]]),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')

        self.assertEqual(unordered_list(['item 1', [['item 1.1', []],
            ['item 1.2', []]]]), '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'
            '</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>')

        self.assertEqual(unordered_list(['States', [['Kansas', [['Lawrence',
            []], ['Topeka', []]]], ['Illinois', []]]]), '\t<li>States\n\t'
            '<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'
            '\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>'
            'Illinois</li>\n\t</ul>\n\t</li>')

    def test_add(self):
        self.assertEqual(add('1', '2'), 3)

    def test_get_digit(self):
        self.assertEqual(get_digit(123, 1), 3)
        self.assertEqual(get_digit(123, 2), 2)
        self.assertEqual(get_digit(123, 3), 1)
        self.assertEqual(get_digit(123, 4), 0)
        self.assertEqual(get_digit(123, 0), 123)
        self.assertEqual(get_digit('xyz', 0), 'xyz')

    def test_date(self):
        # real testing of date() is in dateformat.py
        self.assertEqual(date(datetime.datetime(2005, 12, 29), "d F Y"),
                         '29 December 2005')
        self.assertEqual(date(datetime.datetime(2005, 12, 29), r'jS \o\f F'),
                         '29th of December')

    def test_time(self):
        # real testing of time() is done in dateformat.py
        self.assertEqual(time(datetime.time(13), "h"), '01')
        self.assertEqual(time(datetime.time(0), "h"), '12')

    def test_timesince(self):
        # real testing is done in timesince.py, where we can provide our own 'now'
        # NOTE: \xa0 avoids wrapping between value and unit
        self.assertEqual(
            timesince_filter(datetime.datetime.now() - datetime.timedelta(1)),
            '1\xa0day')

        self.assertEqual(
            timesince_filter(datetime.datetime(2005, 12, 29),
                             datetime.datetime(2005, 12, 30)),
            '1\xa0day')

    def test_timeuntil(self):
        # NOTE: \xa0 avoids wrapping between value and unit
        self.assertEqual(
            timeuntil_filter(datetime.datetime.now() + datetime.timedelta(1, 1)),
            '1\xa0day')

        self.assertEqual(
            timeuntil_filter(datetime.datetime(2005, 12, 30),
                             datetime.datetime(2005, 12, 29)),
            '1\xa0day')

    def test_default(self):
        self.assertEqual(default("val", "default"), 'val')
        self.assertEqual(default(None, "default"), 'default')
        self.assertEqual(default('', "default"), 'default')

    def test_if_none(self):
        self.assertEqual(default_if_none("val", "default"), 'val')
        self.assertEqual(default_if_none(None, "default"), 'default')
        self.assertEqual(default_if_none('', "default"), '')

    def test_divisibleby(self):
        self.assertEqual(divisibleby(4, 2), True)
        self.assertEqual(divisibleby(4, 3), False)

    def test_yesno(self):
        self.assertEqual(yesno(True), 'yes')
        self.assertEqual(yesno(False), 'no')
        self.assertEqual(yesno(None), 'maybe')
        self.assertEqual(yesno(True, 'certainly,get out of town,perhaps'),
                         'certainly')
        self.assertEqual(yesno(False, 'certainly,get out of town,perhaps'),
                         'get out of town')
        self.assertEqual(yesno(None, 'certainly,get out of town,perhaps'),
                         'perhaps')
        self.assertEqual(yesno(None, 'certainly,get out of town'),
                         'get out of town')

    def test_filesizeformat(self):
        # NOTE: \xa0 avoids wrapping between value and unit
        self.assertEqual(filesizeformat(1023), '1023\xa0bytes')
        self.assertEqual(filesizeformat(1024), '1.0\xa0KB')
        self.assertEqual(filesizeformat(10 * 1024), '10.0\xa0KB')
        self.assertEqual(filesizeformat(1024 * 1024 - 1), '1024.0\xa0KB')
        self.assertEqual(filesizeformat(1024 * 1024), '1.0\xa0MB')
        self.assertEqual(filesizeformat(1024 * 1024 * 50), '50.0\xa0MB')
        self.assertEqual(filesizeformat(1024 * 1024 * 1024 - 1), '1024.0\xa0MB')
        self.assertEqual(filesizeformat(1024 * 1024 * 1024), '1.0\xa0GB')
        self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024), '1.0\xa0TB')
        self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024),
                         '1.0\xa0PB')
        self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024 * 2000),
                         '2000.0\xa0PB')
        self.assertEqual(filesizeformat(complex(1, -1)), '0\xa0bytes')
        self.assertEqual(filesizeformat(""), '0\xa0bytes')
        self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"),
                         '0\xa0bytes')

    def test_pluralize(self):
        self.assertEqual(pluralize(1), '')
        self.assertEqual(pluralize(0), 's')
        self.assertEqual(pluralize(2), 's')
        self.assertEqual(pluralize([1]), '')
        self.assertEqual(pluralize([]), 's')
        self.assertEqual(pluralize([1, 2, 3]), 's')
        self.assertEqual(pluralize(1, 'es'), '')
        self.assertEqual(pluralize(0, 'es'), 'es')
        self.assertEqual(pluralize(2, 'es'), 'es')
        self.assertEqual(pluralize(1, 'y,ies'), 'y')
        self.assertEqual(pluralize(0, 'y,ies'), 'ies')
        self.assertEqual(pluralize(2, 'y,ies'), 'ies')
        self.assertEqual(pluralize(0, 'y,ies,error'), '')

    def test_phone2numeric(self):
        self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')

    def test_non_string_input(self):
        # Filters shouldn't break if passed non-strings
        self.assertEqual(addslashes(123), '123')
        self.assertEqual(linenumbers(123), '1. 123')
        self.assertEqual(lower(123), '123')
        self.assertEqual(make_list(123), ['1', '2', '3'])
        self.assertEqual(slugify(123), '123')
        self.assertEqual(title(123), '123')
        self.assertEqual(truncatewords(123, 2), '123')
        self.assertEqual(upper(123), '123')
        self.assertEqual(urlencode(123), '123')
        self.assertEqual(urlize(123), '123')
        self.assertEqual(urlizetrunc(123, 1), '123')
        self.assertEqual(wordcount(123), 1)
        self.assertEqual(wordwrap(123, 2), '123')
        self.assertEqual(ljust('123', 4), '123 ')
        self.assertEqual(rjust('123', 4), ' 123')
        self.assertEqual(center('123', 5), ' 123 ')
        self.assertEqual(center('123', 6), ' 123  ')
        self.assertEqual(cut(123, '2'), '13')
        self.assertEqual(escape(123), '123')
        self.assertEqual(linebreaks_filter(123), '<p>123</p>')
        self.assertEqual(linebreaksbr(123), '123')
        self.assertEqual(removetags(123, 'a'), '123')
        self.assertEqual(striptags(123), '123')


class DefaultFiltersI18NTests(TestCase):

    def test_localized_filesizeformat(self):
        # NOTE: \xa0 avoids wrapping between value and unit
        with self.settings(USE_L10N=True), translation.override('de'):
            self.assertEqual(filesizeformat(1023), '1023\xa0Bytes')
            self.assertEqual(filesizeformat(1024), '1,0\xa0KB')
            self.assertEqual(filesizeformat(10 * 1024), '10,0\xa0KB')
            self.assertEqual(filesizeformat(1024 * 1024 - 1), '1024,0\xa0KB')
            self.assertEqual(filesizeformat(1024 * 1024), '1,0\xa0MB')
            self.assertEqual(filesizeformat(1024 * 1024 * 50), '50,0\xa0MB')
            self.assertEqual(filesizeformat(1024 * 1024 * 1024 - 1), '1024,0\xa0MB')
            self.assertEqual(filesizeformat(1024 * 1024 * 1024), '1,0\xa0GB')
            self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024), '1,0\xa0TB')
            self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024),
                             '1,0\xa0PB')
            self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024 * 2000),
                             '2000,0\xa0PB')
            self.assertEqual(filesizeformat(complex(1, -1)), '0\xa0Bytes')
            self.assertEqual(filesizeformat(""), '0\xa0Bytes')
            self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"),
                             '0\xa0Bytes')
TimBuckley/effective_django
tests/defaultfilters/tests.py
Python
bsd-3-clause
34,161
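The suite above doubles as a reference for what each filter returns when called directly, outside the template layer. A small sketch (assumes Django is installed and a settings module is configured, which some filters require; the expected values are taken straight from the assertions above):

from django.template.defaultfilters import floatformat, filesizeformat, slugify

floatformat(5555.555, 2)     # '5555.56'
filesizeformat(1024 * 1024)  # '1.0\xa0MB' (non-breaking space before the unit)
slugify(' Jack & Jill ')     # 'jack-jill'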
#!/usr/bin/python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE

import sys
import os.path
import unittest

sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), "..")))

from firstboot import AboutFirstbootDialog


class TestExample(unittest.TestCase):
    def setUp(self):
        self.AboutFirstbootDialog_members = [
            'AboutFirstbootDialog', 'get_builder', 'gettext', 'gtk']

    def test_AboutFirstbootDialog_members(self):
        all_members = dir(AboutFirstbootDialog)
        public_members = [x for x in all_members if not x.startswith('_')]
        public_members.sort()
        self.assertEqual(self.AboutFirstbootDialog_members, public_members)


if __name__ == '__main__':
    unittest.main()
System25/gecosws-config-assistant
tests/test_example.py
Python
gpl-2.0
836
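The test above pins down a module's public surface by comparing dir() output against an expected list. The same pattern works for any import; a generic sketch against the standard library (the target module and spot-checked name are placeholders, chosen only because they are stable):

import string
import unittest


class TestPublicSurface(unittest.TestCase):
    def test_capwords_is_public(self):
        public = sorted(n for n in dir(string) if not n.startswith('_'))
        # spot-check a known name rather than pinning the full list,
        # which is brittle across Python versions
        self.assertIn('capwords', public)


if __name__ == '__main__':
    unittest.main()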
# -*- encoding: utf8 -*-

import os

brown = os.path.join(os.path.dirname(__file__), 'english-brown.txt')
cmudict = os.path.join(os.path.dirname(__file__), 'english-cmudict.dx1')
jacksonllee/lxa5
linguistica/datasets/__init__.py
Python
mit
179
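The module above just resolves absolute paths to the two corpora shipped next to it. A usage sketch (assumes the package is installed with its bundled data files intact):

from linguistica.datasets import brown, cmudict

with open(brown) as corpus:
    first_line = corpus.readline()  # first line of the bundled Brown corpus text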
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
# Copyright 2018 Adam Miller <admiller@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
  - Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
options:
  name:
    description:
      - "A package name or package specifier with version, like C(name-1.0).
        When using state=latest, this can be '*' which means run: dnf -y update.
        You can also pass a url or a local path to a rpm file.
        To operate on several packages this can accept a comma separated string of packages or a list of packages."
    required: true
    aliases:
      - pkg
    type: list
    elements: str
  list:
    description:
      - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
  state:
    description:
      - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
      - Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
        enabled for this module, then C(absent) is inferred.
    choices: ['absent', 'present', 'installed', 'removed', 'latest']
  enablerepo:
    description:
      - I(Repoid) of repositories to enable for the install/update operation.
        These repos will not persist beyond the transaction.
        When specifying multiple repos, separate them with a ",".
  disablerepo:
    description:
      - I(Repoid) of repositories to disable for the install/update operation.
        These repos will not persist beyond the transaction.
        When specifying multiple repos, separate them with a ",".
  conf_file:
    description:
      - The remote dnf configuration file to use for the transaction.
  disable_gpg_check:
    description:
      - Whether to disable the GPG checking of signatures of packages being
        installed. Has an effect only if state is I(present) or I(latest).
    type: bool
    default: 'no'
  installroot:
    description:
      - Specifies an alternative installroot, relative to which all packages
        will be installed.
    version_added: "2.3"
    default: "/"
  releasever:
    description:
      - Specifies an alternative release from which all packages will be
        installed.
    version_added: "2.6"
  autoremove:
    description:
      - If C(yes), removes all "leaf" packages from the system that were originally
        installed as dependencies of user-installed packages but which are no longer
        required by any such package. Should be used alone or when state is I(absent)
    type: bool
    default: "no"
    version_added: "2.4"
  exclude:
    description:
      - Package name(s) to exclude when state=present, or latest. This can be a
        list or a comma separated string.
    version_added: "2.7"
  skip_broken:
    description:
      - Skip packages with broken dependencies (depsolve) that are causing problems.
    type: bool
    default: "no"
    version_added: "2.7"
  update_cache:
    description:
      - Force dnf to check if cache is out of date and redownload if needed.
        Has an effect only if state is I(present) or I(latest).
    type: bool
    default: "no"
    aliases: [ expire-cache ]
    version_added: "2.7"
  update_only:
    description:
      - When using latest, only update installed packages. Do not install packages.
      - Has an effect only if state is I(latest)
    default: "no"
    type: bool
    version_added: "2.7"
  security:
    description:
      - If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
    type: bool
    default: "no"
    version_added: "2.7"
  bugfix:
    description:
      - If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
    default: "no"
    type: bool
    version_added: "2.7"
  enable_plugin:
    description:
      - I(Plugin) name to enable for the install/update operation.
        The enabled plugin will not persist beyond the transaction.
    version_added: "2.7"
  disable_plugin:
    description:
      - I(Plugin) name to disable for the install/update operation.
        The disabled plugins will not persist beyond the transaction.
    version_added: "2.7"
  disable_excludes:
    description:
      - Disable the excludes defined in DNF config files.
      - If set to C(all), disables all excludes.
      - If set to C(main), disable excludes defined in [main] in dnf.conf.
      - If set to C(repoid), disable excludes defined for given repo id.
    version_added: "2.7"
  validate_certs:
    description:
      - This only applies if using a https url as the source of the rpm. e.g. for localinstall.
        If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) on personally controlled sites using self-signed certificates
        as it avoids verifying the source site.
    type: bool
    default: "yes"
    version_added: "2.7"
  allow_downgrade:
    description:
      - Specify if the named package and version is allowed to downgrade
        a maybe already installed higher version of that package.
        Note that setting allow_downgrade=True can make this module
        behave in a non-idempotent way. The task could end up with a set
        of packages that does not match the complete list of specified
        packages to install (because dependencies between the downgraded
        package and others can cause changes to the packages which were
        in the earlier transaction).
    type: bool
    default: "no"
    version_added: "2.7"
  install_repoquery:
    description:
      - This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
        parity/compatibility with the I(yum) module.
    type: bool
    default: "yes"
    version_added: "2.7"
  download_only:
    description:
      - Only download the packages, do not install them.
    default: "no"
    type: bool
    version_added: "2.7"
  lock_timeout:
    description:
      - Amount of time to wait for the dnf lockfile to be freed.
    required: false
    default: 30
    type: int
    version_added: "2.8"
  install_weak_deps:
    description:
      - Will also install all packages linked by a weak dependency relation.
    type: bool
    default: "yes"
    version_added: "2.8"
  download_dir:
    description:
      - Specifies an alternate directory to store packages.
      - Has an effect only if I(download_only) is specified.
    type: str
    version_added: "2.8"
  allowerasing:
    description:
      - If C(yes) it allows erasing of installed packages to resolve dependencies.
    required: false
    type: bool
    default: "no"
    version_added: "2.10"
  nobest:
    description:
      - Set best option to False, so that transactions are not limited to best candidates only.
    required: false
    type: bool
    default: "no"
    version_added: "2.11"
notes:
  - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option.
  - Group removal doesn't work if the group was installed with Ansible because
    upstream dnf's API doesn't properly mark groups as installed, therefore upon
    removal the module is unable to detect that the group is installed
    (https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
requirements:
  - "python >= 2.6"
  - python-dnf
  - for the autoremove option you need dnf >= 2.0.1
author:
  - Igor Gnatenko (@ignatenkobrain) <i.gnatenko.brain@gmail.com>
  - Cristian van Ee (@DJMuggs) <cristian at cvee.org>
  - Berend De Schouwer (@berenddeschouwer)
  - Adam Miller (@maxamillion) <admiller@redhat.com>
'''

EXAMPLES = '''
- name: Install the latest version of Apache
  dnf:
    name: httpd
    state: latest

- name: Install the latest version of Apache and MariaDB
  dnf:
    name:
      - httpd
      - mariadb-server
    state: latest

- name: Remove the Apache package
  dnf:
    name: httpd
    state: absent

- name: Install the latest version of Apache from the testing repo
  dnf:
    name: httpd
    enablerepo: testing
    state: present

- name: Upgrade all packages
  dnf:
    name: "*"
    state: latest

- name: Install the nginx rpm from a remote repo
  dnf:
    name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
    state: present

- name: Install nginx rpm from a local file
  dnf:
    name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
    state: present

- name: Install the 'Development tools' package group
  dnf:
    name: '@Development tools'
    state: present

- name: Autoremove unneeded packages installed as dependencies
  dnf:
    autoremove: yes

- name: Uninstall httpd but keep its dependencies
  dnf:
    name: httpd
    state: absent
    autoremove: no

- name: Install a modularity appstream with defined stream and profile
  dnf:
    name: '@postgresql:9.6/client'
    state: present

- name: Install a modularity appstream with defined stream
  dnf:
    name: '@postgresql:9.6'
    state: present

- name: Install a modularity appstream with defined profile
  dnf:
    name: '@postgresql/client'
    state: present
'''

import os
import re
import sys

try:
    import dnf
    import dnf.cli
    import dnf.const
    import dnf.exceptions
    import dnf.subject
    import dnf.util
    HAS_DNF = True
except ImportError:
    HAS_DNF = False

from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_file
from ansible.module_utils.six import PY2, text_type
from distutils.version import LooseVersion

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec


class DnfModule(YumDnf):
    """
    DNF Ansible module back-end implementation
    """

    def __init__(self, module):
        # This populates instance vars for all argument spec params
        super(DnfModule, self).__init__(module)

        self._ensure_dnf()
        self.lockfile = "/var/cache/dnf/*_lock.pid"
        self.pkg_mgr_name = "dnf"

        try:
            self.with_modules = dnf.base.WITH_MODULES
        except AttributeError:
            self.with_modules = False

        # DNF specific args that are not part of YumDnf
        self.allowerasing = self.module.params['allowerasing']
        self.nobest = self.module.params['nobest']

    def is_lockfile_pid_valid(self):
        # FIXME? it looks like DNF takes care of invalid lock files itself?
        # https://github.com/ansible/ansible/issues/57189
        return True

    def _sanitize_dnf_error_msg_install(self, spec, error):
        """
        For unhandled dnf.exceptions.Error scenarios, there are certain error
        messages we want to filter in an install scenario. Do that here.
        """
        if (
            to_text("no package matched") in to_text(error) or
            to_text("No match for argument:") in to_text(error)
        ):
            return "No package {0} available.".format(spec)

        return error

    def _sanitize_dnf_error_msg_remove(self, spec, error):
        """
        For unhandled dnf.exceptions.Error scenarios, there are certain error
        messages we want to ignore in a removal scenario as known benign
        failures. Do that here.
        """
        if (
            'no package matched' in to_native(error) or
            'No match for argument:' in to_native(error)
        ):
            return (False, "{0} is not installed".format(spec))

        # Return value is tuple of:
        #   ("Is this actually a failure?", "Error Message")
        return (True, error)

    def _package_dict(self, package):
        """Return a dictionary of information for the package."""
        # NOTE: This no longer contains the 'dnfstate' field because it is
        # already known based on the query type.
        result = {
            'name': package.name,
            'arch': package.arch,
            'epoch': str(package.epoch),
            'release': package.release,
            'version': package.version,
            'repo': package.repoid}
        result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
            **result)

        if package.installtime == 0:
            result['yumstate'] = 'available'
        else:
            result['yumstate'] = 'installed'

        return result

    def _packagename_dict(self, packagename):
        """
        Return a dictionary of information for a package name string or None
        if the package name doesn't contain at least all NVR elements
        """

        if packagename[-4:] == '.rpm':
            packagename = packagename[:-4]

        # This list was auto generated on a Fedora 28 system with the following one-liner
        #   printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
        redhat_rpm_arches = [
            "aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
            "alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
            "armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
            "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
            "mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
            "noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
            "ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
            "riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
            "sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
        ]

        rpm_arch_re = re.compile(r'(.*)\.(.*)')
        rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
        try:
            arch = None
            rpm_arch_match = rpm_arch_re.match(packagename)
            if rpm_arch_match:
                nevr, arch = rpm_arch_match.groups()
                if arch in redhat_rpm_arches:
                    packagename = nevr
            rpm_nevr_match = rpm_nevr_re.match(packagename)
            if rpm_nevr_match:
                name, epoch, version, release = rpm_nevr_re.match(packagename).groups()
                if not version or not version.split('.')[0].isdigit():
                    return None
            else:
                return None
        except AttributeError as e:
            self.module.fail_json(
                msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
                rc=1,
                results=[]
            )

        if not epoch:
            epoch = "0"

        if ':' in name:
            epoch_name = name.split(":")

            epoch = epoch_name[0]
            name = ''.join(epoch_name[1:])

        result = {
            'name': name,
            'epoch': epoch,
            'release': release,
            'version': version,
        }

        return result

    # Original implementation from yum.rpmUtils.miscutils (GPLv2+)
    #   http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
    def _compare_evr(self, e1, v1, r1, e2, v2, r2):
        # return 1: a is newer than b
        # 0: a and b are the same version
        # -1: b is newer than a
        if e1 is None:
            e1 = '0'
        else:
            e1 = str(e1)
        v1 = str(v1)
        r1 = str(r1)
        if e2 is None:
            e2 = '0'
        else:
            e2 = str(e2)
        v2 = str(v2)
        r2 = str(r2)
        # print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
        rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
        # print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
        return rc

    def _ensure_dnf(self):
        if not HAS_DNF:
            if PY2:
                package = 'python2-dnf'
            else:
                package = 'python3-dnf'

            if self.module.check_mode:
                self.module.fail_json(
                    msg="`{0}` is not installed, but it is required "
                        "for the Ansible dnf module.".format(package),
                    results=[],
                )

            rc, stdout, stderr = self.module.run_command(['dnf', 'install', '-y', package])
            global dnf
            try:
                import dnf
                import dnf.cli
                import dnf.const
                import dnf.exceptions
                import dnf.subject
                import dnf.util
            except ImportError:
                self.module.fail_json(
                    msg="Could not import the dnf python module using {0} ({1}). "
                        "Please install `{2}` package or ensure you have specified the "
                        "correct ansible_python_interpreter.".format(sys.executable, sys.version.replace('\n', ''),
                                                                     package),
                    results=[],
                    cmd='dnf install -y {0}'.format(package),
                    rc=rc,
                    stdout=stdout,
                    stderr=stderr,
                )

    def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/'):
        """Configure the dnf Base object."""

        conf = base.conf

        # Change the configuration file path if provided, this must be done before conf.read() is called
        if conf_file:
            # Fail if we can't read the configuration file.
            if not os.access(conf_file, os.R_OK):
                self.module.fail_json(
                    msg="cannot read configuration file", conf_file=conf_file,
                    results=[],
                )
            else:
                conf.config_file_path = conf_file

        # Read the configuration file
        conf.read()

        # Turn off debug messages in the output
        conf.debuglevel = 0

        # Set whether to check gpg signatures
        conf.gpgcheck = not disable_gpg_check
        conf.localpkg_gpgcheck = not disable_gpg_check

        # Don't prompt for user confirmations
        conf.assumeyes = True

        # Set installroot
        conf.installroot = installroot

        # Load substitutions from the filesystem
        conf.substitutions.update_from_etc(installroot)

        # Handle different DNF versions immutable mutable datatypes and
        # dnf v1/v2/v3
        #
        # In DNF < 3.0 are lists, and modifying them works
        # In DNF >= 3.0 < 3.6 are lists, but modifying them doesn't work
        # In DNF >= 3.6 have been turned into tuples, to communicate that modifying them doesn't work
        #
        # https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
        #
        # Set excludes
        if self.exclude:
            _excludes = list(conf.exclude)
            _excludes.extend(self.exclude)
            conf.exclude = _excludes
        # Set disable_excludes
        if self.disable_excludes:
            _disable_excludes = list(conf.disable_excludes)
            if self.disable_excludes not in _disable_excludes:
                _disable_excludes.append(self.disable_excludes)
                conf.disable_excludes = _disable_excludes

        # Set releasever
        if self.releasever is not None:
            conf.substitutions['releasever'] = self.releasever

        # Set skip_broken (in dnf this is strict=0)
        if self.skip_broken:
            conf.strict = 0

        # Set best
        if self.nobest:
            conf.best = 0

        if self.download_only:
            conf.downloadonly = True
            if self.download_dir:
                conf.destdir = self.download_dir

        # Default in dnf upstream is true
        conf.clean_requirements_on_remove = self.autoremove

        # Default in dnf (and module default) is True
        conf.install_weak_deps = self.install_weak_deps

    def _specify_repositories(self, base, disablerepo, enablerepo):
        """Enable and disable repositories matching the provided patterns."""
        base.read_all_repos()
        repos = base.repos

        # Disable repositories
        for repo_pattern in disablerepo:
            if repo_pattern:
                for repo in repos.get_matching(repo_pattern):
                    repo.disable()

        # Enable repositories
        for repo_pattern in enablerepo:
            if repo_pattern:
                for repo in repos.get_matching(repo_pattern):
                    repo.enable()

    def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot):
        """Return a fully configured dnf Base object."""
        base = dnf.Base()
        self._configure_base(base, conf_file, disable_gpg_check, installroot)
        try:
            # this method has been supported in dnf-4.2.17-6 or later
            # https://bugzilla.redhat.com/show_bug.cgi?id=1788212
            base.setup_loggers()
        except AttributeError:
            pass
        try:
            base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
            base.pre_configure_plugins()
        except AttributeError:
            pass  # older versions of dnf didn't require this and don't have these methods
        self._specify_repositories(base, disablerepo, enablerepo)
        try:
            base.configure_plugins()
        except AttributeError:
            pass  # older versions of dnf didn't require this and don't have these methods
        try:
            if self.update_cache:
                try:
                    base.update_cache()
                except dnf.exceptions.RepoError as e:
                    self.module.fail_json(
                        msg="{0}".format(to_text(e)),
                        results=[],
                        rc=1
                    )
            base.fill_sack(load_system_repo='auto')
        except dnf.exceptions.RepoError as e:
            self.module.fail_json(
                msg="{0}".format(to_text(e)),
                results=[],
                rc=1
            )
        if self.bugfix:
            key = {'advisory_type__eq': 'bugfix'}
            base._update_security_filters = [base.sack.query().filter(**key)]
        if self.security:
            key = {'advisory_type__eq': 'security'}
            base._update_security_filters = [base.sack.query().filter(**key)]

        return base

    def list_items(self, command):
        """List package info based on the command."""
        # Rename updates to upgrades
        if command == 'updates':
            command = 'upgrades'

        # Return the corresponding packages
        if command in ['installed', 'upgrades', 'available']:
            results = [
                self._package_dict(package)
                for package in getattr(self.base.sack.query(), command)()]
        # Return the enabled repository ids
        elif command in ['repos', 'repositories']:
            results = [
                {'repoid': repo.id, 'state': 'enabled'}
                for repo in self.base.repos.iter_enabled()]
        # Return any matching packages
        else:
            packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
            results = [self._package_dict(package) for package in packages]

        self.module.exit_json(msg="", results=results)

    def _is_installed(self, pkg):
        installed = self.base.sack.query().installed()
        if installed.filter(name=pkg):
            return True
        else:
            return False

    def _is_newer_version_installed(self, pkg_name):
        candidate_pkg = self._packagename_dict(pkg_name)
        if not candidate_pkg:
            # The user didn't provide a versioned rpm, so version checking is
            # not required
            return False

        installed = self.base.sack.query().installed()
        installed_pkg = installed.filter(name=candidate_pkg['name']).run()
        if installed_pkg:
            installed_pkg = installed_pkg[0]

            # this looks weird but one is a dict and the other is a dnf.Package
            evr_cmp = self._compare_evr(
                installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
                candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
            )

            if evr_cmp == 1:
                return True
            else:
                return False

        else:
            return False

    def _mark_package_install(self, pkg_spec, upgrade=False):
        """Mark the package for install."""
        is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
        is_installed = self._is_installed(pkg_spec)
        try:
            if is_newer_version_installed:
                if self.allow_downgrade:
                    # dnf only does allow_downgrade, we have to handle this ourselves
                    # because it allows a possibility for non-idempotent transactions
                    # on a system's package set (pending the yum repo has many old
                    # NVRs indexed)
                    if upgrade:
                        if is_installed:
                            self.base.upgrade(pkg_spec)
                        else:
                            self.base.install(pkg_spec)
                    else:
                        self.base.install(pkg_spec)
                else:
                    # Nothing to do, report back
                    pass
            elif is_installed:
                # An potentially older (or same) version is installed
                if upgrade:
                    self.base.upgrade(pkg_spec)
                else:
                    # Nothing to do, report back
                    pass
            else:
                # The package is not installed, simply install it
                self.base.install(pkg_spec)

            return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}

        except dnf.exceptions.MarkingError as e:
            return {
                'failed': True,
                'msg': "No package {0} available.".format(pkg_spec),
                'failure': " ".join((pkg_spec, to_native(e))),
                'rc': 1,
                "results": []
            }

        except dnf.exceptions.DepsolveError as e:
            return {
                'failed': True,
                'msg': "Depsolve Error occured for package {0}.".format(pkg_spec),
                'failure': " ".join((pkg_spec, to_native(e))),
                'rc': 1,
                "results": []
            }

        except dnf.exceptions.Error as e:
            if to_text("already installed") in to_text(e):
                return {'failed': False, 'msg': '', 'failure': ''}
            else:
                return {
                    'failed': True,
                    'msg': "Unknown Error occured for package {0}.".format(pkg_spec),
                    'failure': " ".join((pkg_spec, to_native(e))),
                    'rc': 1,
                    "results": []
                }

    def _whatprovides(self, filepath):
        available = self.base.sack.query().available()
        pkg_spec = available.filter(provides=filepath).run()

        if pkg_spec:
            return pkg_spec[0].name

    def _parse_spec_group_file(self):
        pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
        already_loaded_comps = False  # Only load this if necessary, it's slow

        for name in self.names:
            if '://' in name:
                name = fetch_file(self.module, name)
                filenames.append(name)
            elif name.endswith(".rpm"):
                filenames.append(name)
            elif name.startswith("@") or ('/' in name):
                # like "dnf install /usr/bin/vi"
                if '/' in name:
                    pkg_spec = self._whatprovides(name)
                    if pkg_spec:
                        pkg_specs.append(pkg_spec)
                        continue

                if not already_loaded_comps:
                    self.base.read_comps()
                    already_loaded_comps = True

                grp_env_mdl_candidate = name[1:].strip()

                if self.with_modules:
                    mdl = self.module_base._get_modules(grp_env_mdl_candidate)
                    if mdl[0]:
                        module_specs.append(grp_env_mdl_candidate)
                    else:
                        grp_specs.append(grp_env_mdl_candidate)
                else:
                    grp_specs.append(grp_env_mdl_candidate)
            else:
                pkg_specs.append(name)
        return pkg_specs, grp_specs, module_specs, filenames

    def _update_only(self, pkgs):
        not_installed = []
        for pkg in pkgs:
            if self._is_installed(pkg):
                try:
                    if isinstance(to_text(pkg), text_type):
                        self.base.upgrade(pkg)
                    else:
                        self.base.package_upgrade(pkg)
                except Exception as e:
                    self.module.fail_json(
                        msg="Error occured attempting update_only operation: {0}".format(to_native(e)),
                        results=[],
                        rc=1,
                    )
            else:
                not_installed.append(pkg)

        return not_installed

    def _install_remote_rpms(self, filenames):
        if int(dnf.__version__.split(".")[0]) >= 2:
            pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
        else:
            pkgs = []
            try:
                for filename in filenames:
                    pkgs.append(self.base.add_remote_rpm(filename))
            except IOError as e:
                if to_text("Can not load RPM file") in to_text(e):
                    self.module.fail_json(
                        msg="Error occured attempting remote rpm install of package: {0}.
{1}".format(filename, to_native(e)), results=[], rc=1, ) if self.update_only: self._update_only(pkgs) else: for pkg in pkgs: try: if self._is_newer_version_installed(self._package_dict(pkg)['nevra']): if self.allow_downgrade: self.base.package_install(pkg) else: self.base.package_install(pkg) except Exception as e: self.module.fail_json( msg="Error occured attempting remote rpm operation: {0}".format(to_native(e)), results=[], rc=1, ) def _is_module_installed(self, module_spec): if self.with_modules: module_spec = module_spec.strip() module_list, nsv = self.module_base._get_modules(module_spec) enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name) if enabled_streams: if nsv.stream: if nsv.stream in enabled_streams: return True # The provided stream was found else: return False # The provided stream was not found else: return True # No stream provided, but module found return False # seems like a sane default def ensure(self): response = { 'msg': "", 'changed': False, 'results': [], 'rc': 0 } # Accumulate failures. Package management modules install what they can # and fail with a message about what they can't. failure_response = { 'msg': "", 'failures': [], 'results': [], 'rc': 1 } # Autoremove is called alone # Jump to remove path where base.autoremove() is run if not self.names and self.autoremove: self.names = [] self.state = 'absent' if self.names == ['*'] and self.state == 'latest': try: self.base.upgrade_all() except dnf.exceptions.DepsolveError as e: failure_response['msg'] = "Depsolve Error occured attempting to upgrade all packages" self.module.fail_json(**failure_response) else: pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file() pkg_specs = [p.strip() for p in pkg_specs] filenames = [f.strip() for f in filenames] groups = [] environments = [] for group_spec in (g.strip() for g in group_specs): group = self.base.comps.group_by_pattern(group_spec) if group: groups.append(group.id) else: environment = self.base.comps.environment_by_pattern(group_spec) if environment: environments.append(environment.id) else: self.module.fail_json( msg="No group {0} available.".format(group_spec), results=[], ) if self.state in ['installed', 'present']: # Install files. self._install_remote_rpms(filenames) for filename in filenames: response['results'].append("Installed {0}".format(filename)) # Install modules if module_specs and self.with_modules: for module in module_specs: try: if not self._is_module_installed(module): response['results'].append("Module {0} installed.".format(module)) self.module_base.install([module]) self.module_base.enable([module]) except dnf.exceptions.MarkingErrors as e: failure_response['failures'].append(' '.join((module, to_native(e)))) # Install groups. for group in groups: try: group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) if group_pkg_count_installed == 0: response['results'].append("Group {0} already installed.".format(group)) else: response['results'].append("Group {0} installed.".format(group)) except dnf.exceptions.DepsolveError as e: failure_response['msg'] = "Depsolve Error occured attempting to install group: {0}".format(group) self.module.fail_json(**failure_response) except dnf.exceptions.Error as e: # In dnf 2.0 if all the mandatory packages in a group do # not install, an error is raised. We want to capture # this but still install as much as possible. 
failure_response['failures'].append(" ".join((group, to_native(e)))) for environment in environments: try: self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES) except dnf.exceptions.DepsolveError as e: failure_response['msg'] = "Depsolve Error occured attempting to install environment: {0}".format(environment) self.module.fail_json(**failure_response) except dnf.exceptions.Error as e: failure_response['failures'].append(" ".join((environment, to_native(e)))) if module_specs and not self.with_modules: # This means that the group or env wasn't found in comps self.module.fail_json( msg="No group {0} available.".format(module_specs[0]), results=[], ) # Install packages. if self.update_only: not_installed = self._update_only(pkg_specs) for spec in not_installed: response['results'].append("Packages providing %s not installed due to update_only specified" % spec) else: for pkg_spec in pkg_specs: install_result = self._mark_package_install(pkg_spec) if install_result['failed']: if install_result['msg']: failure_response['msg'] += install_result['msg'] failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure'])) else: if install_result['msg']: response['results'].append(install_result['msg']) elif self.state == 'latest': # "latest" is same as "installed" for filenames. self._install_remote_rpms(filenames) for filename in filenames: response['results'].append("Installed {0}".format(filename)) # Upgrade modules if module_specs and self.with_modules: for module in module_specs: try: if self._is_module_installed(module): response['results'].append("Module {0} upgraded.".format(module)) self.module_base.upgrade([module]) except dnf.exceptions.MarkingErrors as e: failure_response['failures'].append(' '.join((module, to_native(e)))) for group in groups: try: try: self.base.group_upgrade(group) response['results'].append("Group {0} upgraded.".format(group)) except dnf.exceptions.CompsError: if not self.update_only: # If not already installed, try to install. group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) if group_pkg_count_installed == 0: response['results'].append("Group {0} already installed.".format(group)) else: response['results'].append("Group {0} installed.".format(group)) except dnf.exceptions.Error as e: failure_response['failures'].append(" ".join((group, to_native(e)))) for environment in environments: try: try: self.base.environment_upgrade(environment) except dnf.exceptions.CompsError: # If not already installed, try to install. 
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES) except dnf.exceptions.DepsolveError as e: failure_response['msg'] = "Depsolve Error occured attempting to install environment: {0}".format(environment) except dnf.exceptions.Error as e: failure_response['failures'].append(" ".join((environment, to_native(e)))) if self.update_only: not_installed = self._update_only(pkg_specs) for spec in not_installed: response['results'].append("Packages providing %s not installed due to update_only specified" % spec) else: for pkg_spec in pkg_specs: # best effort causes to install the latest package # even if not previously installed self.base.conf.best = True install_result = self._mark_package_install(pkg_spec, upgrade=True) if install_result['failed']: if install_result['msg']: failure_response['msg'] += install_result['msg'] failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure'])) else: if install_result['msg']: response['results'].append(install_result['msg']) else: # state == absent if filenames: self.module.fail_json( msg="Cannot remove paths -- please specify package name.", results=[], ) # Remove modules if module_specs and self.with_modules: for module in module_specs: try: if self._is_module_installed(module): response['results'].append("Module {0} removed.".format(module)) self.module_base.remove([module]) self.module_base.disable([module]) self.module_base.reset([module]) except dnf.exceptions.MarkingErrors as e: failure_response['failures'].append(' '.join((module, to_native(e)))) for group in groups: try: self.base.group_remove(group) except dnf.exceptions.CompsError: # Group is already uninstalled. pass except AttributeError: # Group either isn't installed or wasn't marked installed at install time # because of DNF bug # # This is necessary until the upstream dnf API bug is fixed where installing # a group via the dnf API doesn't actually mark the group as installed # https://bugzilla.redhat.com/show_bug.cgi?id=1620324 pass for environment in environments: try: self.base.environment_remove(environment) except dnf.exceptions.CompsError: # Environment is already uninstalled. 
pass installed = self.base.sack.query().installed() for pkg_spec in pkg_specs: # short-circuit installed check for wildcard matching if '*' in pkg_spec: try: self.base.remove(pkg_spec) except dnf.exceptions.MarkingError as e: is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e)) if is_failure: failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e))) else: response['results'].append(handled_remove_error) continue installed_pkg = list(map(str, installed.filter(name=pkg_spec).run())) if installed_pkg: candidate_pkg = self._packagename_dict(installed_pkg[0]) installed_pkg = installed.filter(name=candidate_pkg['name']).run() else: candidate_pkg = self._packagename_dict(pkg_spec) installed_pkg = installed.filter(nevra=pkg_spec).run() if installed_pkg: installed_pkg = installed_pkg[0] evr_cmp = self._compare_evr( installed_pkg.epoch, installed_pkg.version, installed_pkg.release, candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'], ) if evr_cmp == 0: self.base.remove(pkg_spec) # Like the dnf CLI we want to allow recursive removal of dependent # packages self.allowerasing = True if self.autoremove: self.base.autoremove() try: if not self.base.resolve(allow_erasing=self.allowerasing): if failure_response['failures']: failure_response['msg'] = 'Failed to install some of the specified packages' self.module.fail_json(**failure_response) response['msg'] = "Nothing to do" self.module.exit_json(**response) else: response['changed'] = True if failure_response['failures']: failure_response['msg'] = 'Failed to install some of the specified packages' self.module.fail_json(**failure_response) if self.module.check_mode: response['msg'] = "Check mode: No changes made, but would have if not in check mode" self.module.exit_json(**response) try: if self.download_only and self.download_dir and self.base.conf.destdir: dnf.util.ensure_dir(self.base.conf.destdir) self.base.repos.all().pkgdir = self.base.conf.destdir self.base.download_packages(self.base.transaction.install_set) except dnf.exceptions.DownloadError as e: self.module.fail_json( msg="Failed to download packages: {0}".format(to_text(e)), results=[], ) if self.download_only: for package in self.base.transaction.install_set: response['results'].append("Downloaded: {0}".format(package)) self.module.exit_json(**response) else: self.base.do_transaction() for package in self.base.transaction.install_set: response['results'].append("Installed: {0}".format(package)) for package in self.base.transaction.remove_set: response['results'].append("Removed: {0}".format(package)) if failure_response['failures']: failure_response['msg'] = 'Failed to install some of the specified packages' self.module.exit_json(**response) self.module.exit_json(**response) except dnf.exceptions.DepsolveError as e: failure_response['msg'] = "Depsolve Error occured: {0}".format(to_native(e)) self.module.fail_json(**failure_response) except dnf.exceptions.Error as e: if to_text("already installed") in to_text(e): response['changed'] = False response['results'].append("Package already installed: {0}".format(to_native(e))) self.module.exit_json(**response) else: failure_response['msg'] = "Unknown Error occured: {0}".format(to_native(e)) self.module.fail_json(**failure_response) @staticmethod def has_dnf(): return HAS_DNF def run(self): """The main function.""" # Check if autoremove is called correctly if self.autoremove: if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'): self.module.fail_json( 
msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__, results=[], ) # Check if download_dir is called correctly if self.download_dir: if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'): self.module.fail_json( msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__, results=[], ) if self.update_cache and not self.names and not self.list: self.base = self._base( self.conf_file, self.disable_gpg_check, self.disablerepo, self.enablerepo, self.installroot ) self.module.exit_json( msg="Cache updated", changed=False, results=[], rc=0 ) # Set state as installed by default # This is not set in AnsibleModule() because the following shouldn't happen # - dnf: autoremove=yes state=installed if self.state is None: self.state = 'installed' if self.list: self.base = self._base( self.conf_file, self.disable_gpg_check, self.disablerepo, self.enablerepo, self.installroot ) self.list_items(self.list) else: # Note: base takes a long time to run so we want to check for failure # before running it. if not dnf.util.am_i_root(): self.module.fail_json( msg="This command has to be run under the root user.", results=[], ) self.base = self._base( self.conf_file, self.disable_gpg_check, self.disablerepo, self.enablerepo, self.installroot ) if self.with_modules: self.module_base = dnf.module.module_base.ModuleBase(self.base) self.ensure() def main(): # state=installed name=pkgspec # state=removed name=pkgspec # state=latest name=pkgspec # # informational commands: # list=installed # list=updates # list=available # list=repos # list=pkgspec # Extend yumdnf_argument_spec with dnf-specific features that will never be # backported to yum because yum is now in "maintenance mode" upstream yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool') yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool') module = AnsibleModule( **yumdnf_argument_spec ) module_implementation = DnfModule(module) try: module_implementation.run() except dnf.exceptions.RepoError as de: module.fail_json( msg="Failed to synchronize repodata: {0}".format(to_native(de)), rc=1, results=[], changed=False ) if __name__ == '__main__': main()
dpassante/ansible
lib/ansible/modules/dnf.py
Python
gpl-3.0
51,820
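# A minimal standalone sketch (not part of the module above) of the EVR
# comparison that _compare_evr delegates to; it assumes the rpm Python
# bindings are installed, and the version tuples are illustrative.
import rpm

# labelCompare returns 1, 0 or -1: here 1 means the first
# (epoch, version, release) tuple is newer than the second.
print(rpm.labelCompare(('0', '2.4.1', '1'), ('0', '2.4.0', '3')))  # 1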
import py_isear.enums as enums import csv class IsearSubset: def __init__(self, labels, values): self.labels = labels self.values = values class IsearDataSet: def __init__(self, data=IsearSubset([], []), target=IsearSubset([], []), text_data=[]): self.__data = data self.__target = target self.__text_data = text_data def get_data(self): return self.__data.values def get_target(self): return self.__target.values def get_data_label_at(self, i): return self.__data.labels[i] def get_target_label_at(self, i): return self.__target.labels[i] def get_freetext_content(self): return self.__text_data class NoSuchFieldException: def __init__(self, field_name): self.message = "No such field in dataset : " + field_name def get_message(self): return self.message class IsearLoader: def load_isear(self, s_isear_path): f_isear = open(s_isear_path, "r") ''' The isear file extracted for the purpose of this initial loading is a pipe delimited csv-like file with headings ''' isear_reader = csv.reader(f_isear, delimiter="|", quotechar='"') i = 0 entry_attributes = [] text_data = [] entry_target = [] for isear_row in isear_reader: if i == 0: i = i + 1 continue result = self.__parse_entry(isear_row, i, text_data) entry_attributes.append(result["attributes"]) entry_target.append(result["target"]) i = i + 1 attributes_subset = IsearSubset(self.attribute_list, entry_attributes) target_subset = IsearSubset(self.target_list, entry_target) return IsearDataSet(attributes_subset, target_subset, text_data) def __parse_entry(self, isear_row, # The row of the entry index, # row number text_data): # the text data i_col = 0 l_attributes = [] l_target = [] # start parsing the columns for isear_col in isear_row: # we need to know to which field we are refering # handling the excess columns if i_col >= len(enums.CONST_ISEAR_CODES): break s_cur_col = enums.CONST_ISEAR_CODES[i_col] # for further test this will tell whether we are in the SIT column, # which is a text column b_is_sit = bool(s_cur_col == "SIT") if b_is_sit: if self.provide_text: # should be clear enough text_data.append(isear_col) else: # should be an int if s_cur_col in self.attribute_list: i_isear_col = int(isear_col) l_attributes.append(i_isear_col) if s_cur_col in self.target_list: i_isear_col = int(isear_col) l_target.append(i_isear_col) # next column i_col = i_col + 1 # we will return a pretty "free form" object return {"attributes": l_attributes, "target": l_target} def __init__(self, attribute_list=[], target_list=[], provide_text=True): # list of attributes to extract, please refer to enums.py self.attribute_list = [] self.set_attribute_list(attribute_list) # list of targets to extract self.target_list = [] self.set_target_list(target_list) # provide the text, true by default self.provide_text = provide_text # compares attribute existence in the Isear labels def __check_attr_exists(self, attribute): return attribute in enums.CONST_ISEAR_CODES def set_attribute_list(self, attrs): """Set a list of attributes to extract Args: attrs (list): a list of strings refering Isear fields . Returns: self. in order to ease fluent programming (loader.set().set()) Raises: NoSuchFieldException """ self.attribute_list = [] for attr in attrs: self.add_attribute(attr) return self def set_target_list(self, target): """Set a list of fields to extract as target Args: attrs (list): a list of strings refering Isear fields . Returns: self. 
in order to ease fluent programming (loader.set().set()) Raises: NoSuchFieldException """ self.target_list = [] for tgt in target: self.add_target(tgt) return self def set_provide_text(self, is_provide_text): """ Tell the extractor whether to load the free text field. Behaviour is true by default Args: is_provide_text (bool): whether to provide the text field or not Return self. For fluent API """ self.provide_text = is_provide_text return self def add_attribute(self, attr): b_att_ex = self.__check_attr_exists(attr) if b_att_ex is not True: ex = NoSuchFieldException(attr) raise ex self.attribute_list.append(attr) return self def add_target(self, attr): b_att_ex = self.__check_attr_exists(attr) if b_att_ex is not True: ex = NoSuchFieldException(attr) raise ex self.target_list.append(attr) return self # def load_isear(self):
sinmaniphel/py_isear_dataset
py_isear/isear_loader.py
Python
gpl-3.0
6,006
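# A short usage sketch for IsearLoader above; the attribute/target codes and
# the file path are illustrative and must exist in the actual ISEAR export.
from py_isear.isear_loader import IsearLoader

loader = IsearLoader(['TEMPER', 'TROPHO'], ['EMOT'], provide_text=True)
dataset = loader.load_isear('isear.csv')   # pipe-delimited, with a header row
print(dataset.get_target()[:5])            # first five target rows
print(dataset.get_freetext_content()[:1])  # first SIT free-text entry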
#!/usr/bin/env python # -*- coding: iso-8859-1 -*- # Copyright (C) 2007-2016 CEA/DEN, EDF R&D, OPEN CASCADE # # Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN, # CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com # import os import sys def __setup_config(nsport, args, save_config): # from salome_utils import generateFileName, getHostName hostname = getHostName() # omniorbUserPath = os.getenv("OMNIORB_USER_PATH") kwargs={} if omniorbUserPath is not None: kwargs["with_username"]=True # from ORBConfigFile import writeORBConfigFile omniorb_config, giopsize = writeORBConfigFile(omniorbUserPath, hostname, nsport, kwargs) args['port'] = os.environ['NSPORT'] # if save_config: last_running_config = generateFileName(omniorbUserPath, prefix="omniORB", suffix="last", extension="cfg", hidden=True, **kwargs) os.environ['LAST_RUNNING_CONFIG'] = last_running_config try: if sys.platform == "win32": import shutil shutil.copyfile(omniorb_config, last_running_config) else: try: if os.access(last_running_config, os.F_OK): os.remove(last_running_config) except OSError: pass os.symlink(omniorb_config, last_running_config) pass pass except: pass # # def searchFreePort_withPortManager(queue, args={}, save_config=1, use_port=None): from PortManager import getPort port = getPort(use_port) if use_port: print "Check if port can be used: %d" % use_port, if port == use_port and port != -1: print "- OK" __setup_config(use_port, args, save_config) queue.put([os.environ['OMNIORB_CONFIG'], os.environ['NSPORT'], os.environ['NSHOST']]) return else: print "- KO: port is busy" pass # print "Searching for a free port for naming service:", if port == -1: # try again port = getPort(use_port) if port != -1: print "%s - OK"%(port) __setup_config(port, args, save_config) else: print "Unable to obtain port" queue.put([os.environ['OMNIORB_CONFIG'], os.environ['NSPORT'], os.environ['NSHOST']]) # def __savePortToFile(args): # Save Naming service port name into # the file args["ns_port_log_file"] if args.has_key('ns_port_log_file'): omniorbUserPath = os.getenv("OMNIORB_USER_PATH") file_name = os.path.join(omniorbUserPath, args["ns_port_log_file"]) with open(file_name, "w") as f: f.write(os.environ['NSPORT']) # def searchFreePort(args={}, save_config=1, use_port=None): """ Search free port for SALOME session. Returns first found free port number. 
""" try: import PortManager # mandatory from multiprocessing import Process, Queue queue = Queue() p = Process(target = searchFreePort_withPortManager, args=(queue, args, save_config, use_port,)) p.start() info = queue.get() os.environ['OMNIORB_CONFIG'] = info[0] os.environ['NSPORT'] = info[1] args['port'] = os.environ['NSPORT'] os.environ['NSHOST'] = info[2] __savePortToFile(args) p.join() # this blocks until the process terminates except ImportError: raise Exception('PortManager module not found') #
FedoraScientific/salome-kernel
bin/searchFreePort.py
Python
lgpl-2.1
4,236
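# A hedged usage sketch for searchFreePort above (Python 2, like the module
# itself); it assumes SALOME's PortManager is importable and that the
# OMNIORB_USER_PATH environment variable is set.
args = {}
searchFreePort(args, save_config=1, use_port=None)
print args['port']   # e.g. '2810'; also exported via the NSPORT variable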
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002  Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
#
# $Id: test_BLD.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the BLD opcode.
"""

import base_test
from registers import Reg, SREG

class BLD_TestFail(base_test.TestFail): pass

class base_BLD(base_test.opcode_test):
    """Generic test case for testing BLD opcode.

    Bit load from the T flag in SREG to bit in register.
    opcode is '1111 100d dddd 0bbb' where d is the register and b is the
    register bit.

    Only registers PC and Rd should be changed.
    """
    def setup(self):
        # set the T flag in sreg
        self.setup_regs[Reg.SREG] = self.T << SREG.T

        # set the given register's bits to complement of T value
        if self.T == 0:
            self.setup_regs[self.reg] = 0xff
        else:
            self.setup_regs[self.reg] = 0x0

        return 0xF800 | (self.reg << 4) | self.bit

    def analyze_results(self):
        self.reg_changed.append(self.reg)

        # check that register value is correct
        if self.T == 0:
            expect = 0xff & ~(1 << self.bit)
        else:
            expect = (1 << self.bit)

        got = self.anal_regs[self.reg]

        if expect != got:
            self.fail('r%02d bit %d not T(%d): expect=%02x, got=%02x' % (
                self.reg, self.bit, self.T, expect, got))

#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class BLD_r%02d_bit%d_T%d_TestFail(BLD_TestFail): pass

class test_BLD_r%02d_bit%d_T%d(base_BLD):
    reg = %d
    bit = %d
    T = %d
    def fail(self,s): raise BLD_r%02d_bit%d_T%d_TestFail, s
"""

#
# automagically generate the test_BLD_rNN_bitN_T[01] class definitions
#
code = ''
for t in (0,1):
    for r in range(32):
        for b in range(8):
            code += template % (r,b,t, r,b,t, r,b,t, r,b,t)
exec code
simark/simulavr
regress/test_opcodes/test_BLD.py
Python
gpl-2.0
2,630
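# For reference, one expansion of the template above, with (r, b, t) =
# (0, 0, 0); this is exactly the text the exec statement compiles.
class BLD_r00_bit0_T0_TestFail(BLD_TestFail): pass

class test_BLD_r00_bit0_T0(base_BLD):
    reg = 0
    bit = 0
    T = 0
    def fail(self,s): raise BLD_r00_bit0_T0_TestFail, s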
from django.conf.urls import url import trigger.views urlpatterns = [ url(r'^(?P<trigger_id>\d+)/render/$', trigger.views.render_trigger_params, name='render_trigger_params'), ]
theju/dtwt
trigger/urls.py
Python
mit
185
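# A small sketch of resolving the named route above; the trigger id is
# illustrative and the path assumes this urlconf is included at the root.
from django.core.urlresolvers import reverse

path = reverse('render_trigger_params', kwargs={'trigger_id': 7})  # '/7/render/'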
# coding: utf-8 """Tests for IPython.lib.pretty.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from collections import Counter, defaultdict, deque, OrderedDict import types import string import unittest import nose.tools as nt from IPython.lib import pretty from IPython.testing.decorators import skip_without from io import StringIO class MyList(object): def __init__(self, content): self.content = content def _repr_pretty_(self, p, cycle): if cycle: p.text("MyList(...)") else: with p.group(3, "MyList(", ")"): for (i, child) in enumerate(self.content): if i: p.text(",") p.breakable() else: p.breakable("") p.pretty(child) class MyDict(dict): def _repr_pretty_(self, p, cycle): p.text("MyDict(...)") class MyObj(object): def somemethod(self): pass class Dummy1(object): def _repr_pretty_(self, p, cycle): p.text("Dummy1(...)") class Dummy2(Dummy1): _repr_pretty_ = None class NoModule(object): pass NoModule.__module__ = None class Breaking(object): def _repr_pretty_(self, p, cycle): with p.group(4,"TG: ",":"): p.text("Breaking(") p.break_() p.text(")") class BreakingRepr(object): def __repr__(self): return "Breaking(\n)" class BreakingReprParent(object): def _repr_pretty_(self, p, cycle): with p.group(4,"TG: ",":"): p.pretty(BreakingRepr()) class BadRepr(object): def __repr__(self): return 1/0 def test_indentation(): """Test correct indentation in groups""" count = 40 gotoutput = pretty.pretty(MyList(range(count))) expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")" nt.assert_equal(gotoutput, expectedoutput) def test_dispatch(): """ Test correct dispatching: The _repr_pretty_ method for MyDict must be found before the registered printer for dict. """ gotoutput = pretty.pretty(MyDict()) expectedoutput = "MyDict(...)" nt.assert_equal(gotoutput, expectedoutput) def test_callability_checking(): """ Test that the _repr_pretty_ method is tested for callability and skipped if not. """ gotoutput = pretty.pretty(Dummy2()) expectedoutput = "Dummy1(...)" nt.assert_equal(gotoutput, expectedoutput) def test_sets(): """ Test that set and frozenset use Python 3 formatting. """ objects = [set(), frozenset(), set([1]), frozenset([1]), set([1, 2]), frozenset([1, 2]), set([-1, -2, -3])] expected = ['set()', 'frozenset()', '{1}', 'frozenset({1})', '{1, 2}', 'frozenset({1, 2})', '{-3, -2, -1}'] for obj, expected_output in zip(objects, expected): got_output = pretty.pretty(obj) yield nt.assert_equal, got_output, expected_output @skip_without('xxlimited') def test_pprint_heap_allocated_type(): """ Test that pprint works for heap allocated types. """ import xxlimited output = pretty.pretty(xxlimited.Null) nt.assert_equal(output, 'xxlimited.Null') def test_pprint_nomod(): """ Test that pprint works for classes with no __module__. 
""" output = pretty.pretty(NoModule) nt.assert_equal(output, 'NoModule') def test_pprint_break(): """ Test that p.break_ produces expected output """ output = pretty.pretty(Breaking()) expected = "TG: Breaking(\n ):" nt.assert_equal(output, expected) def test_pprint_break_repr(): """ Test that p.break_ is used in repr """ output = pretty.pretty(BreakingReprParent()) expected = "TG: Breaking(\n ):" nt.assert_equal(output, expected) def test_bad_repr(): """Don't catch bad repr errors""" with nt.assert_raises(ZeroDivisionError): pretty.pretty(BadRepr()) class BadException(Exception): def __str__(self): return -1 class ReallyBadRepr(object): __module__ = 1 @property def __class__(self): raise ValueError("I am horrible") def __repr__(self): raise BadException() def test_really_bad_repr(): with nt.assert_raises(BadException): pretty.pretty(ReallyBadRepr()) class SA(object): pass class SB(SA): pass class TestsPretty(unittest.TestCase): def test_super_repr(self): # "<super: module_name.SA, None>" output = pretty.pretty(super(SA)) self.assertRegex(output, r"<super: \S+.SA, None>") # "<super: module_name.SA, <module_name.SB at 0x...>>" sb = SB() output = pretty.pretty(super(SA, sb)) self.assertRegex(output, r"<super: \S+.SA,\s+<\S+.SB at 0x\S+>>") def test_long_list(self): lis = list(range(10000)) p = pretty.pretty(lis) last2 = p.rsplit('\n', 2)[-2:] self.assertEqual(last2, [' 999,', ' ...]']) def test_long_set(self): s = set(range(10000)) p = pretty.pretty(s) last2 = p.rsplit('\n', 2)[-2:] self.assertEqual(last2, [' 999,', ' ...}']) def test_long_tuple(self): tup = tuple(range(10000)) p = pretty.pretty(tup) last2 = p.rsplit('\n', 2)[-2:] self.assertEqual(last2, [' 999,', ' ...)']) def test_long_dict(self): d = { n:n for n in range(10000) } p = pretty.pretty(d) last2 = p.rsplit('\n', 2)[-2:] self.assertEqual(last2, [' 999: 999,', ' ...}']) def test_unbound_method(self): output = pretty.pretty(MyObj.somemethod) self.assertIn('MyObj.somemethod', output) class MetaClass(type): def __new__(cls, name): return type.__new__(cls, name, (object,), {'name': name}) def __repr__(self): return "[CUSTOM REPR FOR CLASS %s]" % self.name ClassWithMeta = MetaClass('ClassWithMeta') def test_metaclass_repr(): output = pretty.pretty(ClassWithMeta) nt.assert_equal(output, "[CUSTOM REPR FOR CLASS ClassWithMeta]") def test_unicode_repr(): u = u"üniçodé" ustr = u class C(object): def __repr__(self): return ustr c = C() p = pretty.pretty(c) nt.assert_equal(p, u) p = pretty.pretty([c]) nt.assert_equal(p, u'[%s]' % u) def test_basic_class(): def type_pprint_wrapper(obj, p, cycle): if obj is MyObj: type_pprint_wrapper.called = True return pretty._type_pprint(obj, p, cycle) type_pprint_wrapper.called = False stream = StringIO() printer = pretty.RepresentationPrinter(stream) printer.type_pprinters[type] = type_pprint_wrapper printer.pretty(MyObj) printer.flush() output = stream.getvalue() nt.assert_equal(output, '%s.MyObj' % __name__) nt.assert_true(type_pprint_wrapper.called) def test_collections_defaultdict(): # Create defaultdicts with cycles a = defaultdict() a.default_factory = a b = defaultdict(list) b['key'] = b # Dictionary order cannot be relied on, test against single keys. 
cases = [ (defaultdict(list), 'defaultdict(list, {})'), (defaultdict(list, {'key': '-' * 50}), "defaultdict(list,\n" " {'key': '--------------------------------------------------'})"), (a, 'defaultdict(defaultdict(...), {})'), (b, "defaultdict(list, {'key': defaultdict(...)})"), ] for obj, expected in cases: nt.assert_equal(pretty.pretty(obj), expected) def test_collections_ordereddict(): # Create OrderedDict with cycle a = OrderedDict() a['key'] = a cases = [ (OrderedDict(), 'OrderedDict()'), (OrderedDict((i, i) for i in range(1000, 1010)), 'OrderedDict([(1000, 1000),\n' ' (1001, 1001),\n' ' (1002, 1002),\n' ' (1003, 1003),\n' ' (1004, 1004),\n' ' (1005, 1005),\n' ' (1006, 1006),\n' ' (1007, 1007),\n' ' (1008, 1008),\n' ' (1009, 1009)])'), (a, "OrderedDict([('key', OrderedDict(...))])"), ] for obj, expected in cases: nt.assert_equal(pretty.pretty(obj), expected) def test_collections_deque(): # Create deque with cycle a = deque() a.append(a) cases = [ (deque(), 'deque([])'), (deque(i for i in range(1000, 1020)), 'deque([1000,\n' ' 1001,\n' ' 1002,\n' ' 1003,\n' ' 1004,\n' ' 1005,\n' ' 1006,\n' ' 1007,\n' ' 1008,\n' ' 1009,\n' ' 1010,\n' ' 1011,\n' ' 1012,\n' ' 1013,\n' ' 1014,\n' ' 1015,\n' ' 1016,\n' ' 1017,\n' ' 1018,\n' ' 1019])'), (a, 'deque([deque(...)])'), ] for obj, expected in cases: nt.assert_equal(pretty.pretty(obj), expected) def test_collections_counter(): class MyCounter(Counter): pass cases = [ (Counter(), 'Counter()'), (Counter(a=1), "Counter({'a': 1})"), (MyCounter(a=1), "MyCounter({'a': 1})"), ] for obj, expected in cases: nt.assert_equal(pretty.pretty(obj), expected) def test_mappingproxy(): MP = types.MappingProxyType underlying_dict = {} mp_recursive = MP(underlying_dict) underlying_dict[2] = mp_recursive underlying_dict[3] = underlying_dict cases = [ (MP({}), "mappingproxy({})"), (MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"), (MP({k: k.upper() for k in string.ascii_lowercase}), "mappingproxy({'a': 'A',\n" " 'b': 'B',\n" " 'c': 'C',\n" " 'd': 'D',\n" " 'e': 'E',\n" " 'f': 'F',\n" " 'g': 'G',\n" " 'h': 'H',\n" " 'i': 'I',\n" " 'j': 'J',\n" " 'k': 'K',\n" " 'l': 'L',\n" " 'm': 'M',\n" " 'n': 'N',\n" " 'o': 'O',\n" " 'p': 'P',\n" " 'q': 'Q',\n" " 'r': 'R',\n" " 's': 'S',\n" " 't': 'T',\n" " 'u': 'U',\n" " 'v': 'V',\n" " 'w': 'W',\n" " 'x': 'X',\n" " 'y': 'Y',\n" " 'z': 'Z'})"), (mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"), (underlying_dict, "{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"), ] for obj, expected in cases: nt.assert_equal(pretty.pretty(obj), expected)
unnikrishnankgs/va
venv/lib/python3.5/site-packages/IPython/lib/tests/test_pretty.py
Python
bsd-2-clause
11,154
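# A minimal sketch of the _repr_pretty_ protocol these tests exercise,
# mirroring the MyList example above; Pair is an illustrative class.
from IPython.lib import pretty

class Pair(object):
    def __init__(self, a, b):
        self.a, self.b = a, b

    def _repr_pretty_(self, p, cycle):
        if cycle:                         # guard self-referencing structures
            p.text('Pair(...)')
        else:
            with p.group(5, 'Pair(', ')'):
                p.pretty(self.a)
                p.text(',')
                p.breakable()             # becomes a space or a line break
                p.pretty(self.b)

print(pretty.pretty(Pair(1, 2)))          # Pair(1, 2)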
SQL = """ create table users ( username varchar primary_key unique not null, -- username password varchar not null, -- encrypted password is_superuser boolean not null default 0, -- is admin user created timestamp not null, -- user creation timestamp email varchar unique not null, -- email address confirmed timestamp, -- email confirmed timestamp options varchar default '{}' -- arbitary user data ); """ def up(db, conf): db.executescript(SQL)
Outernet-Project/broadcast-portal
broadcast/migrations/sessions/01_add_users_table.py
Python
gpl-3.0
594
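# A hedged sketch of how a migration runner might apply the module above;
# sqlite3 stands in for whatever connection object the runner passes as db.
import sqlite3

db = sqlite3.connect(':memory:')
up(db, conf={})   # conf is accepted but unused by this migration
db.execute("insert into users (username, password, created, email) "
           "values ('alice', 'secret', current_timestamp, 'a@example.com')")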
from django.conf.urls import patterns, url from corpus import views from corpus import api urlpatterns = patterns( '', # Next item to label url(r'^next_segment_to_label/(?P<relation_id>\d+)/', views.next_segment_to_label, name='next_segment_to_label'), url(r'^next_document_to_label/(?P<relation_id>\d+)/', views.next_document_to_label, name='next_document_to_label'), # Navigate labeled items url(r'^navigate_labeled_segments/(?P<relation_id>\d+)/(?P<segment_id>\d+)/(?P<direction>\w+)/judgeless', views.navigate_labeled_segments, kwargs={"judgeless": True}, name='navigate_labeled_segments_judgeless'), url(r'^navigate_labeled_documents/(?P<relation_id>\d+)/(?P<document_id>\d+)/(?P<direction>\w+)/judgeless', views.navigate_labeled_documents, kwargs={"judgeless": True}, name='navigate_labeled_documents_judgeless'), url(r'^navigate_labeled_segments/(?P<relation_id>\d+)/(?P<segment_id>\d+)/(?P<direction>\w+)/', views.navigate_labeled_segments, name='navigate_labeled_segments'), url(r'^navigate_labeled_documents/(?P<relation_id>\d+)/(?P<document_id>\d+)/(?P<direction>\w+)/', views.navigate_labeled_documents, name='navigate_labeled_documents'), # Labeling Forms & Views url(r'^label_evidence_for_segment/(?P<relation_id>\d+)/(?P<segment_id>\d+)/', views.LabelEvidenceOnSegmentView.as_view(), name='label_evidence_for_segment'), url(r'^label_evidence_for_document/(?P<relation_id>\d+)/(?P<document_id>\d+)/', views.LabelEvidenceOnDocumentView.as_view(), name='label_evidence_for_document'), # Human in the loop url(r'^human_in_the_loop/(?P<relation_id>\d+)/(?P<segment_id>\d+)/', views.HumanInTheLoopView.as_view(), name='human_in_the_loop_segment'), url(r'^human_in_the_loop/(?P<relation_id>\d+)/', views.human_in_the_loop, name='human_in_the_loop'), # Document navigation url(r'^navigate_documents/(?P<document_id>\d+)/(?P<direction>\w+)/', views.navigate_documents, name='navigate_documents'), url(r'^navigate_document/(?P<document_id>\d+)/', views.DocumentNavigation.as_view(), name='navigate_document'), # CRUD Angular EOs url(r'^crud/entity_occurrence/?$', api.EOCRUDView.as_view(), name='eo_crud_view'), url(r'^crud/entity/?$', api.EntityCRUDView.as_view(), name='entity_crud_view'), url(r'^create_eo/?$', views.create_entity_occurrence, name='create_entity_occurrence'), )
machinalis/iepy
iepy/webui/corpus/urls.py
Python
bsd-3-clause
2,591
# -*- coding: utf-8 -*- # # EAV-Django is a reusable Django application which implements EAV data model # Copyright © 2009—2010 Andrey Mikhaylenko # # This file is part of EAV-Django. # # EAV-Django is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # EAV-Django is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with EAV-Django. If not, see <http://gnu.org/licenses/>. """ Forms ~~~~~ """ # python from copy import deepcopy # django import django.forms as forms from django.forms import (BooleanField, CharField, CheckboxSelectMultiple, DateField, FloatField, ModelForm, ModelChoiceField, ModelMultipleChoiceField, ValidationError) from django.contrib.admin.widgets import AdminDateWidget, FilteredSelectMultiple, AdminRadioSelect #, RelatedFieldWidgetWrapper from django.utils.datastructures import SortedDict from django.utils.translation import ugettext_lazy as _ # this app __all__ = ['BaseSchemaForm', 'BaseDynamicEntityForm'] class BaseSchemaForm(ModelForm): """ Base class for schema forms. """ def clean_name(self): "Avoid name clashes between static and dynamic attributes." name = self.cleaned_data['name'] reserved_names = self._meta.model._meta.get_all_field_names() if name not in reserved_names: return name raise ValidationError(_('Attribute name must not clash with reserved names' ' ("%s")') % '", "'.join(reserved_names)) class BaseDynamicEntityForm(ModelForm): """ ModelForm for entity with support for EAV attributes. Form fields are created on the fly depending on Schema defined for given entity instance. If no schema is defined (i.e. the entity instance has not been saved yet), only static fields are used. However, on form validation the schema will be retrieved and EAV fields dynamically added to the form, so when the validation is actually done, all EAV fields are present in it (unless Rubric is not defined). """ FIELD_CLASSES = { 'text': CharField, 'float': FloatField, 'date': DateField, 'bool': BooleanField, 'one': ModelChoiceField, 'many': ModelMultipleChoiceField, #RelatedFieldWidgetWrapper(MultipleChoiceField), #'range': RangeField, #'object': ModelChoiceField } FIELD_EXTRA = { 'date': {'widget': AdminDateWidget}, 'one': lambda schema: { 'widget': AdminRadioSelect }, 'many': lambda schema: { 'widget': CheckboxSelectMultiple if len(schema.get_choices()) <= 5 else FilteredSelectMultiple(schema.title, is_stacked=False) }, } def __init__(self, data=None, *args, **kwargs): super(BaseDynamicEntityForm, self).__init__(data, *args, **kwargs) self._build_dynamic_fields() def check_eav_allowed(self): """ Returns True if dynamic attributes can be added to this form. If False is returned, only normal fields will be displayed. 
""" return bool( self.instance) # and self.instance.check_eav_allowed()) # XXX would break form where stuff is _being_ defined @classmethod def get_dynamic_form(cls, data, model=None): fields = SortedDict() initial = {} for schema in data: defaults = { 'label': schema.title.capitalize(), 'required': schema.required, 'help_text': schema.help_text, } datatype = schema.datatype if datatype == schema.TYPE_MANY: defaults.update({'queryset': schema.get_choices()}) if model is not None: choices = getattr(model, schema.name) defaults.update({'initial': [x.pk for x in choices]}) elif datatype == schema.TYPE_ONE: defaults.update( {'queryset': schema.get_choices(), 'empty_label': None if schema.required else u"---------"}) if model is not None: choice = getattr(model, schema.name) defaults.update({'initial': choice.pk if choice else None}) extra = cls.FIELD_EXTRA.get(datatype, {}) if hasattr(extra, '__call__'): extra = extra(schema) defaults.update(extra) MappedField = cls.FIELD_CLASSES[datatype] fields[schema.name] = MappedField(**defaults) # fill initial data (if attribute was already defined) if model is not None: value = getattr(model, schema.name) if value and not datatype in (schema.TYPE_ONE, schema.TYPE_MANY): # choices are already done above initial[schema.name] = value class DynamicFieldsForm(forms.Form): def __init__(self, *args, **kwargs): super(DynamicFieldsForm, self).__init__(*args, **kwargs) self.fields = fields self.initial = initial return DynamicFieldsForm def _build_dynamic_fields(self): # reset form fields self.fields = deepcopy(self.base_fields) # do not display dynamic fields if some fields are yet defined if not self.check_eav_allowed(): return for schema in self.instance.get_schemata(): if schema.name in (self.exclude if hasattr(self, "exclude") else []): continue defaults = { 'label': schema.title.capitalize(), 'required': schema.required, 'help_text': schema.help_text, } datatype = schema.datatype if datatype == schema.TYPE_MANY: choices = getattr(self.instance, schema.name) defaults.update({'queryset': schema.get_choices(), 'initial': [x.pk for x in choices]}) elif datatype == schema.TYPE_ONE: choice = getattr(self.instance, schema.name) defaults.update({'queryset': schema.get_choices(), 'initial': choice.pk if choice else None, # if schema is required remove --------- from ui 'empty_label': None if schema.required else u"---------"}) '''elif datatype == 'object': choice = getattr(self.instance, "pk") defaults.update({'queryset': schema.get_choices(), 'initial': choice.pk if choice else None, # if schema is required remove --------- from ui 'empty_label' : None if schema.required else u"---------"})''' extra = self.FIELD_EXTRA.get(datatype, {}) if hasattr(extra, '__call__'): extra = extra(schema) defaults.update(extra) MappedField = self.FIELD_CLASSES[datatype] self.fields[schema.name] = MappedField(**defaults) # fill initial data (if attribute was already defined) value = getattr(self.instance, schema.name) if value and not datatype in (schema.TYPE_ONE, schema.TYPE_MANY): # choices are already done above self.initial[schema.name] = value def save(self, commit=True): """ Saves this ``form``'s cleaned_data into model instance ``self.instance`` and related EAV attributes. Returns ``instance``. """ if self.errors: raise ValueError("The %s could not be saved because the data didn't" " validate." 
% self.instance._meta.object_name) # create entity instance, don't save yet instance = super(BaseDynamicEntityForm, self).save(commit=False) # assign attributes if it came from eav schema_names = instance.get_schema_names() for name in self.fields.keys(): if name in schema_names: value = self.cleaned_data.get(name) setattr(instance, name, value) # save entity and its attributes if commit: instance.save() return instance save.alters_data = True def save_m2m(self, *a, **kw): # stub for admin TODO: check if we don't need to super() if entity indeed has m2m pass
buremba/eav-django
eav/forms.py
Python
lgpl-3.0
9,003
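# A minimal sketch of the datatype dispatch BaseDynamicEntityForm performs
# per schema (values are illustrative; assumes Django settings are already
# configured). 'date' is convenient because its FIELD_EXTRA entry is a plain
# dict; the 'one'/'many' entries are callables that take the schema object.
datatype = 'date'
defaults = {'label': 'Birthday', 'required': False, 'help_text': ''}
extra = BaseDynamicEntityForm.FIELD_EXTRA.get(datatype, {})
if hasattr(extra, '__call__'):
    extra = extra(schema)  # only reached for 'one'/'many', which need a schema
defaults.update(extra)
field = BaseDynamicEntityForm.FIELD_CLASSES[datatype](**defaults)  # DateField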
from distutils.core import setup setup( name='TFCommon', version='0.2', packages=[''], url='', license='', author='Alex M. Wang', author_email='nanqiao15@126.com', description='Common modules for tensorflow developer', requires=['six', 'tensorflow'] )
MU94W/TFCommon
setup.py
Python
mit
285
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Momentum.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf class MomentumOptimizerTest(tf.test.TestCase): def testBasic(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in tf.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in tf.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) def testTensorLearningRateAndMomentum(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = tf.train.MomentumOptimizer( learning_rate=tf.constant(2.0), momentum=tf.constant(0.9)) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in tf.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in tf.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) def _dbParamsMom01(self): """Return dist-belief momentum values. Return values been generated from the dist-belief momentum unittest, running with a learning rate of 0.1 and a momentum of 0.1. These values record how a parameter vector of size 10, initialized with 0.0, gets updated with 10 consecutive momentum steps. It uses random gradients. Returns: db_grad: The gradients to apply db_out: The parameters after the momentum update. 
""" db_grad = [[]] * 10 db_out = [[]] * 10 # pylint: disable=line-too-long db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615] db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618] db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521] db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544] db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311] db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189] db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648] db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303] db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287] db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544] db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229] db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717] db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251] db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997] db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922] db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418] db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227] db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781] db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711] db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295] # pylint: enable=line-too-long return db_grad, db_out def testLikeDistBeliefMom01(self): with self.test_session(): db_grad, db_out = self._dbParamsMom01() num_samples = len(db_grad) var0 = tf.Variable([0.0] * num_samples) grads0 = tf.constant([0.0] * num_samples) mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1) mom_update = mom_opt.apply_gradients(zip([grads0], [var0])) tf.initialize_all_variables().run() for i in xrange(num_samples): mom_update.run(feed_dict={grads0: db_grad[i]}) self.assertAllClose(np.array(db_out[i]), var0.eval()) def testSparse(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable(tf.zeros([4, 2], dtype=dtype)) 
var1 = tf.Variable(tf.constant(1.0, dtype, [4, 2])) grads0 = tf.IndexedSlices(tf.constant([[.1, .1]], dtype=dtype), tf.constant([1]), tf.constant([4, 2])) grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]], dtype=dtype), tf.constant([2, 3]), tf.constant([4, 2])) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) # Fetch params to validate initial values self.assertAllClose([0, 0], var0.eval()[0]) self.assertAllClose([0, 0], var0.eval()[1]) self.assertAllClose([1, 1], var1.eval()[2]) # Step 1: the momentum accumulators are 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([0, 0]), slot0.eval()[0]) self.assertAllCloseAccordingToType( np.array([.1, .1]), slot0.eval()[1]) self.assertAllCloseAccordingToType( np.array([.01, .01]), slot1.eval()[2]) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0]) self.assertAllCloseAccordingToType(np.array([- (0.1 * 2.0), - (0.1 * 2.0)]), var0.eval()[1]) self.assertAllCloseAccordingToType(np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), var1.eval()[2]) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0, 0]), slot0.eval()[0]) self.assertAllCloseAccordingToType(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()[1]) self.assertAllCloseAccordingToType(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()[2]) # Check that the parameters have been updated. self.assertAllClose(np.array([0, 0]), var0.eval()[0]) self.assertAllCloseAccordingToType( np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()[1]) self.assertAllCloseAccordingToType( np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()[2]) def testSharing(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update1 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) mom_update2 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update1.run() # Check that the momentum accumulators have been updated. 
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the second momentum accumulators contain the previous update. mom_update2.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) if __name__ == "__main__": tf.test.main()
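# For reference, the update rule that the hand-computed constants in the
# assertions above encode (a sketch; lr = learning_rate, m = momentum):
#
#   accum = m * accum + grad
#   var -= lr * accum
#
# e.g. in step 2 of testSharing: accum = 0.9 * 0.1 + 0.1 and
# var0 -= 2.0 * accum, which is exactly the expression asserted against
# var0.eval() above.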
sachinpro/sachinpro.github.io
tensorflow/python/training/momentum_test.py
Python
apache-2.0
17,251
# coding=utf-8
from __future__ import absolute_import, division, print_function

__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"

from flask import request, jsonify, make_response

from octoprint.settings import settings

from octoprint.server import printer, printerProfileManager, NO_CONTENT
from octoprint.server.api import api
from octoprint.server.util.flask import restricted_access, get_json_command_from_request


@api.route("/connection", methods=["GET"])
def connectionState():
    state, port, baudrate, printer_profile = printer.get_current_connection()
    current = {
        "state": state,
        "port": port,
        "baudrate": baudrate,
        "printerProfile": printer_profile["id"] if printer_profile is not None and "id" in printer_profile else "_default"
    }

    return jsonify({"current": current, "options": _get_options()})


@api.route("/connection", methods=["POST"])
@restricted_access
def connectionCommand():
    valid_commands = {
        "connect": [],
        "disconnect": [],
        "fake_ack": []
    }

    command, data, response = get_json_command_from_request(request, valid_commands)
    if response is not None:
        return response

    if command == "connect":
        connection_options = printer.__class__.get_connection_options()

        port = None
        baudrate = None
        printerProfile = None
        if "port" in data.keys():
            port = data["port"]
            if port not in connection_options["ports"] and port != "AUTO":
                return make_response("Invalid port: %s" % port, 400)
        if "baudrate" in data.keys():
            baudrate = data["baudrate"]
            if baudrate not in connection_options["baudrates"] and baudrate != 0:
                return make_response("Invalid baudrate: %d" % baudrate, 400)
        if "printerProfile" in data.keys():
            printerProfile = data["printerProfile"]
            if not printerProfileManager.exists(printerProfile):
                return make_response("Invalid printer profile: %s" % printerProfile, 400)
        if "save" in data.keys() and data["save"]:
            settings().set(["serial", "port"], port)
            settings().setInt(["serial", "baudrate"], baudrate)
            printerProfileManager.set_default(printerProfile)
        if "autoconnect" in data.keys():
            settings().setBoolean(["serial", "autoconnect"], data["autoconnect"])
        settings().save()
        printer.connect(port=port, baudrate=baudrate, profile=printerProfile)
    elif command == "disconnect":
        printer.disconnect()
    elif command == "fake_ack":
        printer.fake_ack()

    return NO_CONTENT


def _get_options():
    connection_options = printer.__class__.get_connection_options()
    profile_options = printerProfileManager.get_all()
    default_profile = printerProfileManager.get_default()

    options = dict(
        ports=sorted(connection_options["ports"]),
        baudrates=sorted(connection_options["baudrates"], reverse=True),
        printerProfiles=[dict(id=printer_profile["id"],
                              name=printer_profile["name"] if "name" in printer_profile else printer_profile["id"])
                         for printer_profile in profile_options.values() if "id" in printer_profile],
        portPreference=connection_options["portPreference"],
        baudratePreference=connection_options["baudratePreference"],
        printerProfilePreference=default_profile["id"] if "id" in default_profile else None
    )

    return options
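# Example payload the POST handler above accepts (a sketch: the /api prefix
# assumes the usual mount point of this blueprint, and the port/baudrate
# values are placeholders, not defaults):
#
#   POST /api/connection
#   {
#       "command": "connect",
#       "port": "/dev/ttyUSB0",
#       "baudrate": 115200,
#       "printerProfile": "_default",
#       "save": true,
#       "autoconnect": true
#   }
#
# "disconnect" and "fake_ack" take no additional fields.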
JackGavin13/octoprint-test-not-finished
src/octoprint/server/api/connection.py
Python
agpl-3.0
3,323
#!/usr/bin/env python
# encoding: utf-8

__author__ = "Carlos González Sesmero"

# Names exported when importing with the * symbol.
__all__ = ["filecorrector", "mailing", "utilidades"]

# print("Importamos {} en el path {}".format(__name__, __path__))
carlos-gs/MiStuRe
misture_core/MISTURE/utils/__init__.py
Python
gpl-3.0
227
""" Test receiving delayed (offline) messages on a text channel. """ import datetime from twisted.words.xish import domish from gabbletest import exec_test from servicetest import assertEquals import constants as cs def test(q, bus, conn, stream): m = domish.Element((None, 'message')) m['from'] = 'foo@bar.com' m['type'] = 'chat' m.addElement('body', content='hello') # add timestamp information x = m.addElement(('jabber:x:delay', 'x')) x['stamp'] = '20070517T16:15:01' stream.send(m) event = q.expect('dbus-signal', signal='NewChannels') path, props = event.args[0][0] assertEquals(cs.CHANNEL_TYPE_TEXT, props[cs.CHANNEL_TYPE]) assertEquals(cs.HT_CONTACT, props[cs.TARGET_HANDLE_TYPE]) jid = conn.inspect_contact_sync(props[cs.TARGET_HANDLE]) assertEquals('foo@bar.com', jid) message_received = q.expect('dbus-signal', signal='MessageReceived') message = message_received.args[0] header = message[0] message_sent_timestamp = header['message-sent'] assert str(datetime.datetime.utcfromtimestamp(message_sent_timestamp) == '2007-05-17 16:15:01'), header message_received_timestamp = header['message-received'] assert message_received_timestamp > message_sent_timestamp, header assert message[1]['content'] == 'hello', message if __name__ == '__main__': exec_test(test)
Ziemin/telepathy-gabble
tests/twisted/text/test-text-delayed.py
Python
lgpl-2.1
1,382
from morsel.panda import *
from morsel.morselc import ShaderProgram as CShaderProgram

#-------------------------------------------------------------------------------

class ShaderProgram(CShaderProgram):
    def __init__(self, filename=None, node=None, **kargs):
        super(ShaderProgram, self).__init__(filename)

        self.filename = filename
        if node:
            shader = self.make()
            node.setShader(shader)
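# Hypothetical usage (the shader path and node name are illustrative, not
# part of morsel):
#
#   program = ShaderProgram(filename="shaders/phong.cg", node=modelNode)
#
# When a node is given, the program is compiled via make() and bound to the
# node immediately.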
kralf/morsel
python/lib/morsel/utility/shader_program.py
Python
gpl-2.0
416
import logging
import signal


# We use the signal handler for long-running condor jobs to save
# output and stop the solver.
def signal_handler(signum, frame):
    logging.warning('Received external interrupt signal. Solvers will stop and save data')
    logging.warning('Exiting.')
    exit(0)


signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
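# Minimal sketch of the intended use (the solver loop is a stand-in, not part
# of this module): any long-running work placed after these lines is
# interrupted cleanly, since SIGINT/SIGTERM now route through signal_handler.
#
#   for instance in instances:    # hypothetical solver loop
#       solve(instance)           # a killed condor job exits via the handler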
daajoe/asp_horn_backdoors
signal_handling.py
Python
gpl-2.0
370
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-23 20:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('podcast', '0015_show_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='episode',
            name='guid',
            field=models.CharField(editable=False, max_length=64, verbose_name='GUID'),
        ),
    ]
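# The field state this migration leaves on Episode (a sketch reconstructed
# from the AlterField operation above; the rest of the model is elided):
#
#   guid = models.CharField(editable=False, max_length=64, verbose_name='GUID')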
richardcornish/django-itunespodcast
podcast/migrations/0016_auto_20170923_2000.py
Python
bsd-3-clause
478
from wayf.utils import ShibbolethMetadata  # optional for now, getUserRealm
import time

from django.shortcuts import render_to_response
from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
from django.template import RequestContext


def wayf(request):
    # Instantiate the metadata
    metadata = ShibbolethMetadata(settings.SHIB_METADATA)

    # Get the IdP list
    idps = metadata.getIdps()
    sps = metadata.getSps()

    # A list to hold the cookies-to-be-set
    cookies = []

    # Get the current IdP, if there is one
    if settings.IDP_COOKIE in request.COOKIES.keys():
        current_idp = idps[request.COOKIES[settings.IDP_COOKIE]]
    else:
        current_idp = None

    # Try to get the user's last used IdP
    if settings.LAST_IDP_COOKIE in request.COOKIES.keys():
        selectedidp = idps[request.COOKIES[settings.LAST_IDP_COOKIE]]
    else:
        selectedidp = None

    # If this is the first visit, use some IP-based heuristics, as in utils.py's getUserRealm()
    # this uses the getUserRealm magic from dnsutils
    # if not selectedidp:
    #     selectedidp = idps.getIdpForScope(getUserRealm(request.META['REMOTE_ADDR']))

    # First check to see if anything has changed
    if request.method == "POST":
        if 'clear' in request.POST.keys():
            if request.POST['clear']:
                response = HttpResponseRedirect("/")
                response.delete_cookie(settings.IDP_COOKIE, domain=settings.COOKIE_DOMAIN)
                response['P3P'] = settings.P3P_HEADER
                return response

        elif 'user_idp' in request.POST.keys():
            current_idp = idps[request.POST['user_idp']]

            if current_idp:
                cookies.append({'name': settings.LAST_IDP_COOKIE,
                                'data': request.POST['user_idp'],
                                'age': 86400 * 100})

                if request.POST.get('save'):
                    if request.POST.get('savetype') == 'perm':
                        age = 86400 * 100
                    else:
                        age = None

                    cookies.append({'name': settings.IDP_COOKIE,
                                    'data': request.POST['user_idp'],
                                    'age': age})

    # At this point we have handled the cookies and have an IdP, if the intent is such
    if not request.GET:
        # We were called without any arguments
        if current_idp:
            response = render_to_response("wayf_set.html",
                                          {'currentidp': current_idp.getName()},
                                          context_instance=RequestContext(request))
            for cookie in cookies:
                if cookie['age']:
                    expires = time.strftime("%a, %d-%m-%y %H:%M:%S GMT",
                                            time.gmtime(time.time() + cookie['age']))
                else:
                    expires = None
                response.set_cookie(cookie['name'], cookie['data'],
                                    domain=settings.COOKIE_DOMAIN,
                                    max_age=cookie['age'], expires=expires)
        else:
            idplist = idps.getIdpsByCategory()
            response = render_to_response("wayf.html",
                                          {'idplist': idplist, 'request': request, 'selected': selectedidp},
                                          context_instance=RequestContext(request))

        response['P3P'] = settings.P3P_HEADER
        return response

    # If we got to this point, then this is a request coming from an SP
    if current_idp:
        # We have an IdP to route the request to
        if 'entityID' in request.GET.keys() and 'return' in request.GET.keys():
            # a SAML Discovery Service request
            # Discovery Service mandates that 'entityID' holds the SP's ID
            if 'returnIDParam' in request.GET.keys() and request.GET['returnIDParam']:
                returnparam = request.GET['returnIDParam']
            else:
                returnparam = 'entityID'

            # check if the return url is a valid response url
            returnval = request.GET['return']
            if not sps.isDiscoveryResponseLocation(returnval):
                response = render_to_response("400.html")
                response.status_code = 400  # bad request
            else:
                response = HttpResponseRedirect(returnval + "&" + urlencode(((returnparam, current_idp.id),)))

        elif 'shire' in request.GET.keys() and 'target' in request.GET.keys():
            # an old Shibboleth 1.x request
            # We just redirect the user to the given IdP
            response = HttpResponseRedirect(
                current_idp.sso['urn:mace:shibboleth:1.0:profiles:AuthnRequest'] + "?" + request.GET.urlencode()
            )
        else:
            response = render_to_response("400.html")
            response.status_code = 400  # bad request

        for cookie in cookies:
            if cookie['age']:
                expires = time.strftime("%a, %d-%m-%y %H:%M:%S GMT",
                                        time.gmtime(time.time() + cookie['age']))
            else:
                expires = None
            response.set_cookie(cookie['name'], cookie['data'],
                                domain=settings.COOKIE_DOMAIN,
                                max_age=cookie['age'], expires=expires)

        response['P3P'] = settings.P3P_HEADER
        return response

    # If we got this far, then we need to be redirected, but don't know where to.
    # Let the user pick an IdP

    # Generate the category - idp list
    idplist = idps.getIdpsByCategory()

    # Render the appropriate wayf template
    response = render_to_response("wayf_from_sp.html",
                                  {'idplist': idplist, 'request': request, 'selected': selectedidp},
                                  context_instance=RequestContext(request))
    response['P3P'] = settings.P3P_HEADER
    return response


def index(request):
    return render_to_response("index.html", context_instance=RequestContext(request))


def idp_list(request):
    metadata = ShibbolethMetadata(settings.SHIB_METADATA)
    idps = metadata.getIdps()
    idplist = idps.getIdpsByCategory(exclude=('wayf', 'test'))
    return render_to_response("idp_list.html", {'idplist': idplist},
                              context_instance=RequestContext(request))


def sp_list(request):
    metadata = ShibbolethMetadata(settings.SHIB_METADATA)
    sps = metadata.getSps()
    splist = sps.getEntitiesByGroup()
    # splist_other = entities in the top group
    splist_other = [i[1] for i in splist if i[0] == 'http://www.grnet.gr/aai'][0]
    # filterids = entity.id for entities not in the top group
    filterids = [o['id'] for i in splist for o in i[1] if i[0] != 'http://www.grnet.gr/aai']
    # filter out entities not in the top group from splist_other
    splist_other_new = filter(lambda x: x['id'] not in filterids, splist_other)
    # replace top group with filtered out version in splist
    splist.insert(splist.index(('http://www.grnet.gr/aai', splist_other)), ('other', splist_other_new))
    splist.remove(('http://www.grnet.gr/aai', splist_other))
    return render_to_response("sp_list.html", {'splist': splist},
                              context_instance=RequestContext(request))


def entity_list(request, group=None):
    if group is not None:
        group = "http://aai.grnet.gr%s" % request.path_info
    metadata = ShibbolethMetadata(settings.SHIB_METADATA)
    entities = metadata.getEntities(augmented=True)
    entlist = entities.getEntities(group=group, logosize=(100, 100))
    return render_to_response("entity_list.html", {'entlist': entlist, 'group': group},
                              context_instance=RequestContext(request))


"""
example support view
uses urldecode from dnsutils and needs an idpmap from somewhere

def support(request, mode="support"):
    # This gets triggered when a user's attributes fail to be accepted
    # by a service provider. The aim is to produce a help page, indicating
    # the user's home institution contact details.

    opts = {}
    userIdp = None

    # Check to see whether _redirect_user_idp is set. This cookie will include
    # The user's selected IdP
    if settings.IDP_COOKIE in request.COOKIES.keys():
        userIdp = urldecode(request.COOKIES[settings.IDP_COOKIE])
    elif settings.LAST_IDP_COOKIE in request.COOKIES.keys():
        userIdp = urldecode(request.COOKIES[settings.LAST_IDP_COOKIE])

    if userIdp:
        # Check to see if this is one of the old WAYF entries and map it to a
        # new entityID instead.
        if userIdp in idpmap.keys():
            userIdp = idpmap[userIdp]

        # Get the corresponding IdentityProvider instance
        idp = ShibbolethMetadata(settings.SHIB_METADATA).getIdps()[userIdp]

        if idp:
            opts['idp'] = idp
            opts['idpname'] = idp.getName()

    if mode == "help":
        response = render_to_response("help.html", opts, context_instance=RequestContext(request))
    else:
        response = render_to_response("support.html", opts, context_instance=RequestContext(request))

    response['P3P'] = 'CP="NOI CUR DEVa OUR IND COM NAV PRE"'
    return response
"""


def setlanguage(request, lang):
    try:
        url = request.META['HTTP_REFERER']
    except KeyError:
        url = '/'

    response = HttpResponseRedirect(url)
    response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang,
                        domain=settings.COOKIE_DOMAIN,
                        max_age=100 * 86400,
                        expires=time.strftime("%a, %d-%m-%y %H:%M:%S GMT",
                                              time.gmtime(time.time() + 100 * 86400)))
    response['P3P'] = settings.P3P_HEADER
    return response
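# The cookie-expiry expression above is repeated three times; a helper along
# these lines (a sketch, not wired into the views) would capture it:
#
#   def _expires_in(age_seconds):
#       return time.strftime("%a, %d-%m-%y %H:%M:%S GMT",
#                            time.gmtime(time.time() + age_seconds))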
JensTimmerman/django-wayf
wayf/views.py
Python
gpl-3.0
9,435
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'G:\WorkDir\gas-sensing_resistors\SC_spectrum\SC_main.ui' # # Created: Wed Jan 20 20:49:15 2016 # by: PyQt4 UI code generator 4.11.3 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui from Rt_mplCanvas import Rt_CanvasWidget from SC_mplCanvas import SC_CanvasWidget try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_SC_APP(object): def setupUi(self, SC_APP): SC_APP.setObjectName(_fromUtf8("SC_APP")) SC_APP.resize(800, 600) SC_APP.setMinimumSize(QtCore.QSize(800, 600)) SC_APP.setMaximumSize(QtCore.QSize(800, 600)) font = QtGui.QFont() font.setPointSize(12) SC_APP.setFont(font) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/lmd.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) SC_APP.setWindowIcon(icon) self.verticalLayout_13 = QtGui.QVBoxLayout(SC_APP) self.verticalLayout_13.setObjectName(_fromUtf8("verticalLayout_13")) self.verticalLayout_12 = QtGui.QVBoxLayout() self.verticalLayout_12.setObjectName(_fromUtf8("verticalLayout_12")) self.horizontalLayout_15 = QtGui.QHBoxLayout() self.horizontalLayout_15.setObjectName(_fromUtf8("horizontalLayout_15")) self.verticalLayout_10 = QtGui.QVBoxLayout() self.verticalLayout_10.setObjectName(_fromUtf8("verticalLayout_10")) self.SC_MPLS = QtGui.QStackedWidget(SC_APP) self.SC_MPLS.setMinimumSize(QtCore.QSize(480, 320)) self.SC_MPLS.setMaximumSize(QtCore.QSize(480, 320)) font = QtGui.QFont() font.setPointSize(12) self.SC_MPLS.setFont(font) self.SC_MPLS.setObjectName(_fromUtf8("SC_MPLS")) self.Rt_MPL = Rt_CanvasWidget() self.Rt_MPL.setObjectName(_fromUtf8("Rt_MPL")) self.SC_MPLS.addWidget(self.Rt_MPL) self.SC_MPL = SC_CanvasWidget() self.SC_MPL.setObjectName(_fromUtf8("SC_MPL")) self.SC_MPLS.addWidget(self.SC_MPL) self.verticalLayout_10.addWidget(self.SC_MPLS) self.log_state = QtGui.QCheckBox(SC_APP) self.log_state.setObjectName(_fromUtf8("log_state")) self.verticalLayout_10.addWidget(self.log_state) self.groupBox_5 = QtGui.QGroupBox(SC_APP) self.groupBox_5.setObjectName(_fromUtf8("groupBox_5")) self.verticalLayout_8 = QtGui.QVBoxLayout(self.groupBox_5) self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8")) self.verticalLayout_7 = QtGui.QVBoxLayout() self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7")) self.horizontalLayout_19 = QtGui.QHBoxLayout() self.horizontalLayout_19.setObjectName(_fromUtf8("horizontalLayout_19")) self.horizontalLayout_12 = QtGui.QHBoxLayout() self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12")) self.label_18 = QtGui.QLabel(self.groupBox_5) self.label_18.setMinimumSize(QtCore.QSize(64, 32)) self.label_18.setMaximumSize(QtCore.QSize(64, 32)) self.label_18.setObjectName(_fromUtf8("label_18")) self.horizontalLayout_12.addWidget(self.label_18) self.run_time = QtGui.QLineEdit(self.groupBox_5) self.run_time.setMinimumSize(QtCore.QSize(113, 22)) self.run_time.setMaximumSize(QtCore.QSize(113, 22)) self.run_time.setReadOnly(True) self.run_time.setObjectName(_fromUtf8("run_time")) self.horizontalLayout_12.addWidget(self.run_time) self.label_5 = QtGui.QLabel(self.groupBox_5) self.label_5.setObjectName(_fromUtf8("label_5")) 
self.horizontalLayout_12.addWidget(self.label_5) self.horizontalLayout_19.addLayout(self.horizontalLayout_12) self.horizontalLayout_18 = QtGui.QHBoxLayout() self.horizontalLayout_18.setObjectName(_fromUtf8("horizontalLayout_18")) self.label_19 = QtGui.QLabel(self.groupBox_5) self.label_19.setMinimumSize(QtCore.QSize(56, 32)) self.label_19.setMaximumSize(QtCore.QSize(56, 32)) self.label_19.setObjectName(_fromUtf8("label_19")) self.horizontalLayout_18.addWidget(self.label_19) self.flow1 = QtGui.QLineEdit(self.groupBox_5) self.flow1.setMinimumSize(QtCore.QSize(113, 22)) self.flow1.setMaximumSize(QtCore.QSize(113, 22)) # self.flow1.setReadOnly(True) self.flow1.setObjectName(_fromUtf8("flow1")) self.horizontalLayout_18.addWidget(self.flow1) self.label_7 = QtGui.QLabel(self.groupBox_5) self.label_7.setMinimumSize(QtCore.QSize(48, 32)) self.label_7.setMaximumSize(QtCore.QSize(48, 32)) self.label_7.setObjectName(_fromUtf8("label_7")) self.horizontalLayout_18.addWidget(self.label_7) self.f1_open = QtGui.QCheckBox(self.groupBox_5) self.f1_open.setText(_fromUtf8("")) self.f1_open.setObjectName(_fromUtf8("f1_open")) self.horizontalLayout_18.addWidget(self.f1_open) self.horizontalLayout_19.addLayout(self.horizontalLayout_18) self.verticalLayout_7.addLayout(self.horizontalLayout_19) self.horizontalLayout_20 = QtGui.QHBoxLayout() self.horizontalLayout_20.setObjectName(_fromUtf8("horizontalLayout_20")) self.horizontalLayout_13 = QtGui.QHBoxLayout() self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13")) self.label_20 = QtGui.QLabel(self.groupBox_5) self.label_20.setMinimumSize(QtCore.QSize(64, 32)) self.label_20.setMaximumSize(QtCore.QSize(64, 32)) self.label_20.setObjectName(_fromUtf8("label_20")) self.horizontalLayout_13.addWidget(self.label_20) self.now_R = QtGui.QLineEdit(self.groupBox_5) self.now_R.setMinimumSize(QtCore.QSize(113, 22)) self.now_R.setMaximumSize(QtCore.QSize(113, 22)) self.now_R.setReadOnly(True) self.now_R.setObjectName(_fromUtf8("now_R")) self.horizontalLayout_13.addWidget(self.now_R) self.label_6 = QtGui.QLabel(self.groupBox_5) self.label_6.setObjectName(_fromUtf8("label_6")) self.horizontalLayout_13.addWidget(self.label_6) self.horizontalLayout_20.addLayout(self.horizontalLayout_13) self.horizontalLayout_17 = QtGui.QHBoxLayout() self.horizontalLayout_17.setObjectName(_fromUtf8("horizontalLayout_17")) self.label_26 = QtGui.QLabel(self.groupBox_5) self.label_26.setMinimumSize(QtCore.QSize(56, 32)) self.label_26.setMaximumSize(QtCore.QSize(56, 32)) self.label_26.setObjectName(_fromUtf8("label_26")) self.horizontalLayout_17.addWidget(self.label_26) self.flow2 = QtGui.QLineEdit(self.groupBox_5) self.flow2.setMinimumSize(QtCore.QSize(113, 22)) self.flow2.setMaximumSize(QtCore.QSize(113, 22)) # self.flow2.setReadOnly(True) self.flow2.setObjectName(_fromUtf8("flow2")) self.horizontalLayout_17.addWidget(self.flow2) self.label_8 = QtGui.QLabel(self.groupBox_5) self.label_8.setMinimumSize(QtCore.QSize(48, 32)) self.label_8.setMaximumSize(QtCore.QSize(48, 32)) self.label_8.setObjectName(_fromUtf8("label_8")) self.horizontalLayout_17.addWidget(self.label_8) self.f2_open = QtGui.QCheckBox(self.groupBox_5) self.f2_open.setText(_fromUtf8("")) self.f2_open.setObjectName(_fromUtf8("f2_open")) self.horizontalLayout_17.addWidget(self.f2_open) self.horizontalLayout_20.addLayout(self.horizontalLayout_17) self.verticalLayout_7.addLayout(self.horizontalLayout_20) self.horizontalLayout_21 = QtGui.QHBoxLayout() self.horizontalLayout_21.setObjectName(_fromUtf8("horizontalLayout_21")) 
self.horizontalLayout_14 = QtGui.QHBoxLayout() self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14")) self.label_27 = QtGui.QLabel(self.groupBox_5) self.label_27.setMinimumSize(QtCore.QSize(64, 32)) self.label_27.setMaximumSize(QtCore.QSize(64, 32)) self.label_27.setObjectName(_fromUtf8("label_27")) self.horizontalLayout_14.addWidget(self.label_27) self.now_T = QtGui.QLineEdit(self.groupBox_5) self.now_T.setMinimumSize(QtCore.QSize(113, 22)) self.now_T.setMaximumSize(QtCore.QSize(113, 22)) self.now_T.setReadOnly(True) self.now_T.setObjectName(_fromUtf8("now_T")) self.horizontalLayout_14.addWidget(self.now_T) self.label_4 = QtGui.QLabel(self.groupBox_5) self.label_4.setMinimumSize(QtCore.QSize(0, 16)) self.label_4.setMaximumSize(QtCore.QSize(32, 16)) self.label_4.setObjectName(_fromUtf8("label_4")) self.horizontalLayout_14.addWidget(self.label_4) self.horizontalLayout_21.addLayout(self.horizontalLayout_14) self.horizontalLayout_16 = QtGui.QHBoxLayout() self.horizontalLayout_16.setObjectName(_fromUtf8("horizontalLayout_16")) self.label_28 = QtGui.QLabel(self.groupBox_5) self.label_28.setMinimumSize(QtCore.QSize(56, 32)) self.label_28.setMaximumSize(QtCore.QSize(56, 32)) self.label_28.setObjectName(_fromUtf8("label_28")) self.horizontalLayout_16.addWidget(self.label_28) self.flow3 = QtGui.QLineEdit(self.groupBox_5) self.flow3.setMinimumSize(QtCore.QSize(113, 22)) self.flow3.setMaximumSize(QtCore.QSize(113, 22)) # self.flow3.setReadOnly(True) self.flow3.setObjectName(_fromUtf8("flow3")) self.horizontalLayout_16.addWidget(self.flow3) self.label_9 = QtGui.QLabel(self.groupBox_5) self.label_9.setMinimumSize(QtCore.QSize(48, 32)) self.label_9.setMaximumSize(QtCore.QSize(48, 32)) self.label_9.setObjectName(_fromUtf8("label_9")) self.horizontalLayout_16.addWidget(self.label_9) self.f3_open = QtGui.QCheckBox(self.groupBox_5) self.f3_open.setText(_fromUtf8("")) self.f3_open.setObjectName(_fromUtf8("f3_open")) self.horizontalLayout_16.addWidget(self.f3_open) self.horizontalLayout_21.addLayout(self.horizontalLayout_16) self.verticalLayout_7.addLayout(self.horizontalLayout_21) self.verticalLayout_8.addLayout(self.verticalLayout_7) self.verticalLayout_10.addWidget(self.groupBox_5) self.horizontalLayout_15.addLayout(self.verticalLayout_10) self.verticalLayout_5 = QtGui.QVBoxLayout() self.verticalLayout_5.setSpacing(20) self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5")) self.groupBox_15 = QtGui.QGroupBox(SC_APP) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.groupBox_15.sizePolicy().hasHeightForWidth()) self.groupBox_15.setSizePolicy(sizePolicy) self.groupBox_15.setMinimumSize(QtCore.QSize(281, 120)) self.groupBox_15.setMaximumSize(QtCore.QSize(281, 120)) font = QtGui.QFont() font.setPointSize(12) self.groupBox_15.setFont(font) self.groupBox_15.setObjectName(_fromUtf8("groupBox_15")) self.verticalLayout_9 = QtGui.QVBoxLayout(self.groupBox_15) self.verticalLayout_9.setSpacing(10) self.verticalLayout_9.setContentsMargins(10, 0, 10, 0) self.verticalLayout_9.setObjectName(_fromUtf8("verticalLayout_9")) self.verticalLayout_4 = QtGui.QVBoxLayout() self.verticalLayout_4.setSpacing(10) self.verticalLayout_4.setMargin(0) self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4")) self.horizontalLayout_32 = QtGui.QHBoxLayout() self.horizontalLayout_32.setObjectName(_fromUtf8("horizontalLayout_32")) self.label_16 = 
QtGui.QLabel(self.groupBox_15) self.label_16.setObjectName(_fromUtf8("label_16")) self.horizontalLayout_32.addWidget(self.label_16) self.sample_id = QtGui.QLineEdit(self.groupBox_15) self.sample_id.setObjectName(_fromUtf8("sample_id")) self.horizontalLayout_32.addWidget(self.sample_id) self.verticalLayout_4.addLayout(self.horizontalLayout_32) self.horizontalLayout_33 = QtGui.QHBoxLayout() self.horizontalLayout_33.setObjectName(_fromUtf8("horizontalLayout_33")) self.label_21 = QtGui.QLabel(self.groupBox_15) self.label_21.setObjectName(_fromUtf8("label_21")) self.horizontalLayout_33.addWidget(self.label_21) self.save_path = QtGui.QLineEdit(self.groupBox_15) self.save_path.setObjectName(_fromUtf8("save_path")) self.horizontalLayout_33.addWidget(self.save_path) self.btn_savepath = QtGui.QPushButton(self.groupBox_15) self.btn_savepath.setText(_fromUtf8("")) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/folder.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.btn_savepath.setIcon(icon1) self.btn_savepath.setIconSize(QtCore.QSize(16, 16)) self.btn_savepath.setObjectName(_fromUtf8("btn_savepath")) self.horizontalLayout_33.addWidget(self.btn_savepath) self.verticalLayout_4.addLayout(self.horizontalLayout_33) self.horizontalLayout_8 = QtGui.QHBoxLayout() self.horizontalLayout_8.setSpacing(10) self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8")) self.horizontalLayout_35 = QtGui.QHBoxLayout() self.horizontalLayout_35.setObjectName(_fromUtf8("horizontalLayout_35")) self.label_24 = QtGui.QLabel(self.groupBox_15) self.label_24.setMinimumSize(QtCore.QSize(36, 24)) self.label_24.setMaximumSize(QtCore.QSize(36, 24)) self.label_24.setObjectName(_fromUtf8("label_24")) self.horizontalLayout_35.addWidget(self.label_24) self.sample_area = QtGui.QLineEdit(self.groupBox_15) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.sample_area.sizePolicy().hasHeightForWidth()) self.sample_area.setSizePolicy(sizePolicy) self.sample_area.setMinimumSize(QtCore.QSize(40, 22)) self.sample_area.setMaximumSize(QtCore.QSize(40, 22)) self.sample_area.setText(_fromUtf8("")) self.sample_area.setObjectName(_fromUtf8("sample_area")) self.horizontalLayout_35.addWidget(self.sample_area) self.label_25 = QtGui.QLabel(self.groupBox_15) self.label_25.setMinimumSize(QtCore.QSize(32, 29)) self.label_25.setMaximumSize(QtCore.QSize(32, 29)) font = QtGui.QFont() font.setPointSize(12) self.label_25.setFont(font) self.label_25.setObjectName(_fromUtf8("label_25")) self.horizontalLayout_35.addWidget(self.label_25) self.horizontalLayout_8.addLayout(self.horizontalLayout_35) self.horizontalLayout_34 = QtGui.QHBoxLayout() self.horizontalLayout_34.setObjectName(_fromUtf8("horizontalLayout_34")) self.label_22 = QtGui.QLabel(self.groupBox_15) self.label_22.setMinimumSize(QtCore.QSize(36, 29)) self.label_22.setMaximumSize(QtCore.QSize(36, 29)) self.label_22.setObjectName(_fromUtf8("label_22")) self.horizontalLayout_34.addWidget(self.label_22) self.sample_height = QtGui.QLineEdit(self.groupBox_15) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.sample_height.sizePolicy().hasHeightForWidth()) self.sample_height.setSizePolicy(sizePolicy) self.sample_height.setMinimumSize(QtCore.QSize(40, 22)) self.sample_height.setMaximumSize(QtCore.QSize(40, 
22)) self.sample_height.setText(_fromUtf8("")) self.sample_height.setObjectName(_fromUtf8("sample_height")) self.horizontalLayout_34.addWidget(self.sample_height) self.label_23 = QtGui.QLabel(self.groupBox_15) self.label_23.setMinimumSize(QtCore.QSize(23, 29)) self.label_23.setMaximumSize(QtCore.QSize(23, 29)) font = QtGui.QFont() font.setPointSize(12) self.label_23.setFont(font) self.label_23.setObjectName(_fromUtf8("label_23")) self.horizontalLayout_34.addWidget(self.label_23) self.horizontalLayout_8.addLayout(self.horizontalLayout_34) self.verticalLayout_4.addLayout(self.horizontalLayout_8) self.verticalLayout_9.addLayout(self.verticalLayout_4) self.verticalLayout_5.addWidget(self.groupBox_15) self.groupBox_2 = QtGui.QGroupBox(SC_APP) self.groupBox_2.setMinimumSize(QtCore.QSize(281, 131)) self.groupBox_2.setMaximumSize(QtCore.QSize(281, 131)) font = QtGui.QFont() font.setPointSize(12) self.groupBox_2.setFont(font) self.groupBox_2.setObjectName(_fromUtf8("groupBox_2")) self.verticalLayout_11 = QtGui.QVBoxLayout(self.groupBox_2) self.verticalLayout_11.setSpacing(10) self.verticalLayout_11.setMargin(10) self.verticalLayout_11.setObjectName(_fromUtf8("verticalLayout_11")) self.verticalLayout = QtGui.QVBoxLayout() self.verticalLayout.setSpacing(20) self.verticalLayout.setContentsMargins(0, 10, 0, 10) self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setSpacing(20) self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.INST_SET = QtGui.QPushButton(self.groupBox_2) self.INST_SET.setObjectName(_fromUtf8("INST_SET")) self.horizontalLayout.addWidget(self.INST_SET) self.AI518P_SET = QtGui.QPushButton(self.groupBox_2) self.AI518P_SET.setObjectName(_fromUtf8("AI518P_SET")) self.horizontalLayout.addWidget(self.AI518P_SET) self.verticalLayout.addLayout(self.horizontalLayout) self.horizontalLayout_9 = QtGui.QHBoxLayout() self.horizontalLayout_9.setSpacing(20) self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9")) self.GAS_SET = QtGui.QPushButton(self.groupBox_2) self.GAS_SET.setObjectName(_fromUtf8("GAS_SET")) self.horizontalLayout_9.addWidget(self.GAS_SET) self.COORD_SET = QtGui.QPushButton(self.groupBox_2) self.COORD_SET.setObjectName(_fromUtf8("COORD_SET")) self.horizontalLayout_9.addWidget(self.COORD_SET) self.verticalLayout.addLayout(self.horizontalLayout_9) self.verticalLayout_11.addLayout(self.verticalLayout) self.verticalLayout_5.addWidget(self.groupBox_2) self.groupBox_4 = QtGui.QGroupBox(SC_APP) self.groupBox_4.setMinimumSize(QtCore.QSize(281, 111)) self.groupBox_4.setMaximumSize(QtCore.QSize(281, 111)) font = QtGui.QFont() font.setPointSize(12) self.groupBox_4.setFont(font) self.groupBox_4.setObjectName(_fromUtf8("groupBox_4")) self.verticalLayout_6 = QtGui.QVBoxLayout(self.groupBox_4) self.verticalLayout_6.setSpacing(0) self.verticalLayout_6.setMargin(10) self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6")) self.verticalLayout_3 = QtGui.QVBoxLayout() self.verticalLayout_3.setSpacing(10) self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3")) self.Rt_Curve = QtGui.QRadioButton(self.groupBox_4) self.Rt_Curve.setChecked(True) self.Rt_Curve.setObjectName(_fromUtf8("Rt_Curve")) self.verticalLayout_3.addWidget(self.Rt_Curve) self.SC_Curve = QtGui.QRadioButton(self.groupBox_4) self.SC_Curve.setObjectName(_fromUtf8("SC_Curve")) self.verticalLayout_3.addWidget(self.SC_Curve) self.verticalLayout_6.addLayout(self.verticalLayout_3) 
self.verticalLayout_5.addWidget(self.groupBox_4) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2")) self.SC_start = QtGui.QPushButton(SC_APP) font = QtGui.QFont() font.setPointSize(12) self.SC_start.setFont(font) self.SC_start.setObjectName(_fromUtf8("SC_start")) self.horizontalLayout_2.addWidget(self.SC_start) self.SC_stop = QtGui.QPushButton(SC_APP) font = QtGui.QFont() font.setPointSize(12) self.SC_stop.setFont(font) self.SC_stop.setObjectName(_fromUtf8("SC_stop")) self.horizontalLayout_2.addWidget(self.SC_stop) self.SC_save = QtGui.QPushButton(SC_APP) font = QtGui.QFont() font.setPointSize(12) self.SC_save.setFont(font) self.SC_save.setObjectName(_fromUtf8("SC_save")) self.horizontalLayout_2.addWidget(self.SC_save) self.verticalLayout_5.addLayout(self.horizontalLayout_2) self.horizontalLayout_15.addLayout(self.verticalLayout_5) self.verticalLayout_12.addLayout(self.horizontalLayout_15) self.groupBox_3 = QtGui.QGroupBox(SC_APP) self.groupBox_3.setMinimumSize(QtCore.QSize(780, 61)) self.groupBox_3.setMaximumSize(QtCore.QSize(780, 61)) font = QtGui.QFont() font.setPointSize(12) self.groupBox_3.setFont(font) self.groupBox_3.setObjectName(_fromUtf8("groupBox_3")) self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_3) self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2")) self.horizontalLayout_11 = QtGui.QHBoxLayout() self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11")) self.horizontalLayout_4 = QtGui.QHBoxLayout() self.horizontalLayout_4.setSpacing(0) self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4")) self.inst_sta = QtGui.QLabel(self.groupBox_3) self.inst_sta.setText(_fromUtf8("")) self.inst_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/noyb.png"))) self.inst_sta.setObjectName(_fromUtf8("inst_sta")) self.horizontalLayout_4.addWidget(self.inst_sta) self.pcb_sta = QtGui.QLabel(self.groupBox_3) self.pcb_sta.setText(_fromUtf8("")) self.pcb_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/nodlb.png"))) self.pcb_sta.setObjectName(_fromUtf8("pcb_sta")) self.horizontalLayout_4.addWidget(self.pcb_sta) self.ai518_sta = QtGui.QLabel(self.groupBox_3) self.ai518_sta.setText(_fromUtf8("")) self.ai518_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/nowky.png"))) self.ai518_sta.setObjectName(_fromUtf8("ai518_sta")) self.horizontalLayout_4.addWidget(self.ai518_sta) self.horizontalLayout_11.addLayout(self.horizontalLayout_4) self.sys_state = QtGui.QLineEdit(self.groupBox_3) self.sys_state.setEnabled(False) self.sys_state.setObjectName(_fromUtf8("sys_state")) self.horizontalLayout_11.addWidget(self.sys_state) self.horizontalLayout_10 = QtGui.QHBoxLayout() self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10")) self.horizontalLayout_5 = QtGui.QHBoxLayout() self.horizontalLayout_5.setSpacing(0) self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5")) self.label_14 = QtGui.QLabel(self.groupBox_3) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.label_14.setFont(font) self.label_14.setObjectName(_fromUtf8("label_14")) self.horizontalLayout_5.addWidget(self.label_14) self.valve1_sta = QtGui.QLabel(self.groupBox_3) self.valve1_sta.setText(_fromUtf8("")) self.valve1_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png"))) self.valve1_sta.setObjectName(_fromUtf8("valve1_sta")) self.horizontalLayout_5.addWidget(self.valve1_sta) self.horizontalLayout_10.addLayout(self.horizontalLayout_5) self.horizontalLayout_3 
= QtGui.QHBoxLayout() self.horizontalLayout_3.setSpacing(0) self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3")) self.label_13 = QtGui.QLabel(self.groupBox_3) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.label_13.setFont(font) self.label_13.setObjectName(_fromUtf8("label_13")) self.horizontalLayout_3.addWidget(self.label_13) self.valve2_sta = QtGui.QLabel(self.groupBox_3) self.valve2_sta.setText(_fromUtf8("")) self.valve2_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png"))) self.valve2_sta.setObjectName(_fromUtf8("valve2_sta")) self.horizontalLayout_3.addWidget(self.valve2_sta) self.horizontalLayout_10.addLayout(self.horizontalLayout_3) self.horizontalLayout_6 = QtGui.QHBoxLayout() self.horizontalLayout_6.setSpacing(0) self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6")) self.label_15 = QtGui.QLabel(self.groupBox_3) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.label_15.setFont(font) self.label_15.setObjectName(_fromUtf8("label_15")) self.horizontalLayout_6.addWidget(self.label_15) self.valve3_sta = QtGui.QLabel(self.groupBox_3) self.valve3_sta.setText(_fromUtf8("")) self.valve3_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png"))) self.valve3_sta.setObjectName(_fromUtf8("valve3_sta")) self.horizontalLayout_6.addWidget(self.valve3_sta) self.horizontalLayout_10.addLayout(self.horizontalLayout_6) self.horizontalLayout_7 = QtGui.QHBoxLayout() self.horizontalLayout_7.setSpacing(0) self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7")) self.label_17 = QtGui.QLabel(self.groupBox_3) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.label_17.setFont(font) self.label_17.setObjectName(_fromUtf8("label_17")) self.horizontalLayout_7.addWidget(self.label_17) self.clean_sta = QtGui.QLabel(self.groupBox_3) self.clean_sta.setText(_fromUtf8("")) self.clean_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png"))) self.clean_sta.setObjectName(_fromUtf8("clean_sta")) self.horizontalLayout_7.addWidget(self.clean_sta) self.horizontalLayout_10.addLayout(self.horizontalLayout_7) self.horizontalLayout_11.addLayout(self.horizontalLayout_10) self.label = QtGui.QLabel(self.groupBox_3) self.label.setText(_fromUtf8("")) self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/partulab.png"))) self.label.setObjectName(_fromUtf8("label")) self.horizontalLayout_11.addWidget(self.label) self.verticalLayout_2.addLayout(self.horizontalLayout_11) self.verticalLayout_12.addWidget(self.groupBox_3) self.verticalLayout_13.addLayout(self.verticalLayout_12) self.AI518P_SET.setEnabled(False) self.retranslateUi(SC_APP) self.SC_MPLS.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(SC_APP) def retranslateUi(self, SC_APP): SC_APP.setWindowTitle(_translate("SC_APP", "灵敏度-浓度谱", None)) self.log_state.setText(_translate("SC_APP", "log", None)) self.groupBox_5.setTitle(_translate("SC_APP", "测量参数", None)) self.label_18.setText(_translate("SC_APP", "测量时间", None)) self.label_5.setText(_translate("SC_APP", "S", None)) self.label_19.setText(_translate("SC_APP", "流量计1", None)) self.label_7.setText(_translate("SC_APP", "mL/min", None)) self.label_20.setText(_translate("SC_APP", "当前阻值", None)) self.label_6.setText(_translate("SC_APP", "Ω", None)) self.label_26.setText(_translate("SC_APP", "流量计2", None)) self.label_8.setText(_translate("SC_APP", "mL/min", None)) self.label_27.setText(_translate("SC_APP", "当前温度", None)) 
self.label_4.setText(_translate("SC_APP", "℃", None)) self.label_28.setText(_translate("SC_APP", "流量计3", None)) self.label_9.setText(_translate("SC_APP", "mL/min", None)) self.groupBox_15.setTitle(_translate("SC_APP", "样品信息", None)) self.label_16.setText(_translate("SC_APP", "样品标识", None)) self.sample_id.setText(_translate("SC_APP", "SC_test", None)) self.label_21.setText(_translate("SC_APP", "保存路径", None)) self.save_path.setText(_translate("SC_APP", "D:/", None)) self.label_24.setText(_translate("SC_APP", "面积", None)) self.label_25.setText(_translate("SC_APP", "mm^2", None)) self.label_22.setText(_translate("SC_APP", "厚度", None)) self.label_23.setText(_translate("SC_APP", "mm", None)) self.groupBox_2.setTitle(_translate("SC_APP", "参数设置", None)) self.INST_SET.setText(_translate("SC_APP", "仪器设置", None)) self.AI518P_SET.setText(_translate("SC_APP", "温度设置", None)) self.GAS_SET.setText(_translate("SC_APP", "气压控制", None)) self.COORD_SET.setText(_translate("SC_APP", "XY坐标设置", None)) self.groupBox_4.setTitle(_translate("SC_APP", "曲线选择", None)) self.Rt_Curve.setText(_translate("SC_APP", "R-t曲线", None)) self.SC_Curve.setText(_translate("SC_APP", "S-C曲线", None)) self.SC_start.setText(_translate("SC_APP", "开始测量", None)) self.SC_stop.setText(_translate("SC_APP", "停止测量", None)) self.SC_save.setText(_translate("SC_APP", "保存数据", None)) self.groupBox_3.setTitle(_translate("SC_APP", "当前状态", None)) self.label_14.setText(_translate("SC_APP", "阀门1", None)) self.label_13.setText(_translate("SC_APP", "阀门2", None)) self.label_15.setText(_translate("SC_APP", "阀门3", None)) self.label_17.setText(_translate("SC_APP", "清洗阀", None)) import mypic_rc if __name__ == "__main__": import sys app = QtGui.QApplication(sys.argv) SC_APP = QtGui.QDialog() ui = Ui_SC_APP() ui.setupUi(SC_APP) SC_APP.show() sys.exit(app.exec_())
cygnushan/measurement
SC_spectrum/Ui_SC_main.py
Python
mit
31,084
# pylint: disable-msg=C0111,C0103

import unittest

from traits.api import Event
from openmdao.main.api import Assembly, Component, Driver, set_as_top, VariableTree
from openmdao.main.container import _get_entry_group
from openmdao.main.datatypes.api import Float, Int, VarTree
from openmdao.main.driver import GradientOptions
from openmdao.main.test.test_derivatives import SimpleDriver


class EventComp(Component):
    doit = Event()

    def __init__(self):
        super(EventComp, self).__init__()
        self.num_doits = 0

    def _doit_fired(self):
        self.num_doits += 1

    def execute(self):
        pass


class A(Component):
    a = GradientOptions()


class DriverTestCase(unittest.TestCase):

    def setUp(self):
        top = self.asm = set_as_top(Assembly())
        top.add('evcomp', EventComp())

        # driver process definition
        top.driver.workflow.add('evcomp')

    def test_add_event(self):
        for i in range(3):
            self.asm.run()
            self.assertEqual(self.asm.evcomp.exec_count, i + 1)
            self.assertEqual(self.asm.evcomp.num_doits, 0)

        self.asm.driver.add_event('evcomp.doit')
        for i in range(3):
            self.asm.run()
            self.assertEqual(self.asm.evcomp.exec_count, i + 4)
            self.assertEqual(self.asm.evcomp.num_doits, i + 1)

    def test_get_entry_group(self):
        self.assertEqual(_get_entry_group(Driver()), 'openmdao.driver')

    def test_gradient_options(self):
        options = GradientOptions()
        assert(options.get_metadata("directional_fd")["framework_var"])
        assert(options.get_metadata("derivative_direction")["framework_var"])
        assert(options.get_metadata("fd_form")["framework_var"])
        assert(options.get_metadata("fd_step")["framework_var"])
        assert(options.get_metadata("fd_step_type")["framework_var"])
        assert(options.get_metadata("force_fd")["framework_var"])
        assert(options.get_metadata("lin_solver")["framework_var"])
        assert(options.get_metadata("atol")["framework_var"])
        assert(options.get_metadata("rtol")["framework_var"])
        assert(options.get_metadata("maxiter")["framework_var"])

        assert(Driver().get_metadata("gradient_options")["framework_var"])


class DriverTestCase2(unittest.TestCase):

    def test_get_req_compnames_vartree_param_obj(self):
        # Tests a fix for a bug reported by Rick Damiani

        class PileGeoInputs(VariableTree):
            """Basic Geometric Inputs need to build Legs of Jacket"""
            Lp = Float(units='m', desc='Pile Embedment Length.')

        class MyComp(Component):
            x = Float(0.0, iotype='in')
            y = Float(0.0, iotype='in')

            def execute(self):
                self.y = 2.0 * self.x

        class Top(Assembly):
            Pileinputs = VarTree(PileGeoInputs(), iotype='in', desc="Pile Input Data")
            SPIstiffness = VarTree(PileGeoInputs(), iotype='out', desc="Pile Input Data")

            def configure(self):
                self.connect('Pileinputs.Lp', 'SPIstiffness.Lp')
                self.disconnect('Pileinputs.Lp', 'SPIstiffness.Lp')

                self.add('comp', MyComp())
                self.driver.workflow.add('comp')

                self.connect('Pileinputs.Lp', 'comp.x')

        top = set_as_top(Top())
        top.replace('driver', SimpleDriver())
        top.driver.add_parameter('Pileinputs.Lp', low=-100, high=100)
        top.driver.add_objective('SPIstiffness.Lp + comp.y')

        top._setup()
        comps = top.driver._get_required_compnames()
        self.assertTrue(len(comps) == 2)
        self.assertTrue('comp' in comps)


if __name__ == "__main__":
    unittest.main()
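# The Event wiring exercised by test_add_event, in brief (a sketch): once an
# event is registered on the driver, it is fired on each driver iteration,
# invoking the matching _<name>_fired hook on the component:
#
#   top.driver.add_event('evcomp.doit')
#   top.run()   # EventComp._doit_fired runs once per iteration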
HyperloopTeam/FullOpenMDAO
lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_driver.py
Python
gpl-2.0
3,731
import urllib.parse
import sys

from ccs import core
from ccs import constants
from . import response


def ticker():
    s = __name__.split(".")[1]
    r = sys._getframe().f_code.co_name

    # complete request
    cr = core.request(s, r)

    return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))


def trades(pair):
    s = __name__.split(".")[1]
    r = sys._getframe().f_code.co_name

    params = {}
    params["pair"] = pair

    # complete request
    cr = core.request(s, r) + urllib.parse.urlencode(params)

    return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))


def order_book(pair, limit=None):
    s = __name__.split(".")[1]
    r = sys._getframe().f_code.co_name

    params = {}
    params["pair"] = pair

    if limit:
        params["limit"] = limit

    # complete request
    cr = core.request(s, r) + urllib.parse.urlencode(params)

    return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))


def pair_settings():
    s = __name__.split(".")[1]
    r = sys._getframe().f_code.co_name

    # complete request
    cr = core.request(s, r)

    return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))


def currency():
    s = __name__.split(".")[1]
    r = sys._getframe().f_code.co_name

    # complete request
    cr = core.request(s, r)

    return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
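# Hypothetical usage (the "BTC_USD" pair name is illustrative; the set of
# valid pairs comes from pair_settings()):
#
#   info = pair_settings()
#   book = order_book("BTC_USD", limit=50)
#   recent = trades("BTC_USD")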
Honzin/ccs
dev/exmo/public/__init__.py
Python
agpl-3.0
1,507
import logging import os import pickle import subprocess import sys import uuid from datetime import datetime from mwclient import Site from PythonConfluenceAPI import ConfluenceAPI import Configuration from CustomModules.Mechanics import ContributionComparator, ExclusionsDict, MysqlConnector from CustomModules.SQL_Connector import SQLConnector import CustomModules.Mechanics as Mechanics from Server.ServerLogic import xwd_fullname_to_link as xwd_fullname_to_link from Server.ServerLogic import start_core_as_subprocess as start_core_as_subprocess GlobalStartTime = datetime.now() def initialize(logging_mode: str = 'DEBUG', log_to_file: bool = True): ################################################################################################################### # Contrib_Compare_inst # # Main instance, used to analyze pages and create page contribution maps based on the content, # # collected from one of supported platforms # # Mysql_Connector_inst # # Here used to connect to xWIKI DB to get a list of pages from a requested space # # ConfluenceAPI_inst # # ConfluenceAPI_inst - same for Confluence # # SQL_Connector_inst # # Used to store\load page contribution maps in\from SQL # # Page_Creator_inst # # Creates PAGE objects - data handlers for currently analyzed page # ################################################################################################################### formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') logger_inst = logging.getLogger() logger_inst.setLevel(logging_mode) Integration_config = Configuration.Integration() if log_to_file is True: log_name = Integration_config.log_location + "Comparer_task_builder_v1.0_" + str(datetime.now().strftime("%Y-%m-%d_%H_%M_%S", )) + '.log' fh = logging.FileHandler(log_name) fh.setLevel(logging_mode) fh.setFormatter(formatter) logger_inst.addHandler(fh) ch = logging.StreamHandler() ch.setLevel(logging_mode) ch.setFormatter(formatter) logger_inst.addHandler(ch) contrib_compare_inst = ContributionComparator() SQL_config_inst = Configuration.SQLConfig() confluence_config_inst = Configuration.ConfluenceConfig() try: media_w_i_k_i__config_inst = Configuration.MediaWIKIConfig() media_wiki_api_inst = Site((media_w_i_k_i__config_inst.Protocol, media_w_i_k_i__config_inst.URL), path=media_w_i_k_i__config_inst.APIPath, clients_useragent=media_w_i_k_i__config_inst.UserAgent,retry_timeout=1, max_retries=1) except Exception: media_wiki_api_inst = None xWiki_Config_inst = Configuration.XWikiConfig(['Migration pool', 'Sandbox', 'Main', 'StagingWiki']) MySQL_Config_inst = Configuration.MySQLConfig() Mysql_connector_inst = MysqlConnector(MySQL_Config_inst) SQL_connector_inst = SQLConnector(SQL_config_inst) # getting all pages in Confluence: confluenceAPI_inst = ConfluenceAPI(confluence_config_inst.USER, confluence_config_inst.PASS, confluence_config_inst.ULR) return contrib_compare_inst, Mysql_connector_inst, confluenceAPI_inst, SQL_connector_inst, logger_inst, media_wiki_api_inst Contrib_Compare_inst, Mysql_Connector_inst, ConfluenceAPI_inst, SQL_Connector_inst, Logger, MediaWIKI_api_inst = initialize('INFO', False) Logger.info('Initialization finished, job started at ' + str(GlobalStartTime)) # Task: # Confluence: VB (Veeam B&R Basic knowledge), WB (Veeam Support Hints and Tricks), GZ (Ground Zero) # MediaWIKI: just all # xWIKI: ['Blog', 'Main', 'Sandbox', 'XWiki'] Task = { # 'VB': 'Confluence', # 'WB': 'Confluence', # 'GZ': 'Confluence', # 'ALL mWIKI': 'MediaWIKI' #'Main': 'xWIKI', 'Main': 'xWIKI', # 'Migration 
pool': 'xWIKI', # 'Migrated bugs': 'xWIKI' 'StagingWiki': 'xWIKI' #'Main': 'xWIKI' } TaskExclusions = ExclusionsDict() TaskExclusions['Confluence'] = 'List of all KBs' TaskExclusions['MediaWIKI'] = 'Found Bugs' TaskExclusions['MediaWIKI'] = 'Registry values B&R' TaskExclusions['MediaWIKI'] = 'Veeam ONE Registry Keys' TaskExclusions['MediaWIKI'] = 'Patches and fixes for B&R' TaskExclusions['MediaWIKI'] = 'Bug%' TaskExclusions['MediaWIKI'] = 'BUG%' TaskExclusions['MediaWIKI'] = 'bug%' TaskExclusions['MediaWIKI'] = 'Case Handling' TaskExclusions['MediaWIKI'] = 'Team Members' TaskExclusions['xWIKI'] = 'Main.WebHome' TaskExclusions['xWIKI'] = 'StagingWiki.WebHome' TaskExclusions['xWIKI'] = 'StagingWiki.Personal Spaces%' TaskExclusions['xWIKI'] = 'Main.Internal Technical Docs.Veeam ONE.Veeam-One\:-Database%' TaskExclusions['xWIKI'] = 'Internal Technical Docs.Veeam ONE.FAQ%' select = None #select = "select page_id from [dbo].[KnownPages] inner join [dbo].[KnownBugs] on [dbo].[KnownPages].id =[dbo].[KnownBugs].KnownPages_id where [dbo].[KnownPages].id in(select KnownPages_id from [dbo].[KnownBugs] where id not in (select [KnownBug_ID] FROM [Karma].[dbo].[KnownBugs_TFS_state]) and bug_id != '0')" def build_task_array(task_dict: dict, task_exclusions_dict: Mechanics.ExclusionsDict, logger): global task_pages_dict, platform task_pages_dict = {} total_size = 0 for space, platform in task_dict.items(): if platform.lower() == 'confluence': respond = ConfluenceAPI_inst.get_content('page', space, None, 'current', None, None, 0, 500) size = respond['size'] total_size += size logger.info(str(size) + ' Confluence pages were found in space ' + space) try: confluence_pages_from_api = respond['results'] except Exception: logger.error('Unable to get Confluence pages from API, aborting this space') continue for page in confluence_pages_from_api: if task_exclusions_dict[platform] is not None: if not Mechanics.check_exclusions(page['title'], platform, task_exclusions_dict): continue else: task_pages_dict.update({page['title']: platform}) size += 1 else: task_pages_dict.update({page['title']: platform}) size += 1 if platform.lower() == 'mediawiki': if MediaWIKI_api_inst is not None: size = 0 for page in MediaWIKI_api_inst.allpages(): if task_exclusions_dict[platform] is not None: if not Mechanics.check_exclusions(page.name, platform, task_exclusions_dict): logger.debug(page.name + ' was excluded') continue else: task_pages_dict.update({page.name: platform}) size += 1 else: task_pages_dict.update({page.name: platform}) size += 1 logger.info(str(size) + ' MediaWIKI pages were found in space "' + space + '"') total_size += size else: logger.critical('Unable to connect to mWiki, skipping') if platform.lower() == 'xwiki': size = 0 logger.debug('Looking for pages in the following xWIKI space: "' + space + '"') for page in Mysql_Connector_inst.get_XWD_FULLNAMEs(space): if task_exclusions_dict[platform] is not None: if not Mechanics.check_exclusions(page, platform, task_exclusions_dict): logger.debug(page + ' was excluded') continue else: task_pages_dict.update({page: platform}) size += 1 else: task_pages_dict.update({page: platform}) size += 1 logger.info(str(size) + ' xWIKI pages were found in space "' + space + '"') total_size += size TaskStartTime = datetime.now() logger.info(str(total_size) + ' pages were found in all spaces') return task_pages_dict, TaskStartTime def build_task_array_by_sql_select(select: str, logger, SQL_Connector_inst): task_pages_dict = {} TaskStartTime = datetime.now() pages = 
SQL_Connector_inst.select_custom_select(select) for page in pages: page = page[0] if str(page).startswith('xwiki:'): page = str(page).replace('xwiki:', '') task_pages_dict.update({page: 'xwiki'}) total_size = len(task_pages_dict) logger.info(str(total_size) + ' pages were found in all spaces') return task_pages_dict, TaskStartTime if select is None: task_pages_dict, TaskStartTime = build_task_array(task_dict=Task, task_exclusions_dict=TaskExclusions, logger=Logger) else: task_pages_dict, TaskStartTime = build_task_array_by_sql_select(select=select, logger=Logger, SQL_Connector_inst=SQL_Connector_inst) # starting main process Logger.info('Re-indexing started') for xwd_fullname, platform in task_pages_dict.items(): try: dict_to_pickle = { xwd_fullname: platform } Logger.info('Re-indexing of "' + xwd_fullname + '" platform: ' + platform + ' started') link = xwd_fullname_to_link(xwd_fullname) token_id = SQL_Connector_inst.insert_into_dbo_webrequests_reindex_page_by_xwd_fullname(xwd_fullname, link) Logger.info('Starting CC_core') start_core_as_subprocess(dict_to_pickle, token_id, 'DEBUG') except: # all unhandled exceptions error = sys.exc_info()[0] Logger.error('Re-indexing unexpectedly failed with: ' + str(error)) Logger.info('Re-indexing finished')
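# Illustrative note, inferred from the usage above rather than from CustomModules
# itself: ExclusionsDict seems to accumulate several patterns per platform key and
# to treat a trailing '%' as an SQL-LIKE-style wildcard, e.g.
#   TaskExclusions['MediaWIKI'] = 'Bug%'   # every title starting with "Bug"
# so that Mechanics.check_exclusions('Bug 123', 'MediaWIKI', TaskExclusions)
# returns a falsy value and the page is skipped by build_task_array.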
daymer/xWIKI_Karma
Comparer_task_builder.py
Python
apache-2.0
10,592
#! /usr/bin/env python
# -*- coding: utf-8 -*-


def addDigits(num):
    """Repeatedly sum the digits of num until only one digit remains."""
    while True:
        total = 0
        for digit in str(num):
            total += int(digit)
        if total < 10:
            return total
        num = total
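
# A hypothetical demo block (not part of the original exercise) showing the
# expected behaviour on a couple of inputs:
if __name__ == "__main__":
    assert addDigits(38) == 2   # 3 + 8 = 11, then 1 + 1 = 2
    assert addDigits(0) == 0    # already a single digit
    print(addDigits(12345))     # 1+2+3+4+5 = 15 -> 1+5 = 6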
ccqpein/Arithmetic-Exercises
Add-Digits/add_digits.py
Python
apache-2.0
260
# -*- coding: utf-8 -*- import importlib import io import json import os import kolibri EXTERNAL_PLUGINS_PREFIX = "kolibri_" def is_external_plugin(appname): """ Returns true when the given app is an external plugin. Implementation note: does a simple check on the name to see if it's prefixed with "kolibri_". If so, we think it's a plugin. """ return appname.startswith(EXTERNAL_PLUGINS_PREFIX) def get_installed_app_locale_path(appname): """ Load the app given by appname and return its locale folder path, if it exists. Note that the module is imported to determine its location. """ m = importlib.import_module(appname) module_path = os.path.dirname(m.__file__) module_locale_path = os.path.join(module_path, "locale") if os.path.isdir(module_locale_path): return module_locale_path def _get_supported_language_info(): file_path = os.path.join( os.path.dirname(kolibri.__file__), "locale", "supported_languages.json" ) with io.open(file_path, encoding="utf-8") as f: return json.load(f) # Kolibri format KOLIBRI_SUPPORTED_LANGUAGES = _get_supported_language_info()
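# Doctest-style illustration of the naming convention above (plugin names invented):
#   >>> is_external_plugin("kolibri_awesome_plugin")
#   True
#   >>> is_external_plugin("kolibri.plugins.learn")
#   False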
lyw07/kolibri
kolibri/utils/i18n.py
Python
mit
1,177
from itertools import combinations import pytest import networkx as nx from networkx.algorithms.flow import boykov_kolmogorov from networkx.algorithms.flow import edmonds_karp from networkx.algorithms.flow import preflow_push from networkx.algorithms.flow import shortest_augmenting_path from networkx.algorithms.flow import dinitz flow_funcs = [ boykov_kolmogorov, dinitz, edmonds_karp, preflow_push, shortest_augmenting_path, ] class TestGomoryHuTree: def minimum_edge_weight(self, T, u, v): path = nx.shortest_path(T, u, v, weight='weight') return min((T[u][v]['weight'], (u, v)) for (u, v) in zip(path, path[1:])) def compute_cutset(self, G, T_orig, edge): T = T_orig.copy() T.remove_edge(*edge) U, V = list(nx.connected_components(T)) cutset = set() for x, nbrs in ((n, G[n]) for n in U): cutset.update((x, y) for y in nbrs if y in V) return cutset def test_default_flow_function_karate_club_graph(self): G = nx.karate_club_graph() nx.set_edge_attributes(G, 1, 'capacity') T = nx.gomory_hu_tree(G) assert nx.is_tree(T) for u, v in combinations(G, 2): cut_value, edge = self.minimum_edge_weight(T, u, v) assert (nx.minimum_cut_value(G, u, v) == cut_value) def test_karate_club_graph(self): G = nx.karate_club_graph() nx.set_edge_attributes(G, 1, 'capacity') for flow_func in flow_funcs: T = nx.gomory_hu_tree(G, flow_func=flow_func) assert nx.is_tree(T) for u, v in combinations(G, 2): cut_value, edge = self.minimum_edge_weight(T, u, v) assert (nx.minimum_cut_value(G, u, v) == cut_value) def test_davis_southern_women_graph(self): G = nx.davis_southern_women_graph() nx.set_edge_attributes(G, 1, 'capacity') for flow_func in flow_funcs: T = nx.gomory_hu_tree(G, flow_func=flow_func) assert nx.is_tree(T) for u, v in combinations(G, 2): cut_value, edge = self.minimum_edge_weight(T, u, v) assert (nx.minimum_cut_value(G, u, v) == cut_value) def test_florentine_families_graph(self): G = nx.florentine_families_graph() nx.set_edge_attributes(G, 1, 'capacity') for flow_func in flow_funcs: T = nx.gomory_hu_tree(G, flow_func=flow_func) assert nx.is_tree(T) for u, v in combinations(G, 2): cut_value, edge = self.minimum_edge_weight(T, u, v) assert (nx.minimum_cut_value(G, u, v) == cut_value) def test_les_miserables_graph_cutset(self): G = nx.les_miserables_graph() nx.set_edge_attributes(G, 1, 'capacity') for flow_func in flow_funcs: T = nx.gomory_hu_tree(G, flow_func=flow_func) assert nx.is_tree(T) for u, v in combinations(G, 2): cut_value, edge = self.minimum_edge_weight(T, u, v) assert (nx.minimum_cut_value(G, u, v) == cut_value) def test_karate_club_graph_cutset(self): G = nx.karate_club_graph() nx.set_edge_attributes(G, 1, 'capacity') T = nx.gomory_hu_tree(G) assert nx.is_tree(T) u, v = 0, 33 cut_value, edge = self.minimum_edge_weight(T, u, v) cutset = self.compute_cutset(G, T, edge) assert cut_value == len(cutset) def test_wikipedia_example(self): # Example from https://en.wikipedia.org/wiki/Gomory%E2%80%93Hu_tree G = nx.Graph() G.add_weighted_edges_from(( (0, 1, 1), (0, 2, 7), (1, 2, 1), (1, 3, 3), (1, 4, 2), (2, 4, 4), (3, 4, 1), (3, 5, 6), (4, 5, 2), )) for flow_func in flow_funcs: T = nx.gomory_hu_tree(G, capacity='weight', flow_func=flow_func) assert nx.is_tree(T) for u, v in combinations(G, 2): cut_value, edge = self.minimum_edge_weight(T, u, v) assert (nx.minimum_cut_value(G, u, v, capacity='weight') == cut_value) def test_directed_raises(self): with pytest.raises(nx.NetworkXNotImplemented): G = nx.DiGraph() T = nx.gomory_hu_tree(G) def test_empty_raises(self): with pytest.raises(nx.NetworkXError): G = 
nx.empty_graph() T = nx.gomory_hu_tree(G)
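# A minimal standalone check of the same Gomory-Hu property on a tiny graph
# (illustrative, mirroring minimum_edge_weight above): in a 4-cycle with unit
# capacities, every pair of nodes is separated by a cut of value exactly 2.
#   G = nx.cycle_graph(4)
#   nx.set_edge_attributes(G, 1, 'capacity')
#   T = nx.gomory_hu_tree(G)
#   assert all(nx.minimum_cut_value(G, u, v) == 2 for u, v in combinations(G, 2))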
sserrot/champion_relationships
venv/Lib/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py
Python
mit
4,616
#!/usr/bin/env python
from htcondor_dag import Dag, autorun

# Two jobs, each writes text to its own output file

def print_sum(a, b):
    print(a + b)

autorun(report_hostname=True)
dag = Dag('htcondor_ex1')

dag.defer(print_sum, output="res1.txt")(1, 2)
dag.defer(print_sum, output="res2.txt")(3, 4)

dag.write()
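# After running this script, the generated DAG (presumably named after the
# Dag(...) argument above) is submitted with the standard HTCondor tool:
#   condor_submit_dag htcondor_ex1.dag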
candlerb/htcondor_dag.py
examples/htcondor_ex1.py
Python
gpl-2.0
312
#!/usr/bin/env python

import os
import sys
import unittest

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "scripts")))
from topic_compare import ROSTopicCompare

import rospy

def eps_equal(a, b, err=0.001):
    return abs(a - b) < err

# subscribing to three topics:
# * /origin
# * /origin (the same topic)
# * /half

class TestTopicCompare(unittest.TestCase):
    def test_same_topic(self):
        while not tc.isAllTopicAvailable(20):
            rospy.sleep(1)
        print(tc.getTotalBytes(0) / (tc.getEndTime(0) - tc.getStartTime(0)))
        print(tc.getTotalBytes(1) / (tc.getEndTime(1) - tc.getStartTime(1)))
        self.assertTrue(eps_equal(tc.getBandwidth(0), tc.getBandwidth(1), 20))

    def test_half_topic(self):
        while not tc.isAllTopicAvailable(20):
            rospy.sleep(1)
        print(tc.getTotalBytes(0) / (tc.getEndTime(0) - tc.getStartTime(0)))
        print(tc.getTotalBytes(2) / (tc.getEndTime(2) - tc.getStartTime(2)))
        self.assertTrue(eps_equal(tc.getBandwidth(0), 2 * tc.getBandwidth(2), 20))

if __name__ == "__main__":
    import rostest
    rospy.init_node("test_topic_compare")
    tc = ROSTopicCompare()
    tc.registerTopic("/origin")
    tc.registerTopic("/origin")
    tc.registerTopic("/half")
    rostest.rosrun("jsk_topic_tools", "test_topic_compare", TestTopicCompare)
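# Sanity check of the helper above (the 20 passed in the tests widens the default
# tolerance to absorb bandwidth jitter between the measured topics):
#   assert eps_equal(1.0, 1.0005) and not eps_equal(1.0, 1.1)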
AtsushiSakai/jsk_visualization_packages
jsk_topic_tools/test/test_topic_compare.py
Python
mit
1,425
from __future__ import absolute_import, unicode_literals import io import itertools import logging import os.path import re from mopidy import compat from mopidy.compat import configparser from mopidy.config import keyring from mopidy.config.schemas import * # noqa from mopidy.config.types import * # noqa from mopidy.internal import path, versioning logger = logging.getLogger(__name__) _core_schema = ConfigSchema('core') # MPD supports at most 10k tracks, some clients segfault when this is exceeded. _core_schema['max_tracklist_length'] = Integer(minimum=1, maximum=10000) _logging_schema = ConfigSchema('logging') _logging_schema['color'] = Boolean() _logging_schema['console_format'] = String() _logging_schema['debug_format'] = String() _logging_schema['debug_file'] = Path() _logging_schema['config_file'] = Path(optional=True) _loglevels_schema = MapConfigSchema('loglevels', LogLevel()) _logcolors_schema = MapConfigSchema('logcolors', LogColor()) _audio_schema = ConfigSchema('audio') _audio_schema['mixer'] = String() _audio_schema['mixer_track'] = Deprecated() _audio_schema['mixer_volume'] = Integer(optional=True, minimum=0, maximum=100) _audio_schema['output'] = String() _audio_schema['visualizer'] = Deprecated() _proxy_schema = ConfigSchema('proxy') _proxy_schema['scheme'] = String(optional=True, choices=['http', 'https', 'socks4', 'socks5']) _proxy_schema['hostname'] = Hostname(optional=True) _proxy_schema['port'] = Port(optional=True) _proxy_schema['username'] = String(optional=True) _proxy_schema['password'] = Secret(optional=True) # NOTE: if multiple outputs ever comes something like LogLevelConfigSchema # _outputs_schema = config.AudioOutputConfigSchema() _schemas = [ _core_schema, _logging_schema, _loglevels_schema, _logcolors_schema, _audio_schema, _proxy_schema] _INITIAL_HELP = """ # For further information about options in this file see: # http://docs.mopidy.com/ # # The initial commented out values reflect the defaults as of: # %(versions)s # # Available options and defaults might have changed since then, # run `mopidy config` to see the current effective config and # `mopidy --version` to check the current version. 
""" def read(config_file): """Helper to load config defaults in same way across core and extensions""" with io.open(config_file, 'rb') as filehandle: return filehandle.read() def load(files, ext_schemas, ext_defaults, overrides): config_dir = os.path.dirname(__file__) defaults = [read(os.path.join(config_dir, 'default.conf'))] defaults.extend(ext_defaults) raw_config = _load(files, defaults, keyring.fetch() + (overrides or [])) schemas = _schemas[:] schemas.extend(ext_schemas) return _validate(raw_config, schemas) def format(config, ext_schemas, comments=None, display=True): schemas = _schemas[:] schemas.extend(ext_schemas) return _format(config, comments or {}, schemas, display, False) def format_initial(extensions): config_dir = os.path.dirname(__file__) defaults = [read(os.path.join(config_dir, 'default.conf'))] defaults.extend(e.get_default_config() for e in extensions) raw_config = _load([], defaults, []) schemas = _schemas[:] schemas.extend(e.get_config_schema() for e in extensions) config, errors = _validate(raw_config, schemas) versions = ['Mopidy %s' % versioning.get_version()] for extension in sorted(extensions, key=lambda ext: ext.dist_name): versions.append('%s %s' % (extension.dist_name, extension.version)) header = _INITIAL_HELP.strip() % {'versions': '\n# '.join(versions)} formatted_config = _format( config=config, comments={}, schemas=schemas, display=False, disable=True).decode('utf-8') return header + '\n\n' + formatted_config def _load(files, defaults, overrides): parser = configparser.RawConfigParser() # TODO: simply return path to config file for defaults so we can load it # all in the same way? logger.info('Loading config from builtin defaults') for default in defaults: if isinstance(default, compat.text_type): default = default.encode('utf-8') parser.readfp(io.BytesIO(default)) # Load config from a series of config files files = [path.expand_path(f) for f in files] for name in files: if os.path.isdir(name): for filename in os.listdir(name): filename = os.path.join(name, filename) if os.path.isfile(filename) and filename.endswith('.conf'): _load_file(parser, filename) else: _load_file(parser, name) # If there have been parse errors there is a python bug that causes the # values to be lists, this little trick coerces these into strings. parser.readfp(io.BytesIO()) raw_config = {} for section in parser.sections(): raw_config[section] = dict(parser.items(section)) logger.info('Loading config from command line options') for section, key, value in overrides: raw_config.setdefault(section, {})[key] = value return raw_config def _load_file(parser, filename): if not os.path.exists(filename): logger.debug( 'Loading config from %s failed; it does not exist', filename) return if not os.access(filename, os.R_OK): logger.warning( 'Loading config from %s failed; read permission missing', filename) return try: logger.info('Loading config from %s', filename) with io.open(filename, 'rb') as filehandle: parser.readfp(filehandle) except configparser.MissingSectionHeaderError as e: logger.warning('%s does not have a config section, not loaded.', filename) except configparser.ParsingError as e: linenos = ', '.join(str(lineno) for lineno, line in e.errors) logger.warning( '%s has errors, line %s has been ignored.', filename, linenos) except IOError: # TODO: if this is the initial load of logging config we might not # have a logger at this point, we might want to handle this better. 
logger.debug('Config file %s not found; skipping', filename) def _validate(raw_config, schemas): # Get validated config config = {} errors = {} sections = set(raw_config) for schema in schemas: sections.discard(schema.name) values = raw_config.get(schema.name, {}) result, error = schema.deserialize(values) if error: errors[schema.name] = error if result: config[schema.name] = result for section in sections: logger.debug('Ignoring unknown config section: %s', section) return config, errors def _format(config, comments, schemas, display, disable): output = [] for schema in schemas: serialized = schema.serialize( config.get(schema.name, {}), display=display) if not serialized: continue output.append(b'[%s]' % bytes(schema.name)) for key, value in serialized.items(): if isinstance(value, types.DeprecatedValue): continue comment = bytes(comments.get(schema.name, {}).get(key, '')) output.append(b'%s =' % bytes(key)) if value is not None: output[-1] += b' ' + value if comment: output[-1] += b' ; ' + comment.capitalize() if disable: output[-1] = re.sub(r'^', b'#', output[-1], flags=re.M) output.append(b'') return b'\n'.join(output).strip() def _preprocess(config_string): """Convert a raw config into a form that preserves comments etc.""" results = ['[__COMMENTS__]'] counter = itertools.count(0) section_re = re.compile(r'^(\[[^\]]+\])\s*(.+)$') blank_line_re = re.compile(r'^\s*$') comment_re = re.compile(r'^(#|;)') inline_comment_re = re.compile(r' ;') def newlines(match): return '__BLANK%d__ =' % next(counter) def comments(match): if match.group(1) == '#': return '__HASH%d__ =' % next(counter) elif match.group(1) == ';': return '__SEMICOLON%d__ =' % next(counter) def inlinecomments(match): return '\n__INLINE%d__ =' % next(counter) def sections(match): return '%s\n__SECTION%d__ = %s' % ( match.group(1), next(counter), match.group(2)) for line in config_string.splitlines(): line = blank_line_re.sub(newlines, line) line = section_re.sub(sections, line) line = comment_re.sub(comments, line) line = inline_comment_re.sub(inlinecomments, line) results.append(line) return '\n'.join(results) def _postprocess(config_string): """Converts a preprocessed config back to original form.""" flags = re.IGNORECASE | re.MULTILINE result = re.sub(r'^\[__COMMENTS__\](\n|$)', '', config_string, flags=flags) result = re.sub(r'\n__INLINE\d+__ =(.*)$', ' ;\g<1>', result, flags=flags) result = re.sub(r'^__HASH\d+__ =(.*)$', '#\g<1>', result, flags=flags) result = re.sub(r'^__SEMICOLON\d+__ =(.*)$', ';\g<1>', result, flags=flags) result = re.sub(r'\n__SECTION\d+__ =(.*)$', '\g<1>', result, flags=flags) result = re.sub(r'^__BLANK\d+__ =$', '', result, flags=flags) return result class Proxy(collections.Mapping): def __init__(self, data): self._data = data def __getitem__(self, key): item = self._data.__getitem__(key) if isinstance(item, dict): return Proxy(item) return item def __iter__(self): return self._data.__iter__() def __len__(self): return self._data.__len__() def __repr__(self): return b'Proxy(%r)' % self._data
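# Rough illustration of the _preprocess/_postprocess pair (an assumption about the
# round trip, not a documented guarantee): comments and blank lines are rewritten
# into parseable "key = value" placeholders and restored afterwards, e.g.
#   >>> s = '# a comment\nfoo = bar'
#   >>> _postprocess(_preprocess(s)) == s
#   True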
dbrgn/mopidy
mopidy/config/__init__.py
Python
apache-2.0
9,890
from django import template
from django.template.loader import get_template
import itertools

register = template.Library()


def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return itertools.izip_longest(fillvalue=fillvalue, *args)


@register.filter
def get_form_field_type(field):
    """Return the class name of the given form field."""
    return field.field.__class__.__name__


@register.simple_tag(takes_context=True)
def reform(context, form, include=None, layout='vertical,false,1'):
    """My rendition of the as_bootstrap filter from the bootstrap_toolkit,
    written as a tag so that extra parameters can be taken in and passed on
    to the bootstrap templates for use in rendering fields."""
    params = [param.strip() for param in layout.split(',')]
    layout = str(params[0]).lower()

    try:
        bootstrap_float = str(params[1]).lower() == 'float'
    except IndexError:
        bootstrap_float = False

    # guard the column count as well: a short layout such as 'vertical' would
    # otherwise raise IndexError before the guarded params[1] lookup above
    try:
        columns = int(params[2])
    except IndexError:
        columns = 1

    if include is None:
        ifields = [f for f in form]
    else:
        include = [f.strip() for f in include.split(',')]
        ifields = []
        for ifield in include:
            for field in form:
                if field.name == ifield:
                    ifields.append(field)

    ifields = grouper(columns, ifields)
    colspan = int(12 / columns)

    context['form'] = form
    context['ifields'] = ifields
    context['colspan'] = colspan
    context['layout'] = layout
    context['float'] = bootstrap_float

    return get_template('reform/form.html').render(context)
alixedi/django_reform
reform/templatetags/reform.py
Python
bsd-3-clause
1,627
# -*- coding: utf-8 -*- from django.db import connection from django_orm.postgresql.aggregates import Unaccent from django.utils.unittest import TestCase from .models import Person class TestUnaccent(TestCase): def setUp(self): self.p1 = Person.objects.create(name='Andréi') self.p2 = Person.objects.create(name='Pèpâ') def tearDown(self): self.p1.delete() self.p2.delete() def test_annotate(self): qs = Person.objects.annotate(name_unaccent=Unaccent('name')).order_by('id') qs = list(qs) self.assertEqual(qs[0].name_unaccent, 'Andrei') self.assertEqual(qs[1].name_unaccent, 'Pepa')
EnTeQuAk/django-orm
tests/aggregates_unaccent_app/tests.py
Python
bsd-3-clause
671
"""Support for Nanoleaf Lights.""" import logging import voluptuous as vol from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_HS_COLOR, ATTR_TRANSITION, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_TRANSITION, Light) from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN import homeassistant.helpers.config_validation as cv from homeassistant.util import color as color_util from homeassistant.util.color import \ color_temperature_mired_to_kelvin as mired_to_kelvin from homeassistant.util.json import load_json, save_json _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'Nanoleaf' DATA_NANOLEAF = 'nanoleaf' CONFIG_FILE = '.nanoleaf.conf' ICON = 'mdi:triangle-outline' SUPPORT_NANOLEAF = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT | SUPPORT_COLOR | SUPPORT_TRANSITION) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Required(CONF_TOKEN): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Nanoleaf light.""" from pynanoleaf import Nanoleaf, Unavailable if DATA_NANOLEAF not in hass.data: hass.data[DATA_NANOLEAF] = dict() token = '' if discovery_info is not None: host = discovery_info['host'] name = discovery_info['hostname'] # if device already exists via config, skip discovery setup if host in hass.data[DATA_NANOLEAF]: return _LOGGER.info("Discovered a new Nanoleaf: %s", discovery_info) conf = load_json(hass.config.path(CONFIG_FILE)) if conf.get(host, {}).get('token'): token = conf[host]['token'] else: host = config[CONF_HOST] name = config[CONF_NAME] token = config[CONF_TOKEN] nanoleaf_light = Nanoleaf(host) if not token: token = nanoleaf_light.request_token() if not token: _LOGGER.error("Could not generate the auth token, did you press " "and hold the power button on %s" "for 5-7 seconds?", name) return conf = load_json(hass.config.path(CONFIG_FILE)) conf[host] = {'token': token} save_json(hass.config.path(CONFIG_FILE), conf) nanoleaf_light.token = token try: nanoleaf_light.available except Unavailable: _LOGGER.error( "Could not connect to Nanoleaf Light: %s on %s", name, host) return hass.data[DATA_NANOLEAF][host] = nanoleaf_light add_entities([NanoleafLight(nanoleaf_light, name)], True) class NanoleafLight(Light): """Representation of a Nanoleaf Light.""" def __init__(self, light, name): """Initialize an Nanoleaf light.""" self._available = True self._brightness = None self._color_temp = None self._effect = None self._effects_list = None self._light = light self._name = name self._hs_color = None self._state = None @property def available(self): """Return availability.""" return self._available @property def brightness(self): """Return the brightness of the light.""" if self._brightness is not None: return int(self._brightness * 2.55) return None @property def color_temp(self): """Return the current color temperature.""" if self._color_temp is not None: return color_util.color_temperature_kelvin_to_mired( self._color_temp) return None @property def effect(self): """Return the current effect.""" return self._effect @property def effect_list(self): """Return the list of supported effects.""" return self._effects_list @property def min_mireds(self): """Return the coldest color_temp that this light supports.""" return 154 @property def max_mireds(self): """Return the warmest color_temp that this light supports.""" return 833 @property def 
name(self): """Return the display name of this light.""" return self._name @property def icon(self): """Return the icon to use in the frontend, if any.""" return ICON @property def is_on(self): """Return true if light is on.""" return self._state @property def hs_color(self): """Return the color in HS.""" return self._hs_color @property def supported_features(self): """Flag supported features.""" return SUPPORT_NANOLEAF def turn_on(self, **kwargs): """Instruct the light to turn on.""" brightness = kwargs.get(ATTR_BRIGHTNESS) hs_color = kwargs.get(ATTR_HS_COLOR) color_temp_mired = kwargs.get(ATTR_COLOR_TEMP) effect = kwargs.get(ATTR_EFFECT) transition = kwargs.get(ATTR_TRANSITION) if hs_color: hue, saturation = hs_color self._light.hue = int(hue) self._light.saturation = int(saturation) if color_temp_mired: self._light.color_temperature = mired_to_kelvin(color_temp_mired) if transition: if brightness: # tune to the required brightness in n seconds self._light.brightness_transition( int(brightness / 2.55), int(transition)) else: # If brightness is not specified, assume full brightness self._light.brightness_transition(100, int(transition)) else: # If no transition is occurring, turn on the light self._light.on = True if brightness: self._light.brightness = int(brightness / 2.55) if effect: self._light.effect = effect def turn_off(self, **kwargs): """Instruct the light to turn off.""" transition = kwargs.get(ATTR_TRANSITION) if transition: self._light.brightness_transition(0, int(transition)) else: self._light.on = False def update(self): """Fetch new state data for this light.""" from pynanoleaf import Unavailable try: self._available = self._light.available self._brightness = self._light.brightness self._color_temp = self._light.color_temperature self._effect = self._light.effect self._effects_list = self._light.effects self._hs_color = self._light.hue, self._light.saturation self._state = self._light.on except Unavailable as err: _LOGGER.error("Could not update status for %s (%s)", self.name, err) self._available = False
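# Example configuration.yaml entry matching PLATFORM_SCHEMA above (host and token
# are placeholders):
#   light:
#     - platform: nanoleaf
#       host: 192.168.1.10
#       token: YOUR_API_TOKEN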
MartinHjelmare/home-assistant
homeassistant/components/nanoleaf/light.py
Python
apache-2.0
6,951
import datetime

import pytz
from dateutil.tz import tzoffset
from django.utils import timezone

from radioco.apps.radioco.utils import memorize

timestamp = datetime.datetime(2009, 1, 1)  # any unambiguous timestamp will work here


class GMT(tzoffset):
    """
    GMT implementation, it has a fixed offset
    """

    def __init__(self, name, offset):
        hours = int(offset / 3600)
        if hours < 0:
            name = 'GMT-%s' % abs(hours)
        else:
            name = 'GMT+%s' % hours
        super(GMT, self).__init__(name=name, offset=offset)

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return '<%s>' % self._name

    def __str__(self):
        return '%s' % self._name


@memorize
def get_timezone_offset(tz):
    return GMT(None, (tz.utcoffset(timestamp) - tz.dst(timestamp)).total_seconds())


def get_active_timezone():
    """
    Same method as timezone.get_current_timezone but returning utc if nothing was set
    """
    return getattr(timezone._active, "value", pytz.utc)


def transform_datetime_tz(dt, tz=None):
    """
    Transform a datetime in another timezone to the current one
    """
    if not tz:
        tz = timezone.get_current_timezone()
    return tz.normalize(dt.astimezone(tz))


def transform_dt_to_default_tz(dt):
    """
    Transform a datetime in another timezone to the default one
    """
    tz = timezone.get_default_timezone()
    return tz.normalize(dt.astimezone(tz))


def fix_recurrence_date(start_dt, dt):
    """
    Fix for django-recurrence 1.3
    rdates and exdates need a datetime; we are combining the date with the time from start_date.

    Return: A datetime in the default timezone with the offset required to work in the recurrence
    """
    current_dt = transform_dt_to_default_tz(dt)
    current_start_dt = transform_dt_to_default_tz(start_dt)

    tz = GMT(None, current_start_dt.utcoffset().total_seconds())  # tz without DST
    # We are localising a new dt in the DST naive tz
    fixed_dt = transform_dt_to_default_tz(
        tz.localize(datetime.datetime.combine(current_dt.date(), current_start_dt.time())))
    return fixed_dt


def fix_recurrence_dst(dt):
    """
    Fix for django-recurrence 1.3
    Fix a tz-aware datetime that has an incorrect offset

    Returns: A datetime in the same timezone but with the offset fixed
    """
    if dt:
        tz = dt.tzinfo
        return tz.localize(datetime.datetime.combine(dt.date(), dt.time()))
    return None


def _fix_invalid_dt(recurrence, dt):
    """
    Check if start_dt is a valid result
    """
    if not recurrence.rrules:
        return dt

    if dt in recurrence.rdates:
        return dt

    for rrule in recurrence.rrules:
        if not rrule.until:
            return dt
        elif dt < rrule.until:
            return dt
    return None


def recurrence_after(recurrence, after_dt, start_dt):
    """
    Fix for django-recurrence 1.3
    Avoid outputting an impossible dt
    """
    dt = recurrence.after(after_dt, True, dtstart=start_dt)
    if dt == start_dt:
        return _fix_invalid_dt(recurrence, dt)
    return dt


def recurrence_before(recurrence, before_dt, start_dt):
    """
    Fix for django-recurrence 1.3
    Avoid outputting an impossible dt
    """
    dt = recurrence.before(before_dt, True, dtstart=start_dt)
    if dt == start_dt:
        return _fix_invalid_dt(recurrence, dt)
    return dt
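# Illustrative use of the fixed-offset GMT class above (values invented):
#   >>> tz = GMT(None, 3600)                 # one hour east of UTC, DST-naive
#   >>> str(tz)
#   'GMT+1'
#   >>> tz.localize(datetime.datetime(2020, 1, 1)).tzinfo is tz
#   True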
iago1460/django-radio
radioco/apps/radioco/tz_utils.py
Python
gpl-3.0
3,902
""" Transfer Student Management Command """ from textwrap import dedent from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user from django.db import transaction from opaque_keys.edx.keys import CourseKey from common.djangoapps.student.models import CourseEnrollment from common.djangoapps.track.management.tracked_command import TrackedCommand class TransferStudentError(Exception): """ Generic Error when handling student transfers. """ pass # lint-amnesty, pylint: disable=unnecessary-pass class Command(TrackedCommand): """ Transfer students enrolled in one course into one or more other courses. This will remove them from the first course. Their enrollment mode (i.e. honor, verified, audit, etc.) will persist into the other course(s). """ help = dedent(__doc__) def add_arguments(self, parser): parser.add_argument('-f', '--from', metavar='SOURCE_COURSE', dest='source_course', required=True, help='the course to transfer students from') parser.add_argument('-t', '--to', nargs='+', metavar='DEST_COURSE', dest='dest_course_list', required=True, help='the new course(s) to enroll the student into') @transaction.atomic def handle(self, *args, **options): source_key = CourseKey.from_string(options['source_course']) dest_keys = [] for course_key in options['dest_course_list']: dest_keys.append(CourseKey.from_string(course_key)) source_students = User.objects.filter( courseenrollment__course_id=source_key ) for user in source_students: with transaction.atomic(): print(f'Moving {user.username}.') # Find the old enrollment. enrollment = CourseEnrollment.objects.get( user=user, course_id=source_key ) # Move the Student between the classes. mode = enrollment.mode old_is_active = enrollment.is_active CourseEnrollment.unenroll(user, source_key, skip_refund=True) print('Unenrolled {} from {}'.format(user.username, str(source_key))) for dest_key in dest_keys: if CourseEnrollment.is_enrolled(user, dest_key): # Un Enroll from source course but don't mess # with the enrollment in the destination course. msg = 'Skipping {}, already enrolled in destination course {}' print(msg.format(user.username, str(dest_key))) else: new_enrollment = CourseEnrollment.enroll(user, dest_key, mode=mode) # Un-enroll from the new course if the user had un-enrolled # form the old course. if not old_is_active: new_enrollment.update_enrollment(is_active=False, skip_refund=True)
eduNEXT/edunext-platform
common/djangoapps/student/management/commands/transfer_students.py
Python
agpl-3.0
3,295
#!/usr/bin/env python

# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
#   Copyright 2009-2017 DataONE
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""Create Science Object on Member Node.

This is an example of how to use the DataONE Client and Common libraries for Python.
It shows how to:

- Upload a local file to a Member Node as a Science Object

Operation:

- The first time the script is run, a message indicating that the object was
  successfully created should be displayed, and the object should become available
  on the Member Node.

- If the script is then launched again without changing the identifier (PID), an
  IdentifierNotUnique exception should be returned. This indicates that the
  identifier is now in use by the previously created object.

- Any other errors will also be returned as DataONE exceptions.
"""

import logging
import pathlib
import sys

import d1_scimeta.util

import d1_common.const
import d1_common.types.exceptions

import d1_client.command_line
import d1_client.d1client

log = logging.getLogger(__name__)


def main():
    parser = d1_client.command_line.D1ClientArgParser(__doc__, add_base_url=True)
    parser.add_argument("--formats", action="store_true", help="List valid formatIds")
    # parser.add_argument(
    #     "node_id", help="URN of target node (e.g.: urn:node:ABC)"
    # )
    parser.add_argument("pid", help="Persistent Identifier for the Science Object")
    parser.add_argument("format_id", help="formatId for the Science Object")
    parser.add_argument("path", help="Path to the Science Object file")
    args = parser.parse_args()
    d1_client.command_line.log_setup(args.debug)

    if args.formats:
        d1_scimeta.util.get_supported_format_id_list()
        return 0

    client = d1_client.d1client.DataONEClient(parser.get_method_args(args))

    if client.auth_subj_tup[0] == d1_common.const.SUBJECT_PUBLIC:
        log.error(
            "Must provide a certificate in order to gain access to create objects on MN"
        )
        return 1

    try:
        client.create_sciobj(args.pid, args.format_id, pathlib.Path(args.path))
    except d1_common.types.exceptions.DataONEException as e:
        log.error("Create failed. Error: {}".format(str(e)))
        return 1

    log.info("Create successful")
    return 0


if __name__ == "__main__":
    sys.exit(main())
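# Hypothetical invocation (every value below is a placeholder; run the script with
# --help to see the exact options D1ClientArgParser registers, including how the
# Member Node base URL is supplied):
#   python create_object_on_member_node.py my-pid-0001 text/csv ./my_object.csv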
DataONEorg/d1_python
utilities/src/d1_util/create_object_on_member_node.py
Python
apache-2.0
3,005
from sklearn import svm from scipy.misc import imresize import glob import cv2 import numpy as np import pickle from sklearn.model_selection import train_test_split import matplotlib.image as mpimg from sklearn.preprocessing import LabelBinarizer from keras.models import Sequential from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D from keras.models import load_model class TrainingSet: def __init__(self, size=32): self.size = size def LoadImages(self, path): images = [] for file in glob.glob(path, recursive=True): image = mpimg.imread(file) image = imresize(image, (self.size, self.size)) images.append(image) return np.asarray(images) def PrintStats(self, array): print(array.shape) print(array.dtype) print("Mean: ", np.mean(array)) print("Min: ", np.min(array)) print("Max: ", np.max(array)) print("STD: ", np.std(array)) def LoadTrainingData(self, test_split=0.05): cars_images = self.LoadImages('./data_cars/**/*.png') notcars_images = self.LoadImages('./data_nocars/**/*.png') print('Cars: {}, No: {} '.format(cars_images.shape[0], notcars_images.shape[0])) X = np.concatenate((cars_images, notcars_images), axis=0) y = np.hstack((np.ones(cars_images.shape[0]), np.zeros(notcars_images.shape[0]))).flatten() rand_state = np.random.randint(0, 100) self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size=test_split, random_state=rand_state) class Detector: def __init__(self, size=32): self.size = size def Build(self): size = self.size model = Sequential() model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(size, size, 3), output_shape=(size, size, 3))) model.add(Convolution2D(12, 3, 3, subsample=(2, 2))) model.add(Dropout(0.5)) model.add(Activation('relu')) model.add(Convolution2D(24, 3, 3, subsample=(2, 2))) model.add(Dropout(0.5)) model.add(Activation('relu')) model.add(Flatten()) model.add(Dense(80)) model.add(Dropout(0.5)) model.add(Activation('relu')) model.add(Dense(5)) model.add(Activation('relu')) model.add(Dense(1)) model.add(Activation('sigmoid')) model.compile('adam', 'binary_crossentropy', ['accuracy']) # rmsprop return model def Train(self, X, y): self.model = self.Build() self.history = self.model.fit(X, y, nb_epoch=25, validation_split=0.1, batch_size=128) def Test(self, X, y): #y_one_hot_test = self.label_binarizer.fit_transform(y) metrics = self.model.evaluate(X, y) for metric_i in range(len(self.model.metrics_names)): metric_name = self.model.metrics_names[metric_i] metric_value = metrics[metric_i] print('{}: {}'.format(metric_name, metric_value)) def Detect(self, X): return self.model.predict(X, batch_size=128) def Save(self, fname): self.model.save(fname) def Load(self, fname): self.model = load_model(fname)
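# A minimal end-to-end sketch with the classes above (file layout and epoch count
# come from the defaults already hard-coded in this module):
#   ts = TrainingSet(size=32)
#   ts.LoadTrainingData(test_split=0.05)
#   det = Detector(size=32)
#   det.Train(ts.X_train, ts.y_train)
#   det.Test(ts.X_test, ts.y_test)
#   det.Save('detector.h5')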
strotz/SDC-Projects
project4/cardetect.py
Python
mit
3,317
# benchmark.py Simple benchmark for umqtt.simple # Assumes simple.py (from micropython-lib) is copied to ESP8266 # Outcome with mosquitto running on a Raspberry Pi on wired network, # Wemos D1 Mini running on WiFi: echo received in max 154 ms min 27 ms import ubinascii from simple import MQTTClient from machine import unique_id from utime import sleep, ticks_ms, ticks_diff def tdiff(): new_semantics = ticks_diff(2, 1) == 1 def func(old, new): nonlocal new_semantics if new_semantics: return ticks_diff(new, old) return ticks_diff(old, new) return func ticksdiff = tdiff() SERVER = "192.168.0.23" CLIENT_ID = ubinascii.hexlify(unique_id()) TOPIC = b"led" QOS = 1 t = 0 maxt = 0 mint = 5000 def sub_cb(topic, msg): global t, maxt, mint dt = ticksdiff(t, ticks_ms()) print('echo received in {} ms'.format(dt)) print((topic, msg)) maxt = max(maxt, dt) mint = min(mint, dt) def main(quit=True): global t c = MQTTClient(CLIENT_ID, SERVER) # Subscribed messages will be delivered to this callback c.set_callback(sub_cb) c.connect() c.subscribe(TOPIC, qos = QOS) print("Connected to %s, subscribed to %s topic" % (SERVER, TOPIC)) n = 0 pubs = 0 try: while 1: n += 1 if not n % 100: t = ticks_ms() c.publish(TOPIC, str(pubs).encode('UTF8'), retain = False, qos = QOS) c.wait_msg() pubs += 1 if not pubs % 100: print('echo received in max {} ms min {} ms'. format(maxt, mint)) if quit: return sleep(0.05) c.check_msg() finally: c.disconnect()
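# Typical use from the MicroPython REPL (SERVER above must point at a reachable
# MQTT broker; the module name assumes this file is saved as benchmark.py):
#   import benchmark
#   benchmark.main(quit=False)   # loop forever, printing echo round-trip times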
peterhinch/micropython-samples
ESP8266/benchmark.py
Python
mit
1,796
"""Module to handle entering building""" from game import entry_fee_checker from game import resource class AlreadyOccupiedError(Exception): pass class NotEnoughEntryFeeError(Exception): pass class NotPickedEnoughEntryFeeError(Exception): pass class EnterBuildingHandler(object): def __init__(self, building_obj, player_obj, picker_obj): self._building = building_obj self._player = player_obj self._picker = picker_obj self._checker = None self._picked_res = None self._CreateChecker() self._CheckCanEnter() if building_obj.IsOccupied(): raise AlreadyOccupiedError( 'Building %s is occupied' % self._building.GetName()) def _CreateChecker(self): self._checker = entry_fee_checker.EntryFeeChecker(self._building.GetFee()) def _CheckCanEnter(self): all_picked_res = resource.FilterResourceFood(self._player.GetResource()) can_enter = False try: can_enter = self._checker.Check(all_picked_res) # Too much is ok since we just check that player has enough resource to # pay entry fee. except entry_fee_checker.TooMuchError: can_enter = True if not can_enter: raise NotEnoughEntryFeeError('Not enough resource for entry fee.') def EnterBuilding(self): self._picked_res = self._picker.GetPicked() self._CheckPickedEnough() self._PayEntryFee() self._OccupyBuilding() def _CheckPickedEnough(self): can_enter = self._checker.Check(self._picked_res) if not can_enter: raise NotPickedEnoughEntryFeeError( 'Not picked enough resource for entry fee') def _PayEntryFee(self): self._player.SubtractResource(self._picked_res) def _OccupyBuilding(self): self._player.SetWorkerPlace(self._building.GetName()) self._building.SetCurrentWorker(self._player.GetName()) def GetPicker(self): return self._picker
chiang831/LeHavre
src/game/enter_building_handler.py
Python
gpl-2.0
1,876
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import scipy.misc
import cPickle
import os
import time

os.chdir("/home/pi/Desktop/Image_db/")

import warnings
warnings.filterwarnings('error', category=DeprecationWarning)

def rgb2gray(rgb):
    return np.dot(rgb[:, :, :], [0.299, 0.587, 0.114])

def standard(X):
    return (X - X.mean()) / X.max()

def Pre_Process(face):
    from skimage.transform import resize
    X = standard(resize(face, (96, 96))).reshape(-1, 1, 96, 96)
    X_normal = X.reshape(-1, 9216)
    return X, X_normal

# load the trained model again
with open('/home/pi/Desktop/files/linear_model.pkl', 'rb') as fid:
    Net = cPickle.load(fid)

label_map = np.load('/home/pi/Desktop/files/map.npy')
#print(label_map)

#face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/opencv-2.4.13/data/haarcascades_GPU/haarcascade_frontalface_default.xml')
#face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/opencv-2.4.13/data/haarcascades/haarcascade_frontalface_default.xml')
face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/opencv-2.4.13/data/lbpcascades/lbpcascade_frontalface.xml')

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (1000, 750)
camera.framerate = 15
camera.zoom = (0, 0, 0.75, 0.75)
rawCapture = PiRGBArray(camera, size=(1000, 750))

cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Video', 640, 480)
i = 0
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    frame = frame.array
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    start_time = time.time()
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(90, 90)
    )
    #print("--- %s seconds ---" % (time.time() - start_time))
    # Draw a rectangle around the faces
    if len(faces) > 0:
        for (x, y, w, h) in faces:
            i += 1
            # slice with w for width and h for height (the original used h for both)
            fac = np.array(frame)[y:(y + h), x:(x + w), :]
            fac_gray = np.array(gray)[y:(y + h), x:(x + w)]
            X, X_normal = Pre_Process(fac_gray)
            Probability = Net.predict_proba(X.reshape(-1, 9216))
            prob = np.amax(Probability)
            #print(Probability)
            index = np.argmax(Probability)
            #print(index)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            #cv2.putText(frame,'omar',(x,y+h), cv2.FONT_HERSHEY_DUPLEX,1,(0,0,255), 2,8)
            #cv2.putText(frame,str(label_map[index])+' '+str(round(prob*100,2))+'%',(x,y), cv2.FONT_HERSHEY_DUPLEX,1,(255,255,255), 1,2)
            print("--- %s seconds ---" % (time.time() - start_time))
            scipy.misc.toimage(cv2.cvtColor(fac, cv2.COLOR_RGB2BGR)).save(time.strftime('%Y-%m-%d') + '_' + str(i) + '.jpg')

    # Display the resulting frame
    cv2.imshow('Video', frame)
    #time.sleep(0.1)
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if time.localtime(time.time()).tm_hour == 20:
        break
        #os.system("shutdown now -h")

# When everything is done, release the capture
cv2.destroyAllWindows()
mdomarsaleem/Facial_Plan
Database creation/database_cv.py
Python
mit
3,294
# Author: Nic Wolfe <nic@wolfeden.ca> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import datetime import threading import time from sickbeard import db, exceptions, helpers, search, scheduler from sickbeard import search_queue from sickbeard import logger from sickbeard import ui from sickbeard.common import * class BacklogSearchScheduler(scheduler.Scheduler): def forceSearch(self): self.action._set_lastBacklog(1) self.lastRun = datetime.datetime.fromordinal(1) def nextRun(self): if self.action._lastBacklog <= 1: return datetime.date.today() else: return datetime.date.fromordinal(self.action._lastBacklog + self.action.cycleTime) class BacklogSearcher: def __init__(self): self._lastBacklog = self._get_lastBacklog() self.cycleTime = 7 self.lock = threading.Lock() self.amActive = False self.amPaused = False self.amWaiting = False self._resetPI() def _resetPI(self): self.percentDone = 0 self.currentSearchInfo = {'title': 'Initializing'} def getProgressIndicator(self): if self.amActive: return ui.ProgressIndicator(self.percentDone, self.currentSearchInfo) else: return None def am_running(self): logger.log(u"amWaiting: "+str(self.amWaiting)+", amActive: "+str(self.amActive), logger.DEBUG) return (not self.amWaiting) and self.amActive def searchBacklog(self, which_shows=None): if which_shows: show_list = which_shows else: show_list = sickbeard.showList if self.amActive == True: logger.log(u"Backlog is still running, not starting it again", logger.DEBUG) return self._get_lastBacklog() curDate = datetime.date.today().toordinal() fromDate = datetime.date.fromordinal(1) if not which_shows and not curDate - self._lastBacklog >= self.cycleTime: logger.log(u"Running limited backlog on recently missed episodes only") fromDate = datetime.date.today() - datetime.timedelta(days=7) self.amActive = True self.amPaused = False myDB = db.DBConnection() numSeasonResults = myDB.select("SELECT DISTINCT(season), showid FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.tvdb_id AND show.paused = 0 AND ep.airdate > ?", [fromDate.toordinal()]) # get separate lists of the season/date shows season_shows = [x for x in show_list if not x.is_air_by_date] air_by_date_shows = [x for x in show_list if x.is_air_by_date] # figure out how many segments of air by date shows we're going to do air_by_date_segments = [] for cur_id in [x.tvdbid for x in air_by_date_shows]: air_by_date_segments += self._get_air_by_date_segments(cur_id, fromDate) logger.log(u"Air-by-date segments: "+str(air_by_date_segments), logger.DEBUG) totalSeasons = float(len(numSeasonResults) + len(air_by_date_segments)) numSeasonsDone = 0.0 # go through non air-by-date shows and see if they need any episodes for curShow in show_list: if curShow.paused: continue if curShow.is_air_by_date: segments = [x[1] for x in 
self._get_air_by_date_segments(curShow.tvdbid, fromDate)] else: segments = self._get_season_segments(curShow.tvdbid, fromDate) for cur_segment in segments: self.currentSearchInfo = {'title': curShow.name + " Season "+str(cur_segment)} backlog_queue_item = search_queue.BacklogQueueItem(curShow, cur_segment) if not backlog_queue_item.wantSeason: logger.log(u"Nothing in season "+str(cur_segment)+" needs to be downloaded, skipping this season", logger.DEBUG) else: sickbeard.searchQueueScheduler.action.add_item(backlog_queue_item) # don't consider this an actual backlog search if we only did recent eps # or if we only did certain shows if fromDate == datetime.date.fromordinal(1) or not which_shows: self._set_lastBacklog(curDate) self.amActive = False self._resetPI() def _get_lastBacklog(self): logger.log(u"Retrieving the last check time from the DB", logger.DEBUG) myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM info") if len(sqlResults) == 0: lastBacklog = 1 elif sqlResults[0]["last_backlog"] == None or sqlResults[0]["last_backlog"] == "": lastBacklog = 1 else: lastBacklog = int(sqlResults[0]["last_backlog"]) self._lastBacklog = lastBacklog return self._lastBacklog def _get_season_segments(self, tvdb_id, fromDate): myDB = db.DBConnection() sqlResults = myDB.select("SELECT DISTINCT(season) as season FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?", [tvdb_id, fromDate.toordinal()]) return [int(x["season"]) for x in sqlResults] def _get_air_by_date_segments(self, tvdb_id, fromDate): # query the DB for all dates for this show myDB = db.DBConnection() num_air_by_date_results = myDB.select("SELECT airdate, showid FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.tvdb_id AND show.paused = 0 ANd ep.airdate > ? AND ep.showid = ?", [fromDate.toordinal(), tvdb_id]) # break them apart into month/year strings air_by_date_segments = [] for cur_result in num_air_by_date_results: cur_date = datetime.date.fromordinal(int(cur_result["airdate"])) cur_date_str = str(cur_date)[:7] cur_tvdb_id = int(cur_result["showid"]) cur_result_tuple = (cur_tvdb_id, cur_date_str) if cur_result_tuple not in air_by_date_segments: air_by_date_segments.append(cur_result_tuple) return air_by_date_segments def _set_lastBacklog(self, when): logger.log(u"Setting the last backlog in the DB to " + str(when), logger.DEBUG) myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM info") if len(sqlResults) == 0: myDB.action("INSERT INTO info (last_backlog, last_TVDB) VALUES (?,?)", [str(when), 0]) else: myDB.action("UPDATE info SET last_backlog=" + str(when)) def run(self): try: self.searchBacklog() except: self.amActive = False raise
wimac/home
Dropbox/skel/bin/sick-beard/sickbeard/searchBacklog (MOU-CDQT5R1's conflicted copy 2012-04-11).py
Python
gpl-2.0
7,632
"""The test for sensor device automation.""" from datetime import timedelta import pytest import homeassistant.components.automation as automation from homeassistant.components.sensor import DOMAIN from homeassistant.components.sensor.device_trigger import ENTITY_TRIGGERS from homeassistant.const import CONF_PLATFORM, PERCENTAGE, STATE_UNKNOWN from homeassistant.helpers import device_registry from homeassistant.setup import async_setup_component import homeassistant.util.dt as dt_util from tests.common import ( MockConfigEntry, async_fire_time_changed, async_get_device_automation_capabilities, async_get_device_automations, async_mock_service, mock_device_registry, mock_registry, ) from tests.testing_config.custom_components.test.sensor import DEVICE_CLASSES @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def entity_reg(hass): """Return an empty, loaded, registry.""" return mock_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, device_reg, entity_reg): """Test we get the expected triggers from a sensor.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) for device_class in DEVICE_CLASSES: entity_reg.async_get_or_create( DOMAIN, "test", platform.ENTITIES[device_class].unique_id, device_id=device_entry.id, ) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() expected_triggers = [ { "platform": "device", "domain": DOMAIN, "type": trigger["type"], "device_id": device_entry.id, "entity_id": platform.ENTITIES[device_class].entity_id, } for device_class in DEVICE_CLASSES for trigger in ENTITY_TRIGGERS[device_class] if device_class != "none" ] triggers = await async_get_device_automations(hass, "trigger", device_entry.id) assert len(triggers) == 12 assert triggers == expected_triggers async def test_get_trigger_capabilities(hass, device_reg, entity_reg): """Test we get the expected capabilities from a sensor trigger.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create( DOMAIN, "test", platform.ENTITIES["battery"].unique_id, device_id=device_entry.id, ) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() expected_capabilities = { "extra_fields": [ { "description": {"suffix": PERCENTAGE}, "name": "above", "optional": True, "type": "float", }, { "description": {"suffix": PERCENTAGE}, "name": "below", "optional": True, "type": "float", }, {"name": "for", "optional": True, "type": "positive_time_period_dict"}, ] } triggers = await async_get_device_automations(hass, "trigger", device_entry.id) assert len(triggers) == 1 for trigger in triggers: capabilities = await async_get_device_automation_capabilities( hass, "trigger", trigger ) assert capabilities == expected_capabilities async def 
test_get_trigger_capabilities_none(hass, device_reg, entity_reg): """Test we get the expected capabilities from a sensor trigger.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() triggers = [ { "platform": "device", "device_id": "8770c43885354d5fa27604db6817f63f", "domain": "sensor", "entity_id": "sensor.beer", "type": "is_battery_level", }, { "platform": "device", "device_id": "8770c43885354d5fa27604db6817f63f", "domain": "sensor", "entity_id": platform.ENTITIES["none"].entity_id, "type": "is_battery_level", }, ] expected_capabilities = {} for trigger in triggers: capabilities = await async_get_device_automation_capabilities( hass, "trigger", trigger ) assert capabilities == expected_capabilities async def test_if_fires_not_on_above_below(hass, calls, caplog): """Test for value triggers firing.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() sensor1 = platform.ENTITIES["battery"] assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": DOMAIN, "device_id": "", "entity_id": sensor1.entity_id, "type": "battery_level", }, "action": {"service": "test.automation"}, } ] }, ) assert "must contain at least one of below, above" in caplog.text async def test_if_fires_on_state_above(hass, calls): """Test for value triggers firing.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() sensor1 = platform.ENTITIES["battery"] assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": DOMAIN, "device_id": "", "entity_id": sensor1.entity_id, "type": "battery_level", "above": 10, }, "action": { "service": "test.automation", "data_template": { "some": "bat_low {{ trigger.%s }}" % "}} - {{ trigger.".join( ( "platform", "entity_id", "from_state.state", "to_state.state", "for", ) ) }, }, } ] }, ) await hass.async_block_till_done() assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN assert len(calls) == 0 hass.states.async_set(sensor1.entity_id, 9) await hass.async_block_till_done() assert len(calls) == 0 hass.states.async_set(sensor1.entity_id, 11) await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "bat_low device - {} - 9 - 11 - None".format( sensor1.entity_id ) async def test_if_fires_on_state_below(hass, calls): """Test for value triggers firing.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() sensor1 = platform.ENTITIES["battery"] assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": DOMAIN, "device_id": "", "entity_id": sensor1.entity_id, "type": "battery_level", "below": 10, }, "action": { "service": "test.automation", "data_template": { "some": "bat_low {{ trigger.%s }}" % "}} - {{ trigger.".join( ( "platform", "entity_id", "from_state.state", "to_state.state", "for", ) ) }, }, } ] }, ) 
await hass.async_block_till_done() assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN assert len(calls) == 0 hass.states.async_set(sensor1.entity_id, 11) await hass.async_block_till_done() assert len(calls) == 0 hass.states.async_set(sensor1.entity_id, 9) await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "bat_low device - {} - 11 - 9 - None".format( sensor1.entity_id ) async def test_if_fires_on_state_between(hass, calls): """Test for value triggers firing.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() sensor1 = platform.ENTITIES["battery"] assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": DOMAIN, "device_id": "", "entity_id": sensor1.entity_id, "type": "battery_level", "above": 10, "below": 20, }, "action": { "service": "test.automation", "data_template": { "some": "bat_low {{ trigger.%s }}" % "}} - {{ trigger.".join( ( "platform", "entity_id", "from_state.state", "to_state.state", "for", ) ) }, }, } ] }, ) await hass.async_block_till_done() assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN assert len(calls) == 0 hass.states.async_set(sensor1.entity_id, 9) await hass.async_block_till_done() assert len(calls) == 0 hass.states.async_set(sensor1.entity_id, 11) await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "bat_low device - {} - 9 - 11 - None".format( sensor1.entity_id ) hass.states.async_set(sensor1.entity_id, 21) await hass.async_block_till_done() assert len(calls) == 1 hass.states.async_set(sensor1.entity_id, 19) await hass.async_block_till_done() assert len(calls) == 2 assert calls[1].data["some"] == "bat_low device - {} - 21 - 19 - None".format( sensor1.entity_id ) async def test_if_fires_on_state_change_with_for(hass, calls): """Test for triggers firing with delay.""" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() sensor1 = platform.ENTITIES["battery"] assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": DOMAIN, "device_id": "", "entity_id": sensor1.entity_id, "type": "battery_level", "above": 10, "for": {"seconds": 5}, }, "action": { "service": "test.automation", "data_template": { "some": "turn_off {{ trigger.%s }}" % "}} - {{ trigger.".join( ( "platform", "entity_id", "from_state.state", "to_state.state", "for", ) ) }, }, } ] }, ) await hass.async_block_till_done() assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN assert len(calls) == 0 hass.states.async_set(sensor1.entity_id, 11) await hass.async_block_till_done() assert len(calls) == 0 async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10)) await hass.async_block_till_done() assert len(calls) == 1 await hass.async_block_till_done() assert ( calls[0].data["some"] == f"turn_off device - {sensor1.entity_id} - unknown - 11 - 0:00:05" )
sdague/home-assistant
tests/components/sensor/test_device_trigger.py
Python
apache-2.0
14,830
from flask import session
from appconfig import *


class UserModel:
    def __init__(self):
        from models import Tag
        from models import Post
        from models import User
        self.Tag = Tag.Tag
        self.Post = Post.Post
        self.User = User.User

    def login(self, email, password):
        user = self.User.query.filter_by(Email=email).first()
        if user and user.check_password(password):
            session['email'] = user.Email
            session['nick'] = user.Nick
            session['Id'] = user.Id
            return True
        return False

    def register(self, email, password, nick, role, id=None):
        from models import db
        if id:
            u = self.User.query.filter_by(Id=id).first()
            u.Email = email
            u.Role = role
            u.set_password(password)
            u.Nick = nick
            subject = "Your account is updated"
        else:
            u = self.User(nick, email, role, password)
            db.session.add(u)
            subject = "Account is created"
        db.session.commit()
        body = ("<p>Hello " + nick + ", </p> <p>Your login details for " + URL + " :</p> "
                "<p>Username: " + email + " <br />Password: " + password + "</p>")
        self.send_email(subject, email, body, nick)
        return u.Id

    def list(self):
        users = self.User.query.all()
        if users:
            return users
        return False

    def getUser(self, id):
        user = self.User.query.filter_by(Id=id).first()
        if user:
            return user
        return False

    def send_email(self, subject, recipients, html_body, nick):
        import mandrill
        try:
            # NOTE: the API key should be loaded from configuration rather
            # than hard-coded in source control.
            mandrill_client = mandrill.Mandrill('ajQ8I81AVELYSYn--6xbmw')
            message = {
                'from_email': ADMINS[0],
                'from_name': 'Blog admin',
                'headers': {'Reply-To': ADMINS[0]},
                'html': html_body,
                'important': True,
                'subject': subject,
                'to': [{'email': recipients, 'name': nick, 'type': 'to'}],
            }
            # 'async' became a reserved word in Python 3.7, so the keyword
            # argument has to be passed via dict unpacking.
            result = mandrill_client.messages.send(message=message, **{'async': False})
            '''
            [{'_id': 'abc123abc123abc123abc123abc123',
              'email': 'recipient.email@example.com',
              'reject_reason': 'hard-bounce',
              'status': 'sent'}]
            '''
        except mandrill.Error as e:
            # Mandrill errors are raised as exceptions, e.g.
            # <class 'mandrill.UnknownSubaccountError'> - No subaccount exists with the id 'customer-123'
            print('A mandrill error occurred: %s - %s' % (e.__class__, e))
            raise
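
# --- Editor's illustrative sketch (not part of the original file). A hedged
# example of driving UserModel from a Flask request context. It assumes the
# project's `models` module and database are already configured and that the
# Mandrill call in register() can succeed; the addresses and credentials are
# placeholders.
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    app.secret_key = 'dev-only-secret'  # login() writes to the session

    with app.test_request_context():
        m = UserModel()
        # register() commits the user and emails the credentials
        user_id = m.register('alice@example.com', 's3cret', 'alice', role=1)
        # login() verifies the password hash and populates the session
        assert m.login('alice@example.com', 's3cret')
        print(m.getUser(user_id).Nick)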
goors/flask-microblog
UserModel.py
Python
apache-2.0
2,758
# Copyright (c) 2014 Kontron Europe GmbH
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from .ipmitool import Ipmitool
from .aardvark import Aardvark
from .ipmbdev import IpmbDev
from .mock import Mock
from .rmcp import Rmcp

INTERFACES = [
    Ipmitool,
    Aardvark,
    IpmbDev,
    Mock,
    Rmcp,
]


def create_interface(interface, *args, **kwargs):
    for intf in INTERFACES:
        if intf.NAME == interface:
            # instantiate the first interface class whose NAME matches
            return intf(*args, **kwargs)

    raise RuntimeError('unknown interface with name %s' % interface)
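
# --- Editor's illustrative sketch (not part of the original pyipmi source).
# A hedged usage example for the factory above. It assumes the bundled Mock
# interface registers itself under NAME == 'mock' and can be constructed
# without arguments.
if __name__ == '__main__':
    interface = create_interface('mock')
    print(type(interface).__name__)  # expected: Mock

    try:
        create_interface('no-such-interface')
    except RuntimeError as err:
        print(err)  # unknown interface with name no-such-interface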
kontron/python-ipmi
pyipmi/interfaces/__init__.py
Python
lgpl-2.1
1,238
import pytest import json import re from datetime import datetime, timedelta from CommonServerPython import parse_date_range, DemistoException from CortexDataLake import FIRST_FAILURE_TIME_CONST, LAST_FAILURE_TIME_CONST HUMAN_READABLE_TIME_FROM_EPOCH_TIME_TEST_CASES = [(1582210145000000, False, '2020-02-20T14:49:05'), (1582210145000000, True, '2020-02-20T14:49:05Z')] QUERY_TIMESTAMPS_TEST_CASES = [ ( {'start_time': '2018-04-26 00:00:00', 'end_time': '2020-04-26 00:00:00'}, ('2018-04-26 00:00:00', '2020-04-26 00:00:00'), 'Only start time and end time' ), ( {'time_range': '1 days'}, '1 days', 'Only time range' ), ( {'start_time': '2018-04-26 00:00:00', 'end_time': '2020-04-26 00:00:00', 'time_range': '1 days'}, '1 days', 'Both start/end time and time range' ) ] def load_test_data(json_path): with open(json_path) as f: return json.load(f) @pytest.mark.parametrize('epoch_time, utc_time, expected_response', HUMAN_READABLE_TIME_FROM_EPOCH_TIME_TEST_CASES) def test_human_readable_time_from_epoch_time(epoch_time, utc_time, expected_response): from CortexDataLake import human_readable_time_from_epoch_time assert human_readable_time_from_epoch_time(epoch_time, utc_time=utc_time) == expected_response @pytest.mark.parametrize('args, expected_response, test_case', QUERY_TIMESTAMPS_TEST_CASES) def test_query_timestamp(args, expected_response, test_case): from CortexDataLake import query_timestamp if expected_response == '1 days': expected_start, expected_end = parse_date_range(expected_response) expected_start = expected_start.replace(microsecond=0) expected_end = expected_end.replace(microsecond=0) generated_start, generated_end = query_timestamp(args) generated_start = generated_start generated_end = generated_end assert (generated_start, generated_end) == (expected_start, expected_end), f'Failed: {test_case}' else: generated_start, generated_end = query_timestamp(args) assert (str(generated_start), str(generated_end)) == expected_response, f'Failed: {test_case}' def test_parse_tree_by_root_to_leaf_paths(): from CortexDataLake import parse_tree_by_root_to_leaf_paths root = 'a' body = {'b': 2, 'c': 3, 'd': {'e': 5, 'f': 6, 'g': {'h': 8, 'i': 9}}} expected_output = {'a.b': 2, 'a.c': 3, 'a.d.e': 5, 'a.d.f': 6, 'a.d.g.h': 8, 'a.d.g.i': 9} assert expected_output == parse_tree_by_root_to_leaf_paths(root, body) def test_build_where_clause(): from CortexDataLake import build_where_clause test_cases = [({'query': 'Test'}, 'Test'), ({'source_ip': 'ip1,ip2', 'dest_ip': 'ip3,ip4', 'rule_matched': 'rule1', 'from_zone': 'UTC,UTC2', 'dest_port': '555,666', 'action': 'allow,unknown', 'file_sha_256': 'hash1,hash2', 'file_name': 'name1,name2'}, '(source_ip.value = "ip1" OR source_ip.value = "ip2") ' 'AND (dest_ip.value = "ip3" OR dest_ip.value = "ip4") ' 'AND (rule_matched = "rule1") ' 'AND (from_zone = "UTC" OR from_zone = "UTC2") ' 'AND (action.value = "allow" OR action.value = "unknown") ' 'AND (file_sha_256 = "hash1" OR file_sha_256 = "hash2") ' 'AND (file_name = "name1" OR file_name = "name2") ' 'AND (dest_port = 555 OR dest_port = 666)' ), ({'source_ip': 'ip1', 'non_relevant_arg': 'value'}, '(source_ip.value = "ip1")')] for args, expected_result in test_cases: assert build_where_clause(args) == expected_result def test_build_where_clause_ip_port(): from CortexDataLake import build_where_clause test_cases = [({'query': 'Test'}, 'Test'), ({'ip': 'ip1,ip2', 'port': '555,888'}, '(source_ip.value = "ip1" OR dest_ip.value = "ip1" OR ' 'source_ip.value = "ip2" OR dest_ip.value = "ip2") ' 'AND (source_port = 555 OR 
dest_port = 555 OR source_port = 888 OR dest_port = 888)'
                   ),
                  ({'source_ip': 'ip1', 'non_relevant_arg': 'value'},
                   '(source_ip.value = "ip1")')]
    for args, expected_result in test_cases:
        assert build_where_clause(args) == expected_result


def test_prepare_fetch_incidents_query():
    from CortexDataLake import prepare_fetch_incidents_query
    timestamp = '2020-02-20T16:49:05'
    firewall_subtype = ['attack', 'url']
    fetch_fields = "*"
    firewall_severity = ['Critical', 'High']
    table_name = "firewall.threat"
    fetch_limit = 10
    expected_response = 'SELECT * FROM `firewall.threat` WHERE ' \
                        'time_generated Between TIMESTAMP("2020-02-20T16:49:05") ' \
                        'AND CURRENT_TIMESTAMP AND' \
                        ' (sub_type.value = "attack" OR sub_type.value = "url") AND' \
                        ' (vendor_severity.value = "Critical" OR vendor_severity.value = "High") ' \
                        'ORDER BY time_generated ASC ' \
                        'LIMIT 10'
    assert expected_response == prepare_fetch_incidents_query(timestamp, firewall_severity, table_name,
                                                              firewall_subtype, fetch_fields, fetch_limit)


MILLISECONDS_HUMAN_READABLE_TIME_FROM_EPOCH_TIME_TEST_CASES = [(1582017903000000, '2020-02-18T09:25:03.001Z'),
                                                               (1582027208002000, '2020-02-18T12:00:08.003Z')]


@pytest.mark.parametrize('epoch_time, expected_response', MILLISECONDS_HUMAN_READABLE_TIME_FROM_EPOCH_TIME_TEST_CASES)
def test_epoch_to_timestamp_and_add_milli(epoch_time, expected_response):
    from CortexDataLake import epoch_to_timestamp_and_add_milli
    assert epoch_to_timestamp_and_add_milli(epoch_time) == expected_response


def test_get_table_name():
    from CortexDataLake import get_table_name
    query = 'SELECT pcap FROM `firewall.threat` WHERE is_packet_capture = true AND severity = "Critical" LIMIT 10'
    assert get_table_name(query) == 'firewall.threat'
    query = 'Wrongly formatted query'
    assert get_table_name(query) == 'Unrecognized table name'


def test_query_logs_command_transform_results_1():
    """
    Given:
        - a list of CDL query results
    When
        - running query_logs_command function
    Then
        - if transform_results is not specified, CDL query results are mapped into the CDL common context (test 1)
        - if transform_results is set to false, CDL query results are returned unaltered (test 2)
    """
    from CortexDataLake import query_logs_command
    cdl_records = load_test_data('./test_data/test_query_logs_command_transform_results_original.json')
    cdl_records_xform = load_test_data('./test_data/test_query_logs_command_transform_results_xformed.json')

    class MockClient:
        def query_loggings(self, query):
            return cdl_records, []

    # test 1, with no transform_results option, results should be mapped into the common context
    _, results_xform, _ = query_logs_command({'limit': '1', 'query': 'SELECT * FROM `firewall.traffic`'}, MockClient())
    assert results_xform == {'CDL.Logging': cdl_records_xform}

    # test 2, with transform_results set to false, results should be returned unaltered
    _, results_noxform, _ = query_logs_command(
        {'limit': '1', 'query': 'SELECT * FROM `firewall.traffic`', 'transform_results': 'false'}, MockClient()
    )
    assert results_noxform == {'CDL.Logging': cdl_records}


class TestBackoffStrategy:
    """ A class to test the backoff strategy mechanism """
    @pytest.mark.parametrize('integration_context, exception', [
        ({FIRST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(minutes=30)).isoformat(),
          LAST_FAILURE_TIME_CONST: datetime.utcnow().isoformat()}, True),
        ({FIRST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(hours=3)).isoformat(),
          LAST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(minutes=3)).isoformat()}, True),
        ({FIRST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(hours=48)).isoformat(),
LAST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(minutes=30)).isoformat()}, True), ({FIRST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(minutes=30)).isoformat(), LAST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(minutes=1)).isoformat()}, False), ({FIRST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(hours=3)).isoformat(), LAST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(minutes=10)).isoformat()}, False), ({FIRST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(hours=48)).isoformat(), LAST_FAILURE_TIME_CONST: (datetime.utcnow() - timedelta(minutes=60)).isoformat()}, False), ({}, False) ]) def test_backoff_strategy(self, integration_context, exception): """ Given: - An integration context that represents a try to fetch in the 1st hour & 1st minute window - An integration context that represents a try to fetch in the first 48 hours & 10 minutes window - An integration context that represents a try to fetch after 48 hours & 60 minutes window - An integration context that represents a try to fetch in the 1st hour & after 1st minute window - An integration context that represents a try to fetch in the first 48 hours & after 10 minutes window - An integration context that represents a try to fetch after 48 hours & after 60 minutes window - An integration context that represents the first time the integration has failed to fetch the access token When - Checking whether to allow access token fetching or failing the integration Then - Validate that a DemistoException is being raised - Validate that a DemistoException is being raised - Validate that a DemistoException is being raised - Validate that no DemistoException is being raised - Validate that no DemistoException is being raised - Validate that no DemistoException is being raised - Validate that no DemistoException is being raised """ from CortexDataLake import Client if exception: with pytest.raises(DemistoException): Client._backoff_strategy(integration_context) else: Client._backoff_strategy(integration_context) @pytest.mark.parametrize('integration_context', [ ({}), ({ FIRST_FAILURE_TIME_CONST: datetime(2020, 12, 10, 11, 27, 55, 764401).isoformat(), LAST_FAILURE_TIME_CONST: (datetime(2020, 12, 10, 11, 27, 55, 764401) + timedelta(minutes=1)).isoformat() }) ]) def test_cache_failure_times(self, integration_context): """ Given: - An empty integration context - An integration context with first failure data & last failure data When - Caching the failure times in the integration context Then - Validate that both first failure data & last failure data are in the integration context and have the same data - Validate that both first failure data & last failure data are in the integration context and have different data """ from CortexDataLake import Client updated_ic = Client._cache_failure_times(integration_context.copy()) assert FIRST_FAILURE_TIME_CONST in updated_ic assert LAST_FAILURE_TIME_CONST in updated_ic if integration_context: assert updated_ic[LAST_FAILURE_TIME_CONST] != updated_ic[FIRST_FAILURE_TIME_CONST] else: assert updated_ic[LAST_FAILURE_TIME_CONST] == updated_ic[FIRST_FAILURE_TIME_CONST] @pytest.mark.parametrize('exc, res', [ ('Error in API call [400] - $REASON', True), ('Error in API call [403] - $REASON', False) ]) def test_is_bad_request_error(self, exc, res): """ Given: - An exception message of status 400 - An exception message of status 403 When - Checking if the exception message is of status code 400 Then - Validate that there's a match with the BAD_REQUEST_REGEX regex - Validate that 
there's no match with the BAD_REQUEST_REGEX regex """ from CortexDataLake import BAD_REQUEST_REGEX ans = re.match(BAD_REQUEST_REGEX, exc) if res: assert ans is not None else: assert ans is None
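
# --- Editor's illustrative sketch (not part of the original test module).
# A hedged reconstruction of the retry policy that the TestBackoffStrategy
# cases above encode: the longer the integration has been failing overall,
# the longer the required gap since the last failure before another token
# fetch is allowed. The real Client._backoff_strategy may be implemented
# differently; this only mirrors the behaviour the parametrized cases assert.
def _sketch_backoff_allows_retry(first_failure, last_failure, now):
    """Return True if a new token fetch should be attempted."""
    since_first = now - first_failure
    since_last = now - last_failure
    if since_first < timedelta(hours=1):
        return since_last >= timedelta(minutes=1)
    if since_first < timedelta(hours=48):
        return since_last >= timedelta(minutes=10)
    return since_last >= timedelta(minutes=60)


# For example, three hours into an outage, a retry three minutes after the
# last failure is rejected, matching the second "exception" case above:
# _sketch_backoff_allows_retry(now - timedelta(hours=3),
#                              now - timedelta(minutes=3), now) -> False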
VirusTotal/content
Packs/CortexDataLake/Integrations/CortexDataLake/CortexDataLake_test.py
Python
mit
13,261
""" A fake data generator """ import random import numpy as np from deepchem.data import NumpyDataset from deepchem.feat import GraphData class FakeGraphGenerator: """Generates a random graphs which can be used for testing or other purposes. The generated graph supports both node-level and graph-level labels. Example ------- >>> from deepchem.utils.fake_data_generator import FakeGraphGenerator >>> fgg = FakeGraphGenerator(min_nodes=8, max_nodes=10, n_node_features=5, avg_degree=8, n_edge_features=3, n_classes=2, task='graph', z=5) >>> graphs = fgg.sample(n_graphs=10) >>> type(graphs) <class 'deepchem.data.datasets.NumpyDataset'> >>> type(graphs.X[0]) <class 'deepchem.feat.graph_data.GraphData'> >>> len(graphs) == 10 # num_graphs True Note ---- The FakeGraphGenerator class is based on torch_geometric.dataset.FakeDataset class. """ def __init__(self, min_nodes: int = 10, max_nodes: int = 10, n_node_features: int = 5, avg_degree: int = 4, n_edge_features: int = 3, n_classes: int = 2, task: str = 'graph', **kwargs): """ Parameters ---------- min_nodes: int, default 10 Minimum number of permissible nodes in a graph max_nodes: int, default 10 Maximum number of permissible nodes in a graph n_node_features: int, default 5 Average number of node features in a graph avg_degree: int, default 4 Average degree of the graph (avg_degree should be a positive number greater than the min_nodes) n_edge_features: int, default 3 Average number of features in the edge task: str, default 'graph' Indicates node-level labels or graph-level labels kwargs: optional Additional graph attributes and their shapes , e.g. `global_features = 5` """ assert avg_degree >= 1, "Average degree should be greater than 0" self.min_nodes = min_nodes self.max_nodes = max_nodes self.avg_degree = avg_degree self.n_node_features = n_node_features self.n_edge_features = n_edge_features self.n_classes = n_classes self.task = task self.kwargs = kwargs def sample(self, n_graphs: int = 100) -> NumpyDataset: """Samples graphs Parameters ---------- n_graphs: int, default 100 Number of graphs to generate Returns ------- graphs: NumpyDataset Generated Graphs """ graphs, labels = [], [] for i in range(n_graphs): n_nodes = random.randint(self.min_nodes, self.max_nodes) edge_index = generate_edge_index(n_nodes, self.avg_degree) n_edges = edge_index.shape[1] if self.task == 'graph': graph_label = random.randint(0, self.n_classes - 1) node_features = np.random.rand(n_nodes, self.n_node_features) + graph_label edge_features = np.random.rand(n_edges, self.n_edge_features) + graph_label kwargs = {} for feature_name, feature_shape in self.kwargs.items(): kwargs[feature_name] = np.random.rand(1, feature_shape) + graph_label labels.append(graph_label) elif self.task == 'node': node_label = np.random.randint(0, self.n_classes - 1, n_nodes).astype(np.float64) node_features = np.random.rand( n_nodes, self.n_node_features) + node_label.reshape(-1, 1) # For a node-prediction task, label is not added to edge features and other global features # because label here is a node-level attribute and not a graph-level attribute edge_features = np.random.rand(n_edges, self.n_edge_features) kwargs = {} for feature_name, feature_shape in self.kwargs.items(): kwargs[feature_name] = np.random.rand(1, feature_shape) kwargs['y'] = node_label graph = GraphData(node_features, edge_index, edge_features, **kwargs) graphs.append(graph) if self.task == 'graph': graph_dataset = NumpyDataset(X=np.array(graphs), y=np.array(labels)) elif self.task == 'node': # In this case, the 'y' 
attribute of GraphData will contain the # node-level labels. graph_dataset = NumpyDataset(X=np.array(graphs)) return graph_dataset def generate_edge_index(n_nodes: int, avg_degree: int, remove_loops: bool = True) -> np.ndarray: """Returns source and destination nodes for `num_nodes * avg_degree` number of randomly generated edges. If remove_loops is True, then self-loops from the edge_index pairs are removed. Parameters ---------- n_nodes: int Number of nodes in the graph avg_degree: int Average degree per node in a graph remove_loops: bool Remove self-loops in a graph """ n_edges = n_nodes * avg_degree edge_index = np.random.randint(low=0, high=n_nodes, size=(2, n_edges)) if remove_loops: edge_index = remove_self_loops(edge_index) return edge_index def remove_self_loops(edge_index: np.ndarray) -> np.ndarray: """Removes self-loops from a given set of edges Parameters ---------- edge_index: numpy.ndarray An numpy array of shape (2, |num_edges|) representing edges in a graph """ mask = [] for i in range(edge_index.shape[1]): if edge_index[0][i] != edge_index[1][i]: # not a self-loop mask.append(i) return edge_index[:, mask]
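
# --- Editor's illustrative sketch (not part of the original deepchem source).
# The class docstring already demonstrates task='graph'; this hedged example
# covers the node-level task. It assumes GraphData exposes extra kwargs (here
# 'y') as attributes, which is how the node labels are attached in sample().
if __name__ == '__main__':
    fgg = FakeGraphGenerator(min_nodes=5, max_nodes=8, n_node_features=4,
                             avg_degree=2, n_edge_features=2, n_classes=3,
                             task='node')
    dataset = fgg.sample(n_graphs=4)
    first = dataset.X[0]
    # For task='node', per-node labels live on the sampled GraphData object
    print(first.num_nodes, first.y.shape)  # e.g. 6 (6,)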
deepchem/deepchem
deepchem/utils/fake_data_generator.py
Python
mit
5,524
from setuptools import setup from tofuroll import get_version setup( name='tofuroll', version=get_version(), url='http://github.com/nathanielksmith/tofuroll', description='A basic framework for creating command line applications', author='Nathaniel K Smith', author_email='nathanielksmith@gmail.com', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Application Frameworks', ], packages=['tofuroll'], )
nathanielksmith/tofuroll
setup.py
Python
gpl-3.0
738