code stringlengths 3–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3–1.05M |
---|---|---|---|---|---|
#!/usr/bin/python
#-*- coding: utf-8 -*-
from sys import argv
from xml.parsers import expat
from re import compile,sub
SEP = ":"
ENC = "UTF-8"
r = compile("---+")
TagsOnly = False
class CXmlParser:
def __init__(self):
self.names = []
self.data = ""
def StartElement(self,name,attrs):
#print name,attrs
self.names.append(name)
def EndElement(self,name):
#print name
path = SEP.join(self.names)
if TagsOnly:
print path
else:
print path,self.data.strip()
del self.names[-1]
self.data = ""
def CharacterData(self,data):
#print data
self.data += data  # expat may deliver text in several chunks; accumulate instead of overwriting
def Parse(self,filename):
Parser = expat.ParserCreate(ENC)
Parser.StartElementHandler = self.StartElement
Parser.EndElementHandler = self.EndElement
Parser.CharacterDataHandler = self.CharacterData
text = open(filename,"r").read()
text = r.sub("--",text)
ParserStatus = Parser.Parse(text,1)
filename = argv[1]
parser = CXmlParser()
element = parser.Parse(filename)
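# Illustrative usage, not part of the original script: given an input file like
#   <root><item>hello</item></root>
# running "python xmldump.py file.xml" prints one line per closing tag, i.e. the
# colon-joined tag path followed by that element's text, roughly:
#   root:item hello
#   root
# With TagsOnly set to True only the tag paths are printed.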
| lubyagin/sqface | xmldump.py | Python | agpl-3.0 | 1,016 |
# This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding field 'ProjectInvolvementQuestion.key_string'
db.add_column('search_projectinvolvementquestion', 'key_string', orm['search.projectinvolvementquestion:key_string'])
def backwards(self, orm):
# Deleting field 'ProjectInvolvementQuestion.key_string'
db.delete_column('search_projectinvolvementquestion', 'key_string')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.answer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
'search.bug': {
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.buganswer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'details': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bug_answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'search.hitcountcache': {
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'search.projectinvolvementquestion': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bug_style': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'key_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['search']
| mzdaniel/oh-mainline | mysite/search/migrations/0040_project_involvement_question_key_string.py | Python | agpl-3.0 | 9,036 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the Cifar-10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from official.benchmark.models import cifar_preprocessing
from official.benchmark.models import resnet_cifar_model
from official.benchmark.models import synthetic_util
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
from official.vision.image_classification.resnet import common
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(0.1, 91), (0.01, 136), (0.001, 182)
]
def learning_rate_schedule(current_epoch,
current_batch,
batches_per_epoch,
batch_size):
"""Handles linear scaling rule and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
batches_per_epoch: integer, number of steps in an epoch.
batch_size: integer, total batch size.
Returns:
Adjusted learning rate.
"""
del current_batch, batches_per_epoch # not used
initial_learning_rate = common.BASE_LEARNING_RATE * batch_size / 128
learning_rate = initial_learning_rate
for mult, start_epoch in LR_SCHEDULE:
if current_epoch >= start_epoch:
learning_rate = initial_learning_rate * mult
else:
break
return learning_rate
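# Worked example of the schedule above (assuming common.BASE_LEARNING_RATE is 0.1,
# which is an assumption here, and the default batch_size of 128, so
# initial_learning_rate == 0.1):
#   epochs   0-90  -> 0.1
#   epochs  91-135 -> 0.01
#   epochs 136-181 -> 0.001
#   epochs 182+    -> 0.0001
# With batch_size=256 the linear scaling rule doubles every value (initial LR 0.2).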
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
N.B. Only support Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
"""
def __init__(self, schedule, batch_size, steps_per_epoch):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
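# Minimal usage sketch (mirrors what run() does further below; the dataset and
# numbers are illustrative): the callback recomputes the rate at the start of
# every batch and writes it to the optimizer only when it changes.
#   lr_callback = LearningRateBatchScheduler(
#       schedule=learning_rate_schedule,
#       batch_size=128,
#       steps_per_epoch=50000 // 128)
#   model.fit(train_dataset, epochs=182, callbacks=[lr_callback])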
def run(flags_obj):
"""Run ResNet Cifar-10 training and eval loop using native Keras APIs.
Args:
flags_obj: An object containing parsed flag values.
Raises:
ValueError: If fp16 is passed as it is not currently supported.
Returns:
Dictionary of training and eval stats.
"""
keras_utils.set_session_config(
enable_xla=flags_obj.enable_xla)
# Execute flag override logic for better model performance
if flags_obj.tf_gpu_thread_mode:
keras_utils.set_gpu_thread_mode_and_count(
per_gpu_thread_count=flags_obj.per_gpu_thread_count,
gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
num_gpus=flags_obj.num_gpus,
datasets_num_private_threads=flags_obj.datasets_num_private_threads)
common.set_cudnn_batchnorm_mode()
dtype = flags_core.get_tf_dtype(flags_obj)
if dtype == 'fp16':
raise ValueError('dtype fp16 is not supported in Keras. Use the default '
'value(fp32).')
data_format = flags_obj.data_format
if data_format is None:
data_format = ('channels_first' if tf.config.list_physical_devices('GPU')
else 'channels_last')
tf.keras.backend.set_image_data_format(data_format)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_obj.num_gpus,
all_reduce_alg=flags_obj.all_reduce_alg,
num_packs=flags_obj.num_packs)
if strategy:
# flags_obj.enable_get_next_as_optional controls whether enabling
# get_next_as_optional behavior in DistributedIterator. If true, last
# partial batch can be supported.
strategy.extended.experimental_enable_get_next_as_optional = (
flags_obj.enable_get_next_as_optional
)
strategy_scope = distribution_utils.get_strategy_scope(strategy)
if flags_obj.use_synthetic_data:
synthetic_util.set_up_synthetic_data()
input_fn = common.get_synth_input_fn(
height=cifar_preprocessing.HEIGHT,
width=cifar_preprocessing.WIDTH,
num_channels=cifar_preprocessing.NUM_CHANNELS,
num_classes=cifar_preprocessing.NUM_CLASSES,
dtype=flags_core.get_tf_dtype(flags_obj),
drop_remainder=True)
else:
synthetic_util.undo_set_up_synthetic_data()
input_fn = cifar_preprocessing.input_fn
train_input_dataset = input_fn(
is_training=True,
data_dir=flags_obj.data_dir,
batch_size=flags_obj.batch_size,
parse_record_fn=cifar_preprocessing.parse_record,
datasets_num_private_threads=flags_obj.datasets_num_private_threads,
dtype=dtype,
# Setting drop_remainder to avoid the partial batch logic in normalization
# layer, which triggers tf.where and leads to extra memory copy of input
# sizes between host and GPU.
drop_remainder=(not flags_obj.enable_get_next_as_optional))
eval_input_dataset = None
if not flags_obj.skip_eval:
eval_input_dataset = input_fn(
is_training=False,
data_dir=flags_obj.data_dir,
batch_size=flags_obj.batch_size,
parse_record_fn=cifar_preprocessing.parse_record)
steps_per_epoch = (
cifar_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)
lr_schedule = 0.1
if flags_obj.use_tensor_lr:
initial_learning_rate = common.BASE_LEARNING_RATE * flags_obj.batch_size / 128
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=list(p[1] * steps_per_epoch for p in LR_SCHEDULE),
values=[initial_learning_rate] +
list(p[0] * initial_learning_rate for p in LR_SCHEDULE))
with strategy_scope:
optimizer = common.get_optimizer(lr_schedule)
model = resnet_cifar_model.resnet56(classes=cifar_preprocessing.NUM_CLASSES)
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=(['sparse_categorical_accuracy']
if flags_obj.report_accuracy_metrics else None),
run_eagerly=flags_obj.run_eagerly)
train_epochs = flags_obj.train_epochs
callbacks = common.get_callbacks()
if not flags_obj.use_tensor_lr:
lr_callback = LearningRateBatchScheduler(
schedule=learning_rate_schedule,
batch_size=flags_obj.batch_size,
steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
# If multiple epochs, ignore the train_steps flag.
if train_epochs <= 1 and flags_obj.train_steps:
steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch)
train_epochs = 1
num_eval_steps = (cifar_preprocessing.NUM_IMAGES['validation'] //
flags_obj.batch_size)
validation_data = eval_input_dataset
if flags_obj.skip_eval:
if flags_obj.set_learning_phase_to_train:
# TODO(haoyuzhang): Understand slowdown of setting learning phase when
# not using distribution strategy.
tf.keras.backend.set_learning_phase(1)
num_eval_steps = None
validation_data = None
if not strategy and flags_obj.explicit_gpu_placement:
# TODO(b/135607227): Add device scope automatically in Keras training loop
# when not using distribution strategy.
no_dist_strat_device = tf.device('/device:GPU:0')
no_dist_strat_device.__enter__()
history = model.fit(train_input_dataset,
epochs=train_epochs,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
validation_steps=num_eval_steps,
validation_data=validation_data,
validation_freq=flags_obj.epochs_between_evals,
verbose=2)
eval_output = None
if not flags_obj.skip_eval:
eval_output = model.evaluate(eval_input_dataset,
steps=num_eval_steps,
verbose=2)
if not strategy and flags_obj.explicit_gpu_placement:
no_dist_strat_device.__exit__()
stats = common.build_stats(history, eval_output, callbacks)
return stats
def define_cifar_flags():
common.define_keras_flags(dynamic_loss_scale=False)
flags_core.set_defaults(data_dir='/tmp/cifar10_data/cifar-10-batches-bin',
model_dir='/tmp/cifar10_model',
epochs_between_evals=10,
batch_size=128)
def main(_):
return run(flags.FLAGS)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
define_cifar_flags()
app.run(main)
| tombstone/models | official/benchmark/models/resnet_cifar_main.py | Python | apache-2.0 | 10,371 |
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from vehicles.models import Vehicle_version
from django.conf import settings
#############
# Create your models here.
class Client(models.Model):
BOOL_CHOICES = ((True, 'Activo'), (False, 'Inactivo'))
user_client = models.OneToOneField(settings.AUTH_USER_MODEL)
address = models.TextField(blank=True, null=True)
phone_num = models.CharField(max_length=12, blank=True, null=True)
birthday = models.DateField(blank=True, null=True)
rfc = models.CharField(max_length=13, blank=True, null=True)
curp = models.CharField(max_length=18, blank=True, null=True)
gender = models.CharField(max_length=40, blank=True, null=True)
nationality = models.CharField(max_length=100, blank=True, null=True)
marital_status = models.CharField(max_length=40, blank=True, null=True)
scholarship = models.CharField(max_length=40, blank=True, null=True)
ocuppation = models.CharField(max_length=200, blank=True, null=True)
salary = models.FloatField(blank=True, null=True)
personale_references = models.TextField(blank=True, null=True)
user_status = models.BooleanField(choices = BOOL_CHOICES, default=False)
photo = models.ImageField(upload_to="users", blank=True, null=True)
ide=models.CharField(max_length=50,null=True,blank=True)
correo = models.EmailField(blank=True,null=True)
class Meta:
verbose_name="Client"
verbose_name_plural = "Clients"
def __str__(self):
return 'Cliente {}'.format(self.user_client)
class Garage(models.Model):
user_garage = models.ForeignKey(Client, related_name='garage')
user_vehicle = models.ForeignKey(Vehicle_version, related_name='garage')
monthly_payment = models.IntegerField(blank=True, null=True)
status = models.BooleanField(default=False)
add_date = models.DateField(auto_now=True)
class Meta:
verbose_name = "Garage"
verbose_name_plural = "Garages"
def __str__(self):
return 'Garage numero {}'.format(self.id)
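# Minimal usage sketch (assumes a Django shell with an existing auth user and at
# least one Vehicle_version row; all values are illustrative):
#   user = User.objects.create_user('maria')
#   client = Client.objects.create(user_client=user, phone_num='5512345678')
#   Garage.objects.create(user_garage=client,
#                         user_vehicle=Vehicle_version.objects.first(),
#                         monthly_payment=3500)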
# class Agreement(models.Model):
# BOOL_CHOICES = ((True, 'Activo'), (False, 'Finalizado'))
# reference_agreement = models.CharField(max_length=50)
# zone = models.CharField(max_length=50)
# observations = models.TextField()
# date = models.DateField()
# status = models.BooleanField()
| SurielRuano/backend-autonomo | clients/models.py | Python | mit | 2,282 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('restserver', '0013_auto_20150815_0058'),
]
operations = [
migrations.AddField(
model_name='schoolcoursevalue',
name='city',
field=smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'state', default=2, to='restserver.City', chained_field=b'state'),
preserve_default=False,
),
migrations.AddField(
model_name='schoolcoursevalue',
name='country',
field=models.ForeignKey(default=1, to='restserver.Country'),
preserve_default=False,
),
migrations.AddField(
model_name='schoolcoursevalue',
name='state',
field=smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'country', default=1, to='restserver.State', chained_field=b'country'),
preserve_default=False,
),
migrations.AlterField(
model_name='schoolcoursevalue',
name='school',
field=smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'city', to='restserver.School', chained_field=b'city'),
),
]
| thiagopa/planyourexchange-server | restserver/migrations/0014_auto_20150815_0122.py | Python | agpl-3.0 | 1,346 |
from common import *
from openglider.glider import ParametricGlider
from visual_test_glider import TestGlider
__all__ = ['GliderTestCaseParametric']
class GliderTestCaseParametric(TestCase):
def setUp(self):
self.glider = self.import_glider()
self.glider2d_ = ParametricGlider.fit_glider_3d(self.glider)
self.glider2d = self.import_glider_2d()
#self.glider = self.glider2d.get_glider_3d()
def test_fit(self):
self.assertEqualGlider(self.glider, self.glider2d.get_glider_3d(), precision=1)
def test_show_glider(self):
print(self.glider2d.cell_num)
self.glider2d.cell_num += 15
glider3d = self.glider2d.get_glider_3d()
TestGlider.show_glider(glider3d)
if __name__ == '__main__':
unittest.main() | hiaselhans/OpenGlider | tests/visual_test_parametric_glider.py | Python | gpl-3.0 | 778 |
from __future__ import print_function
from __future__ import absolute_import
import six
import random
import re
import os
import sys  # sys.modules is referenced below when locating the plugin's skin/image files
import xml.etree.cElementTree
import gzip
if six.PY2:
import httplib
from urllib2 import urlopen, HTTPError, URLError
from StringIO import StringIO
else:
import http.client as httplib
from urllib.request import urlopen, Request # raises ImportError in Python 2
from urllib.error import HTTPError, URLError # raises ImportError in Python 2
from io import BytesIO
from enigma import getDesktop, eTimer
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.ProgressBar import ProgressBar
from Components.Sources.Progress import Progress
from Components.Sources.StaticText import StaticText
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from . crossepglib import *
from . crossepg_locale import _
class CrossEPG_Rytec_Source(object):
def __init__(self):
self.channels_urls = []
self.epg_urls = []
self.description = ""
class CrossEPG_Rytec_Update(Screen):
def __init__(self, session):
if (getDesktop(0).size().width() < 800):
skin = "%s/skins/downloader_sd.xml" % os.path.dirname(sys.modules[__name__].__file__)
self.isHD = 0
else:
skin = "%s/skins/downloader_hd.xml" % os.path.dirname(sys.modules[__name__].__file__)
self.isHD = 1
f = open(skin, "r")
self.skin = f.read()
f.close()
Screen.__init__(self, session)
self.skinName = "downloader"
Screen.setTitle(self, _("CrossEPG"))
self.onChangedEntry = []
self.sources = []
self.session = session
self.mirrors = []
self["background"] = Pixmap()
self["action"] = Label(_("Updating rytec providers..."))
self["summary_action"] = StaticText(_("Updating rytec providers..."))
self["status"] = Label("")
self["progress"] = ProgressBar()
self["progress"].hide()
self["progress_text"] = Progress()
self.config = CrossEPG_Config()
self.config.load()
self.timer = eTimer()
self.timer.callback.append(self.start)
self.onFirstExecBegin.append(self.firstExec)
def firstExec(self):
if self.isHD:
png = resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/crossepg/background_hd.png")
if png == None or not os.path.exists(png):
png = "%s/images/background_hd.png" % os.path.dirname(sys.modules[__name__].__file__)
else:
png = resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/crossepg/background.png")
if png == None or not os.path.exists(png):
png = "%s/images/background.png" % os.path.dirname(sys.modules[__name__].__file__)
self["background"].instance.setPixmapFromFile(png)
self.timer.start(100, 1)
def start(self):
self.loadSourceList()
if self.load():
self.save(self.config.home_directory + "/providers/")
self.session.open(MessageBox, _("%d providers updated") % len(self.sources), type=MessageBox.TYPE_INFO, timeout=5)
else:
self.session.open(MessageBox, _("Cannot retrieve rytec sources"), type=MessageBox.TYPE_ERROR, timeout=10)
self.close()
def loadSourceList(self):
try:
# url = "http://rytecepg.dyndns.tv/epg_data/crossepgsources.gz" # currently not available
# distro = getImageDistro()
# if distro in ("openvix", "openbh"):
url = "http://www.openvix.co.uk/crossepgsources.gz" # so use OpenViX url as holder
print("[crossepg_rytec_update:loadSourceList] downloading source list from %s" % url)
response = urlopen(url)
content_raw = response.read()
CType = response.info().getheader('Content-Type') if six.PY2 else response.getheader("Content-Type")
if 'gzip' in CType:
if six.PY2:
self.mirrors = [x.strip() for x in gzip.GzipFile(fileobj=StringIO(content_raw)).read().strip().split("\n")]
else:
self.mirrors = gzip.GzipFile(fileobj=BytesIO(content_raw), mode='rb').read()
self.mirrors = six.ensure_str(self.mirrors).strip().split("\n")
random.shuffle(self.mirrors)
print("[crossepg_rytec_update:loadSourceList] mirrors2 %s" % self.mirrors)
else:
print("[crossepg_rytec_update:loadSourceList] Fetched data is not Gzip format")
print("[crossepg_rytec_update:loadSourceList] content_raw:", content_raw)
except Exception as e:
print("[crossepg_rytec_update:loadSourceList] error fetching:", e)
def load(self):
ret = False
for mirror in self.mirrors:
mirror = mirror.replace('\t', '')
try:
print("[crossepg_rytec_update:load] downloading from %s" % (mirror))
# str.lstrip() strips a set of characters, not a prefix, which can mangle host names;
# split the URL explicitly instead.
smirror = mirror[len("http://"):] if mirror.startswith("http://") else mirror
host = smirror.split("/")[0]
path = smirror[len(host):]
conn = httplib.HTTPConnection(host)
conn.request("GET", path)
httpres = conn.getresponse()
print("[crossepg_rytec_update:load] host =%s, path=%s, httpres=%s" % (host, path, httpres))
if httpres.status == 200:
f = open("/tmp/crossepg_rytec_tmp", "w")
databytes = httpres.read()
datastr = six.ensure_str(databytes)
f.write(datastr)
f.close()
self.loadFromFile("/tmp/crossepg_rytec_tmp")
os.unlink("/tmp/crossepg_rytec_tmp")
ret = True
else:
print("[crossepg_rytec_update:load] http error: %d (%s)" % (httpres.status, mirror))
except Exception as e:
print("[crossepg_rytec_update:load] exception =%s" % e)
return ret
def getServer(self, description):
for source in self.sources:
if source.description == description:
return source
return None
def loadFromFile(self, filename):
mdom = xml.etree.cElementTree.parse(filename)
root = mdom.getroot()
for node in root:
if node.tag == "source":
source = CrossEPG_Rytec_Source()
source.channels_urls.append(node.get("channels"))
for childnode in node:
if childnode.tag == "description":
source.description = childnode.text
elif childnode.tag == "url":
source.epg_urls.append(childnode.text)
oldsource = self.getServer(source.description)
if oldsource == None:
self.sources.append(source)
else:
if len(source.epg_urls) > 0:
if source.epg_urls[0] not in oldsource.epg_urls:
oldsource.epg_urls.append(source.epg_urls[0])
if len(source.channels_urls) > 0:
if source.channels_urls[0] not in oldsource.channels_urls:
oldsource.channels_urls.append(source.channels_urls[0])
def save(self, destination):
os.system("rm -f " + destination + "/rytec_*.conf")
for source in self.sources:
p = re.compile('[/:()<>|?*\s-]|(\\\)')
filename = p.sub('_', source.description).lower()
if filename[:6] != "rytec_":
filename = "rytec_" + filename
f = open(destination + "/" + filename + ".conf", "w")
f.write("description=" + source.description + "\n")
f.write("protocol=xmltv\n")
count = 0
for url in source.channels_urls:
f.write("channels_url_" + str(count) + "=" + url + "\n")
count += 1
count = 0
for url in source.epg_urls:
f.write("epg_url_" + str(count) + "=" + url + "\n")
count += 1
f.write("preferred_language=eng")
f.close()
| oe-alliance/e2openplugin-CrossEPG | src/enigma2/python/crossepg_rytec_update.py | Python | lgpl-2.1 | 7,001 |
import urllib2
from bs4 import BeautifulSoup
import mysql.connector
from mysql.connector import Error
from ConfigParser import SafeConfigParser
import csv
try:
conn = mysql.connector.connect(host='54.218.87.210', database='play', user='root', password='root')
cursor = conn.cursor(buffered = True)
cursor.execute("CREATE TABLE scrap ( id int(10) auto_increment primary key, name varchar(1000), genre varchar(100), developer varchar(1000), downloads varchar(100), updatedOn varchar(100), contentRating varchar(100), score varchar(100))")
except Error as e:
print(e)
parser = SafeConfigParser()
parser.read('config_scraper.ini')
baseURL = "https://play.google.com"
relat = "/store/apps/details?id=com.playappking.busrush&hl=en"
# response = urllib2.urlopen(url)
# html = response.read()
# soup = BeautifulSoup(html, 'html.parser')
# # readable = soup.prettify()
# file_name = str(soup.title).split('>')[1].split('<')[0] + ".html"
#print file_name
# t = soup.title.string
# print t
#file_name = url.split('/')[-1] + ".html"
# with open(file_name, 'wb') as f:
# f.write(str(soup))
with open('scrap.csv', 'a') as csvfile:
fieldnames = ['name' , 'genre', 'developer', 'downloads', 'updatedOn', 'contentRating', 'score', 'scoreClass']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
def scrap(relativeURL):
try:
url = baseURL + relativeURL
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
parent_name = parser.get('scraper_config_name','parent_tag')
child1_name = parser.get('scraper_config_name','child_1_tag')
child2_name = parser.get('scraper_config_name','child_2_tag')
parent_data = parser.get('scraper_config_data','parent_tag')
child1_data = parser.get('scraper_config_data','child_1_tag')
child2_data = parser.get('scraper_config_data','child_2_tag')
parent_genre = parser.get('scraper_config_genre','parent_tag')
child1_genre = parser.get('scraper_config_genre','child_1_tag')
child2_genre = parser.get('scraper_config_genre','child_2_tag')
parent_developer = parser.get('scraper_config_developer','parent_tag')
child1_developer = parser.get('scraper_config_developer','child_1_tag')
child2_developer = parser.get('scraper_config_developer','child_2_tag')
parent_downloads = parser.get('scraper_config_downloads','parent_tag')
child1_downloads = parser.get('scraper_config_downloads','child_1_tag')
child2_downloads = parser.get('scraper_config_downloads','child_2_tag')
parent_updatedOn = parser.get('scraper_config_updatedOn','parent_tag')
child1_updatedOn = parser.get('scraper_config_updatedOn','child_1_tag')
child2_updatedOn = parser.get('scraper_config_updatedOn','child_2_tag')
parent_score = parser.get('scraper_config_score','parent_tag')
child1_score = parser.get('scraper_config_score','child_1_tag')
child2_score = parser.get('scraper_config_score','child_2_tag')
title_name = soup.find(parent_name, attrs = {child1_name : child2_name}).text.strip()
print title_name
# data = soup.find(parent_data, attrs = {child1_data : child2_data}).text.strip()
genre = soup.find(parent_genre, attrs = {child1_genre : child2_genre}).text.strip()
developer = soup.find(parent_developer, attrs = {child1_developer : child2_developer}).text.strip()
downloads = soup.find(parent_downloads, attrs = {child1_downloads : child2_downloads}).text.strip()
updatedOn = soup.find(parent_updatedOn, attrs = {child1_updatedOn : child2_updatedOn}).text.strip()
score = float(soup.find(parent_score, attrs = {child1_score : child2_score}).text.strip())
contentRating = soup.find('div', attrs = {'itemprop' : 'contentRating'}).text.strip()
scoreClass = "1 (0.1 - 1.0)"
if(score < 1.1):
scoreClass = "1 (0.1 - 1.0)"
elif(score > 1.0 and score < 2.1):
scoreClass = "2 (1.1 - 2.0)"
elif(score > 2.0 and score < 3.1):
scoreClass = "3 (2.1 - 3.0)"
elif(score > 3.0 and score < 4.1):
scoreClass = "4 (3.1 - 4.0)"
elif(score > 4.0 and score <= 5.0):
scoreClass = "5 (4.1 - 5.0)"
add_entry = ("INSERT INTO scrap "
"(name , genre, developer, downloads, updatedOn, contentRating, score) "
"VALUES ( %(name)s, %(genre)s, %(developer)s, %(downloads)s, %(updatedOn)s, %(contentRating)s, %(score)s ); ")
args = {
'name': title_name,
'genre': genre,
'developer': developer,
'downloads': downloads,
'updatedOn': updatedOn,
'contentRating': contentRating,
'score': score,
}
cursor.execute(add_entry, args)
conn.commit()
with open('scrap.csv', 'a') as csvfile:
fieldnames = ['name' , 'genre', 'developer', 'downloads', 'updatedOn', 'contentRating', 'score', 'scoreClass']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'name' : title_name , 'genre' : genre , 'developer' : developer , 'downloads' : downloads , 'updatedOn' : updatedOn , 'contentRating' : contentRating, 'score' : score, 'scoreClass' : scoreClass })
except Error as e:
print e
except:
print "dodged exception !"
def main():
cursor = conn.cursor(buffered = True)
query = ("SELECT DISTINCT url FROM crawl limit 35000;")
cursor.execute(query)
for row in cursor:
scrap(row[0])
if __name__== "__main__":
main()
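# The selectors above are read from config_scraper.ini. A hypothetical layout,
# inferred only from the parser.get() calls (section/option names match the code,
# the values are illustrative guesses, not taken from the original repo):
#   [scraper_config_name]
#   parent_tag = div
#   child_1_tag = class
#   child_2_tag = id-app-title
#   [scraper_config_genre]
#   parent_tag = span
#   child_1_tag = itemprop
#   child_2_tag = genre
#   ...and likewise for _data, _developer, _downloads, _updatedOn and _score.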
| devmukul44/Play_Crawler | cloud/scraper1.py | Python | mit | 5,554 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('musterapp', '0019_auto_20150626_1535'),
]
operations = [
migrations.AddField(
model_name='pattern',
name='vector_count',
field=models.IntegerField(editable=False, default=0),
),
]
| kleingeist/muster | musterapp/migrations/0020_pattern_vector_count.py | Python | gpl-2.0 | 424 |
"""
pyflot demo app based on Flask
"""
import flot
from flask import Flask, render_template
class Fx(flot.Series):
data = [(1, 3), (2, 2), (3, 5), (4, 4)]
class MyGraph(flot.Graph):
fx = Fx()
app = Flask(__name__)
@app.route("/")
def root():
my_graph = MyGraph()
return render_template('index.html', my_graph=my_graph)
if __name__ == "__main__":
app.run(debug=True)
| andrefsp/pyflot | examples/flask_app/sample/sample.py | Python | mit | 393 |
"""Test adding a validation hook for entity objects.
"""
import pytest
import icat
import icat.config
from conftest import getConfig
@pytest.fixture(scope="module")
def client(setupicat):
client, conf = getConfig(confSection="nbour")
client.login(conf.auth, conf.credentials)
return client
@pytest.fixture(scope="function")
def dataset(client):
"""Create a temporary Dataset for the tests.
"""
inv = client.assertedSearch("Investigation [name='08100122-EF']")[0]
dstype = client.assertedSearch("DatasetType [name='raw']")[0]
dataset = client.new("dataset",
name="test_07_entity_validate", complete=False,
investigation=inv, type=dstype)
dataset.create()
yield dataset
client.delete(dataset)
def validate_param(self):
"""Validate parameter objects.
Check that NUMERIC parameters have numericValue set and do not
have stringValue or dateTimeValue set.
"""
if self.type.valueType == "NUMERIC":
if self.stringValue is not None:
raise ValueError("NUMERIC parameter cannot set stringValue")
if self.dateTimeValue is not None:
raise ValueError("NUMERIC parameter cannot set dateTimeValue")
if self.numericValue is None:
raise ValueError("NUMERIC parameter must set numericValue")
elif self.type.valueType == "STRING":
if self.dateTimeValue is not None:
raise ValueError("STRING parameter cannot set dateTimeValue")
if self.numericValue is not None:
raise ValueError("STRING parameter cannot set numericValue")
if self.stringValue is None:
raise ValueError("STRING parameter must set stringValue")
query = ("PermissibleStringValue.value <-> ParameterType [id=%d]"
% self.type.id)
permissibleValues = self.client.search(query)
if permissibleValues and self.stringValue not in permissibleValues:
raise ValueError("Invalid string value")
elif self.type.valueType == "DATE_AND_TIME":
if self.numericValue is not None:
raise ValueError("DATE_AND_TIME parameter cannot set numericValue")
if self.stringValue is not None:
raise ValueError("DATE_AND_TIME parameter cannot set stringValue")
if self.dateTimeValue is None:
raise ValueError("DATE_AND_TIME parameter must set dateTimeValue")
else:
raise ValueError("Invalid valueType '%s'" % self.type.valueType)
def test_invalid_numeric_with_string_value(client, dataset):
"""Try setting stringValue on a NUMERIC parameter.
"""
client.typemap['parameter'].validate = validate_param
ptype = client.assertedSearch("ParameterType [name='Magnetic field']")[0]
assert ptype.valueType == "NUMERIC"
param = client.new("datasetParameter", dataset=dataset, type=ptype)
param.numericValue = 7
param.stringValue = "seven"
with pytest.raises(ValueError) as err:
param.create()
assert 'NUMERIC parameter cannot set stringValue' in str(err.value)
assert param.id is None
def test_invalid_numeric_missing_value(client, dataset):
"""Try creating a NUMERIC parameter without setting any value.
"""
client.typemap['parameter'].validate = validate_param
ptype = client.assertedSearch("ParameterType [name='Magnetic field']")[0]
assert ptype.valueType == "NUMERIC"
param = client.new("datasetParameter", dataset=dataset, type=ptype)
with pytest.raises(ValueError) as err:
param.create()
assert 'NUMERIC parameter must set numericValue' in str(err.value)
assert param.id is None
def test_valid_numeric_value(client, dataset):
"""Create a valid NUMERIC parameter.
"""
client.typemap['parameter'].validate = validate_param
ptype = client.assertedSearch("ParameterType [name='Magnetic field']")[0]
assert ptype.valueType == "NUMERIC"
param = client.new("datasetParameter", dataset=dataset, type=ptype)
param.numericValue = 7
param.create()
assert param.id is not None
def test_valid_string_value(client, dataset):
"""Create a simple STRING parameter.
"""
client.typemap['parameter'].validate = validate_param
ptype = client.assertedSearch("ParameterType [name='Comment']")[0]
assert ptype.valueType == "STRING"
param = client.new("datasetParameter", dataset=dataset, type=ptype)
param.stringValue = "Beam me up Scotty!"
param.create()
assert param.id is not None
def test_invalid_string_permissible_value(client, dataset):
"""Try creating a STRING parameter violating permissible values.
"""
client.typemap['parameter'].validate = validate_param
ptype = client.assertedSearch("ParameterType [name='Probe']")[0]
assert ptype.valueType == "STRING"
param = client.new("datasetParameter", dataset=dataset, type=ptype)
param.stringValue = "peanut"
with pytest.raises(ValueError) as err:
param.create()
assert 'Invalid string value' in str(err.value)
assert param.id is None
def test_valid_string_permissible_value(client, dataset):
"""Create a valid STRING parameter, picking a permissible value.
"""
client.typemap['parameter'].validate = validate_param
ptype = client.assertedSearch("ParameterType [name='Probe']")[0]
assert ptype.valueType == "STRING"
param = client.new("datasetParameter", dataset=dataset, type=ptype)
param.stringValue = "photon"
param.create()
assert param.id is not None
| icatproject/python-icat | tests/test_07_entity_validate.py | Python | apache-2.0 | 5,542 |
from typing import Set
from enum import Enum, IntEnum
class EAPTypes(IntEnum):
"""EAP Types accepted by the EAPClient.
See Also:
EAP8021X, EAP.h:51
"""
Invalid = 0
Identity = 1
Notification = 2
Nak = 3
MD5Challenge = 4
OneTimePassword = 5
GenericTokenCard = 6
TLS = 13
CiscoLEAP = 17
EAP_SIM = 18
SRP_SHA1 = 19
TTLS = 21
EAP_AKA = 23
PEAP = 25
MSCHAPv2 = 26
Extensions = 33
EAP_FAST = 43
EAP_AKA_Prime = 50
AcceptEAPTypes = Set[EAPTypes]
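# Illustrative use of the alias (not from the original module): a payload might
# constrain the accepted methods like
#   allowed: AcceptEAPTypes = {EAPTypes.TLS, EAPTypes.PEAP}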
class TTLSInnerAuthentication(Enum):
PAP = 'PAP'
CHAP = 'CHAP'
MSCHAP = 'MSCHAP'
MSCHAPv2 = 'MSCHAPv2'
EAP = 'EAP'
| mosen/commandment | commandment/profiles/eap.py | Python | mit | 677 |
# -- coding: utf-8 --
#-------------------------------------------------------------------------------
# Name: astate_edu
# Purpose: Parse Arkansas State University
#
# Author: Ramakrishna
#
# Dated: 07/Mar/2016
# Copyright: (c) Ramakrishna 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import requests, re, os, csv
from lxml import html
import socks, socket
from collections import OrderedDict
from queue import Queue
from threading import Thread
socks.setdefaultproxy(proxy_type=socks.PROXY_TYPE_SOCKS5, addr="127.0.0.1", port=9150)
socket.socket = socks.socksocket
url = 'http://webapps.astate.edu/directory/student/searchstuddir.php'
headers = {'User-Agent': 'Mozilla/5.0'}
def search(term):
try:
print(term.replace("\n", ""))
s = requests.session()
data = {'SearchField': term.replace("\n", "")}
r = s.post(url, headers=headers, data=data)
tree = html.fromstring(r.content)
table = tree.xpath("//table")
rows = []
for tr in table[0].xpath("//tr"):
rows.append("$$$".join(tr.xpath("*//text()[normalize-space()]")))
records = []
for r in rows:
if 'Name:' in r:
rec = OrderedDict()
rec['name'] = r[r.find('$$$')+3:].strip()
elif 'Email' in r:
rec['email'] = r[r.find("$$$")+3:].strip()
else:
records.append(rec)
file_exists = os.path.isfile('astate_data.csv')
wrote_header = False
with open('astate_data.csv', 'a') as f:
for d in records:
if file_exists == True and wrote_header == False:
w = csv.DictWriter(f, d.keys())
wrote_header = True
elif file_exists == False and wrote_header == False:
w = csv.DictWriter(f, d.keys())
w.writeheader()
wrote_header = True
w.writerow(d)
with open('astate_terms', 'a') as f:
f.write(term)
except Exception as e:
print(e.__doc__)
print(e.args)
return None
class Worker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
term = self.queue.get()
search(term)
self.queue.task_done()
def main():
try:
terms = set(open('terms').readlines())
if os.path.isfile('astate_terms'):
finished_terms = set(open('astate_terms').readlines())
terms -= finished_terms
terms = list(terms)
queue = Queue()
for x in range(8):
worker = Worker(queue)
worker.daemon = True
worker.start()
terms_count = len(terms)
for i in range(0, terms_count):
queue.put(terms[i])
queue.join()
except Exception as e:
print(e.__doc__)
print(e.args)
if __name__ == '__main__':
main()
| brkrishna/freelance | univs/archives/astate_edu.py | Python | gpl-2.0 | 2,620 |
#!/usr/bin/env python
#coding: utf-8
# The MIT License
#
# Copyright (c) 2009-2011 the bpython authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import code
import inspect
import traceback
import pydoc
import keyword
from pygments.token import Token
from bpython.completion import inspection
from bpython.completion.completers import import_completer
from bpython.util import getpreferredencoding, safe_eval, TimeOutException, debug, isolate
from bpython.str_util import get_closure_words
from bpython._py3compat import PythonLexer, PY3
from six import callable
class NothingType: pass
Nothing = NothingType()
command_tokenize = lambda s: get_closure_words(s.strip(' '))
class BPythonInterpreter(code.InteractiveInterpreter):
def __init__(self, locals=None, encoding=None):
"""The syntaxerror callback can be set at any time and will be called
on a caught syntax error. The purpose for this in bpython is so that
the repl can be instantiated after the interpreter (which it
necessarily must be with the current factoring) and then an exception
callback can be added to the Interpreter instance afterwards - more
specifically, this is so that autoindentation does not occur after a
traceback."""
self.command_table = {}
self.encoding = encoding or sys.getdefaultencoding()
self.syntaxerror_callback = None
# Unfortunately code.InteractiveInterpreter is a classic class, so no super()
code.InteractiveInterpreter.__init__(self, locals)
self.locals['__command_table'] = self.command_table
if not PY3:
def runsource(self, source, filename='<input>', symbol='single',
encode=True):
if encode:
source = '# coding: %s\n%s' % (self.encoding,
source.encode(self.encoding))
return code.InteractiveInterpreter.runsource(self, source,
filename, symbol)
def showsyntaxerror(self, filename=None):
"""Override the regular handler, the code's copied and pasted from
code.py, as per showtraceback, but with the syntaxerror callback called
and the text in a pretty colour."""
if self.syntaxerror_callback is not None:
self.syntaxerror_callback()
type, value, sys.last_traceback = sys.exc_info()
sys.last_type = type
sys.last_value = value
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename and right lineno
if not PY3:
lineno -= 1
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
self.writetb(list)
def showtraceback(self):
"""This needs to override the default traceback thing
so it can put it into a pretty colour and maybe other
stuff, I don't know"""
try:
t, v, tb = sys.exc_info()
sys.last_type = t
sys.last_value = v
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
# Set the right lineno (encoding header adds an extra line)
if not PY3:
for i, (filename, lineno, module, something) in enumerate(tblist):
if filename == '<input>':
tblist[i] = (filename, lineno - 1, module, something)
l = traceback.format_list(tblist)
if l:
l.insert(0, "Traceback (most recent call last):\n")
l[len(l):] = traceback.format_exception_only(t, v)
finally:
tblist = tb = None
self.writetb(l)
def writetb(self, lines):
"""This outputs the traceback and should be overridden for anything
fancy."""
for line in lines:
self.write(line)
def register_command(self, name, function):
if name not in self.command_table:
self.command_table[name] = function
return True
else:
return False
def is_commandline(self, line):
try:
if not PY3 and isinstance(line, unicode):
encoding = getpreferredencoding()
words = map(lambda s: s.decode(encoding), command_tokenize(line.encode(encoding)))
else:
words = command_tokenize(line)
except ValueError:
return False
else:
if len(words) > 0:
command_name = words[0]
return command_name in self.command_table
else:
return False
def get_command_spec(self, line):
try:
if not PY3 and isinstance(line, unicode):
encoding = getpreferredencoding()
words = map(lambda s: s.decode(encoding), command_tokenize(line.encode(encoding)))
else:
words = command_tokenize(line)
except ValueError:
pass
else:
if len(words) > 0:
command_name = words[0]
if command_name in self.command_table:
return [command_name, self.command_table[command_name]]
def runcommand(self, line):
try:
if not PY3 and isinstance(line, unicode):
encoding = getpreferredencoding()
words = map(lambda s: s.decode(encoding), command_tokenize(line.encode(encoding)))
else:
words = command_tokenize(line)
except ValueError:
pass
else:
if len(words) > 0:
command_name = words[0]
if command_name in self.command_table:
source = "__command_table['%s'](%s)" % (command_name, ','.join(words[1:]))
self.runsource(source)
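# Rough illustration of the command-table flow above (the 'clear' command is
# hypothetical and the exact tokenisation depends on get_closure_words):
#   interp = BPythonInterpreter()
#   interp.register_command('clear', lambda n=1: None)
#   interp.is_commandline('clear 3')   # -> True
#   interp.runcommand('clear 3')       # runs "__command_table['clear'](3)"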
def get_object(self, name):
try:
obj = safe_eval(name, self.locals)
except TimeOutException as e:
return e
except Exception:
return Nothing
else:
return obj
def get_raw_object(self, name):
try:
obj = eval(name, self.locals)
except Exception:
return Nothing
else:
return obj
def get_argspec(self, repl, func, arg_number):
line = repl.s
cw = repl.current_word
if func:
spec = self._get_argspec(func, arg_number)
else:
spec = None
if not spec:
if keyword.iskeyword(line):
spec = inspection.KeySpec([line])
elif self.is_commandline(line) and repl.is_only_word:
spec = self.get_command_spec(line)
spec = inspection.CommandSpec(spec)
elif line.startswith('from ') or line.startswith('import '):
obj = import_completer.get_object(cw, line)
if obj:
spec = inspection.ImpSpec([cw, obj])
else:
spec = None
else:
if line:
obj = self.get_object(line)
if obj is not Nothing:
spec = inspection.ObjSpec([line, obj])
elif cw:
obj = self.get_object(cw)
if obj is not Nothing:
spec = inspection.ObjSpec([cw, obj])
else:
spec = None
else:
spec = None
else:
spec = None
if spec is not None:
try:
f = spec[-1]
except (IndexError, TypeError):
spec.docstring = None
else:
if isinstance(spec, inspection.ImpSpec) and isinstance(f, str):
spec.docstring = None
try:
for token, value in PythonLexer().get_tokens(open(f).read()):
if token == Token.Literal.String.Doc:
spec.docstring = value.strip('"""').strip('r"""')
elif token == Token.Keyword and value == "def":
break
elif token == Token.Keyword and value == "class":
break
else:
pass
except Exception:
spec.docstring = None
finally:
spec[-1] = None
else:
try:
spec.docstring = pydoc.getdoc(f)
except IndexError:
spec.docstring = None
return spec
def _get_argspec(self, func, arg_number):
# Get the name of the current function and where we are in
# the arguments
# f = self.get_old_object(func)
f = self.get_raw_object(func)
if f is Nothing:
return None
if callable(f):
if inspect.isclass(f):
try:
if f.__init__ is not object.__init__:
f = f.__init__
except AttributeError:
return None
argspec = inspection.getargspec(func, f)
if argspec:
argspec.append(arg_number)
argspec.append(f)
argspec = inspection.ArgSpec(argspec)
return argspec
else:
nospec = inspection.NoSpec([func, f])
return nospec
else:
argspec = inspection.ObjSpec([func, f])
return argspec
def startup(self):
startup = os.environ.get('PYTHONSTARTUP')
default_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "default"))
default_rc = os.path.join(default_dir, "rc.py")
config_dir = os.path.expanduser('~/.bpython')
rc = os.path.join(config_dir, 'rc.py')
if PY3:
self.runsource("import sys; sys.path.append('%s')" % default_dir, default_dir, 'exec')
else:
self.runsource("import sys; sys.path.append('%s')" % default_dir, default_dir, 'exec', encode=False)
for filename in [startup, default_rc]:
if filename and os.path.isfile(filename):
with open(filename, 'r') as f:
if PY3:
self.runsource(f.read(), filename, 'exec')
else:
self.runsource(f.read(), filename, 'exec', encode=False)
if PY3:
self.runsource("sys.path.pop(); sys.path.append('%s'); del sys" % config_dir, config_dir, 'exec')
else:
self.runsource("sys.path.pop(); sys.path.append('%s'); del sys" % config_dir, config_dir, 'exec',
encode=False)
for filename in [rc]:
if filename and os.path.isfile(filename):
with open(filename, 'r') as f:
if PY3:
self.runsource(f.read(), filename, 'exec')
else:
self.runsource(f.read(), filename, 'exec', encode=False)
| hirochachacha/apython | bpython/interpreter.py | Python | mit | 12,791 |
# coding=utf-8
"""This is the main package for the application.
:copyright: (c) 2013 by Tim Sutton
:license: GPLv3, see LICENSE for more details.
"""
import os
import logging
from flask import Flask
from flask_mail import Mail
from flask.ext.appconfig import AppConfig
from users.database import db, migrate
APP = Flask(__name__)
# Load configuration from any possible means.
AppConfig(APP, default_settings="users.default_config")
def add_handler_once(logger, handler):
"""A helper to add a handler to a logger, ensuring there are no duplicates.
:param logger: The logger instance.
:type logger: logging.logger
:param handler: Handler instance to be added. It will not be
added if an instance of that Handler subclass already exists.
:type handler: logging.Handler
:returns: True if the logging handler was added
:rtype: bool
"""
class_name = handler.__class__.__name__
for logger_handler in logger.handlers:
if logger_handler.__class__.__name__ == class_name:
return False
logger.addHandler(handler)
return True
def setup_logger():
"""Set up our logger with sentry support.
"""
logger = logging.getLogger('user_map')
logger.setLevel(logging.DEBUG)
handler_level = logging.DEBUG
# create formatter that will be added to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_dir = APP.config['LOG_DIR']
# so e.g. jenkins can override log dir.
if 'USER_MAP_LOGFILE' in os.environ:
file_name = os.environ['USER_MAP_LOGFILE']
else:
file_name = os.path.join(log_dir, 'user-map.log')
file_handler = logging.FileHandler(file_name)
file_handler.setLevel(handler_level)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)
#Set formatters
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
# add the handlers to the logger
add_handler_once(logger, file_handler)
add_handler_once(logger, console_handler)
setup_logger()
LOGGER = logging.getLogger('user_map')
# Mailer
mail = Mail(APP)
# backward-compat
APP.config['DATABASE'] = APP.config['SQLITE_DB_PATH']
db.init_app(APP)
migration_dir = os.path.join(os.path.dirname(__file__), "migrations")
migrate.init_app(APP, db, directory=migration_dir)
# Don't import actual view methods themselves - see:
# http://flask.pocoo.org/docs/patterns/packages/#larger-applications
# Also views must be imported AFTER app is created above.
# noinspection PyUnresolvedReferences
import users.views # noqa
| ariestiyansyah/atlas | users/__init__.py | Python | gpl-2.0 | 2,699 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_proxy_arp
short_description: Configure proxy-ARP in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and proxy_arp category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_proxy_arp:
description:
- Configure proxy-ARP.
default: null
type: dict
suboptions:
end_ip:
description:
- End IP of IP range to be proxied.
type: str
id:
description:
- Unique integer ID of the entry.
required: true
type: int
interface:
description:
- Interface acting proxy-ARP. Source system.interface.name.
type: str
ip:
description:
- IP address or start IP to be proxied.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure proxy-ARP.
fortios_system_proxy_arp:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_proxy_arp:
end_ip: "<your_own_value>"
id: "4"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_proxy_arp_data(json):
option_list = ['end_ip', 'id', 'interface',
'ip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
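# Illustrative sketch (hypothetical payload, not part of the module): keys are
# converted recursively, so
#   underscore_to_hyphen({'end_ip': '10.0.0.20', 'entries': [{'start_ip': '10.0.0.1'}]})
# would return
#   {'end-ip': '10.0.0.20', 'entries': [{'start-ip': '10.0.0.1'}]}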
def system_proxy_arp(data, fos):
vdom = data['vdom']
state = data['state']
system_proxy_arp_data = data['system_proxy_arp']
filtered_data = underscore_to_hyphen(filter_system_proxy_arp_data(system_proxy_arp_data))
if state == "present":
return fos.set('system',
'proxy-arp',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'proxy-arp',
mkey=filtered_data['id'],
vdom=vdom)
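# A DELETE answered with HTTP 404 is still counted as success below, so removing
# an object that is already absent keeps the module idempotent.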
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_proxy_arp']:
resp = system_proxy_arp(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_proxy_arp": {
"required": False, "type": "dict", "default": None,
"options": {
"end_ip": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"interface": {"required": False, "type": "str"},
"ip": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| roadmapper/ansible | lib/ansible/modules/network/fortios/fortios_system_proxy_arp.py | Python | gpl-3.0 | 9,679 |
import time
from nxdrive.tests.common import OS_STAT_MTIME_RESOLUTION
from nxdrive.tests.common_unit_test import UnitTestCase
class TestReinitDatabase(UnitTestCase):
def setUp(self):
super(TestReinitDatabase, self).setUp()
self.local = self.local_client_1
self.remote = self.remote_document_client_1
# Make a folder and a file
self.test_remote_folder_id = self.remote.make_folder('/', 'Test folder')
self.remote.make_file('/Test folder', 'Test.txt', 'This is some content')
# Start engine and wait for synchronization
self.engine_1.start()
self.wait_sync(wait_for_async=True)
# Verify that everything is synchronized
self.assertTrue(self.local.exists('/Test folder'), 'Local folder should exist')
self.assertTrue(self.local.exists('/Test folder/Test.txt'), 'Local file should exist')
        # Reinitialize the database by unbinding and re-binding the engine
self._reinit_database()
def _check_states(self):
rows = self.engine_1.get_dao().get_states_from_partial_local('/')
for row in rows:
self.assertEqual(row.pair_state, 'synchronized')
def _reinit_database(self):
# Unbind engine
self.manager_1.unbind_engine(self.engine_1.get_uid())
# Re-bind engine
self.engine_1 = self.manager_1.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url, self.user_1,
self.password_1, start_engine=False)
self.engine_1.syncCompleted.connect(self.app.sync_completed)
self.engine_1.get_remote_watcher().remoteScanFinished.connect(self.app.remote_scan_completed)
self.engine_1.get_remote_watcher().changesFound.connect(self.app.remote_changes_found)
self.engine_1.get_remote_watcher().noChangesFound.connect(self.app.no_remote_changes_found)
def _check_conflict_automatic_resolution(self):
self.assertEqual(len(self.engine_1.get_dao().get_conflicts()), 0)
def _check_conflict_detection(self):
self.assertEqual(len(self.engine_1.get_dao().get_conflicts()), 1)
def test_synchronize_folderish_and_same_digest(self):
# Start engine and wait for synchronization
self.engine_1.start()
self.wait_remote_scan()
# Check everything is synchronized
self._check_states()
def test_synchronize_remote_change(self):
# Modify the remote file
self.remote.update_content('/Test folder/Test.txt', 'Content has changed')
# Start engine and wait for synchronization
self.engine_1.start()
self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False)
# Check that a conflict is detected
self._check_conflict_detection()
file_state = self.engine_1.get_dao().get_state_from_local('/' + self.workspace_title + '/Test folder/Test.txt')
self.assertIsNotNone(file_state)
self.assertEqual(file_state.pair_state, 'conflicted')
# Assert content of the local file has not changed
self.assertEqual(self.local.get_content('/Test folder/Test.txt'),
'This is some content',
'Local content should not have changed')
def test_synchronize_local_change(self):
# Modify the local file
time.sleep(OS_STAT_MTIME_RESOLUTION)
self.local.update_content('/Test folder/Test.txt', 'Content has changed')
# Start engine and wait for synchronization
self.engine_1.start()
self.wait_sync(timeout=5, fail_if_timeout=False)
# Check that a conflict is detected
self._check_conflict_detection()
file_state = self.engine_1.get_dao().get_state_from_local('/' + self.workspace_title + '/Test folder/Test.txt')
self.assertIsNotNone(file_state)
self.assertEqual(file_state.pair_state, 'conflicted')
# Assert content of the remote file has not changed
self.assertEqual(self.remote.get_content('/Test folder/Test.txt'),
'This is some content',
'Remote content should not have changed')
def test_synchronize_remote_and_local_change(self):
# Modify the remote file
self.remote.update_content('/Test folder/Test.txt',
'Content has remotely changed')
# Modify the local file
time.sleep(OS_STAT_MTIME_RESOLUTION)
self.local.update_content('/Test folder/Test.txt', 'Content has locally changed')
# Start engine and wait for synchronization
self.engine_1.start()
self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False)
# Check that a conflict is detected
self._check_conflict_detection()
file_state = self.engine_1.get_dao().get_state_from_local('/' + self.workspace_title + '/Test folder/Test.txt')
self.assertIsNotNone(file_state)
self.assertEqual(file_state.pair_state, 'conflicted')
# Assert content of the local and remote files has not changed
self.assertEqual(self.local.get_content('/Test folder/Test.txt'),
'Content has locally changed',
'Local content should not have changed')
self.assertEqual(self.remote.get_content('/Test folder/Test.txt'),
'Content has remotely changed',
'Remote content should not have changed')
| arameshkumar/nuxeo-drive | nuxeo-drive-client/nxdrive/tests/test_reinit_database.py | Python | lgpl-2.1 | 5,437 |
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Crovax the Cursed")
def crovax_the_cursed(card, abilities):
def crovax_the_cursed():
return AbilityNotImplemented
def crovax_the_cursed():
return AbilityNotImplemented
def crovax_the_cursed():
return AbilityNotImplemented
return crovax_the_cursed, crovax_the_cursed, crovax_the_cursed,
@card("Intruder Alarm")
def intruder_alarm(card, abilities):
def intruder_alarm():
return AbilityNotImplemented
def intruder_alarm():
return AbilityNotImplemented
return intruder_alarm, intruder_alarm,
@card("Cannibalize")
def cannibalize(card, abilities):
def cannibalize():
return AbilityNotImplemented
return cannibalize,
@card("Spike Worker")
def spike_worker(card, abilities):
def spike_worker():
return AbilityNotImplemented
def spike_worker():
return AbilityNotImplemented
return spike_worker, spike_worker,
@card("Contemplation")
def contemplation(card, abilities):
def contemplation():
return AbilityNotImplemented
return contemplation,
@card("Megrim")
def megrim(card, abilities):
def megrim():
return AbilityNotImplemented
return megrim,
@card("Shifting Wall")
def shifting_wall(card, abilities):
def shifting_wall():
return AbilityNotImplemented
def shifting_wall():
return AbilityNotImplemented
return shifting_wall, shifting_wall,
@card("Dauthi Trapper")
def dauthi_trapper(card, abilities):
def dauthi_trapper():
return AbilityNotImplemented
return dauthi_trapper,
@card("Rabid Rats")
def rabid_rats(card, abilities):
def rabid_rats():
return AbilityNotImplemented
return rabid_rats,
@card("Morgue Thrull")
def morgue_thrull(card, abilities):
def morgue_thrull():
return AbilityNotImplemented
return morgue_thrull,
@card("Shard Phoenix")
def shard_phoenix(card, abilities):
def shard_phoenix():
return AbilityNotImplemented
def shard_phoenix():
return AbilityNotImplemented
def shard_phoenix():
return AbilityNotImplemented
return shard_phoenix, shard_phoenix, shard_phoenix,
@card("Skyshroud Archer")
def skyshroud_archer(card, abilities):
def skyshroud_archer():
return AbilityNotImplemented
return skyshroud_archer,
@card("Mask of the Mimic")
def mask_of_the_mimic(card, abilities):
def mask_of_the_mimic():
return AbilityNotImplemented
def mask_of_the_mimic():
return AbilityNotImplemented
return mask_of_the_mimic, mask_of_the_mimic,
@card("Provoke")
def provoke(card, abilities):
def provoke():
return AbilityNotImplemented
def provoke():
return AbilityNotImplemented
return provoke, provoke,
@card("Duct Crawler")
def duct_crawler(card, abilities):
def duct_crawler():
return AbilityNotImplemented
return duct_crawler,
@card("Nomads en-Kor")
def nomads_enkor(card, abilities):
def nomads_enkor():
return AbilityNotImplemented
return nomads_enkor,
@card("Change of Heart")
def change_of_heart(card, abilities):
def change_of_heart():
return AbilityNotImplemented
def change_of_heart():
return AbilityNotImplemented
return change_of_heart, change_of_heart,
@card("Overgrowth")
def overgrowth(card, abilities):
def overgrowth():
return AbilityNotImplemented
def overgrowth():
return AbilityNotImplemented
return overgrowth, overgrowth,
@card("Pursuit of Knowledge")
def pursuit_of_knowledge(card, abilities):
def pursuit_of_knowledge():
return AbilityNotImplemented
def pursuit_of_knowledge():
return AbilityNotImplemented
return pursuit_of_knowledge, pursuit_of_knowledge,
@card("Sift")
def sift(card, abilities):
def sift():
return AbilityNotImplemented
return sift,
@card("Portcullis")
def portcullis(card, abilities):
def portcullis():
return AbilityNotImplemented
return portcullis,
@card("Dream Halls")
def dream_halls(card, abilities):
def dream_halls():
return AbilityNotImplemented
return dream_halls,
@card("Ruination")
def ruination(card, abilities):
def ruination():
return AbilityNotImplemented
return ruination,
@card("Horn of Greed")
def horn_of_greed(card, abilities):
def horn_of_greed():
return AbilityNotImplemented
return horn_of_greed,
@card("Hermit Druid")
def hermit_druid(card, abilities):
def hermit_druid():
return AbilityNotImplemented
return hermit_druid,
@card("Spined Sliver")
def spined_sliver(card, abilities):
def spined_sliver():
return AbilityNotImplemented
return spined_sliver,
@card("Dream Prowler")
def dream_prowler(card, abilities):
def dream_prowler():
return AbilityNotImplemented
return dream_prowler,
@card("Spike Soldier")
def spike_soldier(card, abilities):
def spike_soldier():
return AbilityNotImplemented
def spike_soldier():
return AbilityNotImplemented
def spike_soldier():
return AbilityNotImplemented
return spike_soldier, spike_soldier, spike_soldier,
@card("Tortured Existence")
def tortured_existence(card, abilities):
def tortured_existence():
return AbilityNotImplemented
return tortured_existence,
@card("Mana Leak")
def mana_leak(card, abilities):
def mana_leak():
return AbilityNotImplemented
return mana_leak,
@card("Mob Justice")
def mob_justice(card, abilities):
def mob_justice():
return AbilityNotImplemented
return mob_justice,
@card("Primal Rage")
def primal_rage(card, abilities):
def primal_rage():
return AbilityNotImplemented
return primal_rage,
@card("Constant Mists")
def constant_mists(card, abilities):
def constant_mists():
return AbilityNotImplemented
def constant_mists():
return AbilityNotImplemented
return constant_mists, constant_mists,
@card("Crystalline Sliver")
def crystalline_sliver(card, abilities):
def crystalline_sliver():
return AbilityNotImplemented
return crystalline_sliver,
@card("Conviction")
def conviction(card, abilities):
def conviction():
return AbilityNotImplemented
def conviction():
return AbilityNotImplemented
def conviction():
return AbilityNotImplemented
return conviction, conviction, conviction,
@card("Death Stroke")
def death_stroke(card, abilities):
def death_stroke():
return AbilityNotImplemented
return death_stroke,
@card("Mindwarper")
def mindwarper(card, abilities):
def mindwarper():
return AbilityNotImplemented
def mindwarper():
return AbilityNotImplemented
return mindwarper, mindwarper,
@card("Silver Wyvern")
def silver_wyvern(card, abilities):
def silver_wyvern():
return AbilityNotImplemented
def silver_wyvern():
return AbilityNotImplemented
return silver_wyvern, silver_wyvern,
@card("Mind Peel")
def mind_peel(card, abilities):
def mind_peel():
return AbilityNotImplemented
def mind_peel():
return AbilityNotImplemented
return mind_peel, mind_peel,
@card("Scapegoat")
def scapegoat(card, abilities):
def scapegoat():
return AbilityNotImplemented
def scapegoat():
return AbilityNotImplemented
return scapegoat, scapegoat,
@card("Mind Games")
def mind_games(card, abilities):
def mind_games():
return AbilityNotImplemented
def mind_games():
return AbilityNotImplemented
return mind_games, mind_games,
@card("Flame Wave")
def flame_wave(card, abilities):
def flame_wave():
return AbilityNotImplemented
return flame_wave,
@card("Dungeon Shade")
def dungeon_shade(card, abilities):
def dungeon_shade():
return AbilityNotImplemented
def dungeon_shade():
return AbilityNotImplemented
return dungeon_shade, dungeon_shade,
@card("Convulsing Licid")
def convulsing_licid(card, abilities):
def convulsing_licid():
return AbilityNotImplemented
def convulsing_licid():
return AbilityNotImplemented
return convulsing_licid, convulsing_licid,
@card("Hesitation")
def hesitation(card, abilities):
def hesitation():
return AbilityNotImplemented
return hesitation,
@card("Lab Rats")
def lab_rats(card, abilities):
def lab_rats():
return AbilityNotImplemented
def lab_rats():
return AbilityNotImplemented
return lab_rats, lab_rats,
@card("Samite Blessing")
def samite_blessing(card, abilities):
def samite_blessing():
return AbilityNotImplemented
def samite_blessing():
return AbilityNotImplemented
return samite_blessing, samite_blessing,
@card("Lancers en-Kor")
def lancers_enkor(card, abilities):
def lancers_enkor():
return AbilityNotImplemented
def lancers_enkor():
return AbilityNotImplemented
return lancers_enkor, lancers_enkor,
@card("Heat of Battle")
def heat_of_battle(card, abilities):
def heat_of_battle():
return AbilityNotImplemented
return heat_of_battle,
@card("Contempt")
def contempt(card, abilities):
def contempt():
return AbilityNotImplemented
def contempt():
return AbilityNotImplemented
return contempt, contempt,
@card("Verdant Touch")
def verdant_touch(card, abilities):
def verdant_touch():
return AbilityNotImplemented
def verdant_touch():
return AbilityNotImplemented
return verdant_touch, verdant_touch,
@card("Torment")
def torment(card, abilities):
def torment():
return AbilityNotImplemented
def torment():
return AbilityNotImplemented
return torment, torment,
@card("Lowland Basilisk")
def lowland_basilisk(card, abilities):
def lowland_basilisk():
return AbilityNotImplemented
return lowland_basilisk,
@card("Skeleton Scavengers")
def skeleton_scavengers(card, abilities):
def skeleton_scavengers():
return AbilityNotImplemented
def skeleton_scavengers():
return AbilityNotImplemented
return skeleton_scavengers, skeleton_scavengers,
@card("Ransack")
def ransack(card, abilities):
def ransack():
return AbilityNotImplemented
return ransack,
@card("Mox Diamond")
def mox_diamond(card, abilities):
def mox_diamond():
return AbilityNotImplemented
def mox_diamond():
return AbilityNotImplemented
return mox_diamond, mox_diamond,
@card("Elven Rite")
def elven_rite(card, abilities):
def elven_rite():
return AbilityNotImplemented
return elven_rite,
@card("Hammerhead Shark")
def hammerhead_shark(card, abilities):
def hammerhead_shark():
return AbilityNotImplemented
return hammerhead_shark,
@card("Mortuary")
def mortuary(card, abilities):
def mortuary():
return AbilityNotImplemented
return mortuary,
@card("Jinxed Ring")
def jinxed_ring(card, abilities):
def jinxed_ring():
return AbilityNotImplemented
def jinxed_ring():
return AbilityNotImplemented
return jinxed_ring, jinxed_ring,
@card("Carnassid")
def carnassid(card, abilities):
def carnassid():
return AbilityNotImplemented
def carnassid():
return AbilityNotImplemented
return carnassid, carnassid,
@card("Heartstone")
def heartstone(card, abilities):
def heartstone():
return AbilityNotImplemented
return heartstone,
@card("Rebound")
def rebound(card, abilities):
def rebound():
return AbilityNotImplemented
return rebound,
@card("Mulch")
def mulch(card, abilities):
def mulch():
return AbilityNotImplemented
return mulch,
@card("Skyshroud Falcon")
def skyshroud_falcon(card, abilities):
def skyshroud_falcon():
return AbilityNotImplemented
return skyshroud_falcon,
@card("Rolling Stones")
def rolling_stones(card, abilities):
def rolling_stones():
return AbilityNotImplemented
return rolling_stones,
@card("Spindrift Drake")
def spindrift_drake(card, abilities):
def spindrift_drake():
return AbilityNotImplemented
def spindrift_drake():
return AbilityNotImplemented
return spindrift_drake, spindrift_drake,
@card("Hornet Cannon")
def hornet_cannon(card, abilities):
def hornet_cannon():
return AbilityNotImplemented
return hornet_cannon,
@card("Mogg Bombers")
def mogg_bombers(card, abilities):
def mogg_bombers():
return AbilityNotImplemented
return mogg_bombers,
@card("Smite")
def smite(card, abilities):
def smite():
return AbilityNotImplemented
return smite,
@card("Victual Sliver")
def victual_sliver(card, abilities):
def victual_sliver():
return AbilityNotImplemented
return victual_sliver,
@card("Volrath's Laboratory")
def volraths_laboratory(card, abilities):
def volraths_laboratory():
return AbilityNotImplemented
def volraths_laboratory():
return AbilityNotImplemented
return volraths_laboratory, volraths_laboratory,
@card("Spike Feeder")
def spike_feeder(card, abilities):
def spike_feeder():
return AbilityNotImplemented
def spike_feeder():
return AbilityNotImplemented
def spike_feeder():
return AbilityNotImplemented
return spike_feeder, spike_feeder, spike_feeder,
@card("Wall of Tears")
def wall_of_tears(card, abilities):
def wall_of_tears():
return AbilityNotImplemented
def wall_of_tears():
return AbilityNotImplemented
return wall_of_tears, wall_of_tears,
@card("Evacuation")
def evacuation(card, abilities):
def evacuation():
return AbilityNotImplemented
return evacuation,
@card("Bullwhip")
def bullwhip(card, abilities):
def bullwhip():
return AbilityNotImplemented
return bullwhip,
@card("Mogg Flunkies")
def mogg_flunkies(card, abilities):
def mogg_flunkies():
return AbilityNotImplemented
return mogg_flunkies,
@card("Ensnaring Bridge")
def ensnaring_bridge(card, abilities):
def ensnaring_bridge():
return AbilityNotImplemented
return ensnaring_bridge,
@card("Skyshroud Troopers")
def skyshroud_troopers(card, abilities):
def skyshroud_troopers():
return AbilityNotImplemented
return skyshroud_troopers,
@card("Sword of the Chosen")
def sword_of_the_chosen(card, abilities):
def sword_of_the_chosen():
return AbilityNotImplemented
return sword_of_the_chosen,
@card("Spike Breeder")
def spike_breeder(card, abilities):
def spike_breeder():
return AbilityNotImplemented
def spike_breeder():
return AbilityNotImplemented
def spike_breeder():
return AbilityNotImplemented
return spike_breeder, spike_breeder, spike_breeder,
@card("Reins of Power")
def reins_of_power(card, abilities):
def reins_of_power():
return AbilityNotImplemented
return reins_of_power,
@card("Warrior en-Kor")
def warrior_enkor(card, abilities):
def warrior_enkor():
return AbilityNotImplemented
return warrior_enkor,
@card("Mogg Infestation")
def mogg_infestation(card, abilities):
def mogg_infestation():
return AbilityNotImplemented
return mogg_infestation,
@card("Youthful Knight")
def youthful_knight(card, abilities):
def youthful_knight():
return AbilityNotImplemented
return youthful_knight,
@card("Fanning the Flames")
def fanning_the_flames(card, abilities):
def fanning_the_flames():
return AbilityNotImplemented
def fanning_the_flames():
return AbilityNotImplemented
return fanning_the_flames, fanning_the_flames,
@card("Foul Imp")
def foul_imp(card, abilities):
def foul_imp():
return AbilityNotImplemented
def foul_imp():
return AbilityNotImplemented
return foul_imp, foul_imp,
@card("Hibernation Sliver")
def hibernation_sliver(card, abilities):
def hibernation_sliver():
return AbilityNotImplemented
return hibernation_sliver,
@card("Tidal Warrior")
def tidal_warrior(card, abilities):
def tidal_warrior():
return AbilityNotImplemented
return tidal_warrior,
@card("Spitting Hydra")
def spitting_hydra(card, abilities):
def spitting_hydra():
return AbilityNotImplemented
def spitting_hydra():
return AbilityNotImplemented
return spitting_hydra, spitting_hydra,
@card("Corrupting Licid")
def corrupting_licid(card, abilities):
def corrupting_licid():
return AbilityNotImplemented
def corrupting_licid():
return AbilityNotImplemented
return corrupting_licid, corrupting_licid,
@card("Volrath's Gardens")
def volraths_gardens(card, abilities):
def volraths_gardens():
return AbilityNotImplemented
return volraths_gardens,
@card("Volrath's Shapeshifter")
def volraths_shapeshifter(card, abilities):
def volraths_shapeshifter():
return AbilityNotImplemented
def volraths_shapeshifter():
return AbilityNotImplemented
return volraths_shapeshifter, volraths_shapeshifter,
@card("Mogg Maniac")
def mogg_maniac(card, abilities):
def mogg_maniac():
return AbilityNotImplemented
return mogg_maniac,
@card("Hidden Retreat")
def hidden_retreat(card, abilities):
def hidden_retreat():
return AbilityNotImplemented
return hidden_retreat,
@card("Stronghold Assassin")
def stronghold_assassin(card, abilities):
def stronghold_assassin():
return AbilityNotImplemented
return stronghold_assassin,
@card("Tempting Licid")
def tempting_licid(card, abilities):
def tempting_licid():
return AbilityNotImplemented
def tempting_licid():
return AbilityNotImplemented
return tempting_licid, tempting_licid,
@card("Wall of Blossoms")
def wall_of_blossoms(card, abilities):
def wall_of_blossoms():
return AbilityNotImplemented
def wall_of_blossoms():
return AbilityNotImplemented
return wall_of_blossoms, wall_of_blossoms,
@card("Awakening")
def awakening(card, abilities):
def awakening():
return AbilityNotImplemented
return awakening,
@card("Leap")
def leap(card, abilities):
def leap():
return AbilityNotImplemented
def leap():
return AbilityNotImplemented
return leap, leap,
@card("Revenant")
def revenant(card, abilities):
def revenant():
return AbilityNotImplemented
def revenant():
return AbilityNotImplemented
return revenant, revenant,
@card("Soltari Champion")
def soltari_champion(card, abilities):
def soltari_champion():
return AbilityNotImplemented
def soltari_champion():
return AbilityNotImplemented
return soltari_champion, soltari_champion,
@card("Honor Guard")
def honor_guard(card, abilities):
def honor_guard():
return AbilityNotImplemented
return honor_guard,
@card("Wall of Essence")
def wall_of_essence(card, abilities):
def wall_of_essence():
return AbilityNotImplemented
def wall_of_essence():
return AbilityNotImplemented
return wall_of_essence, wall_of_essence,
@card("Flowstone Mauler")
def flowstone_mauler(card, abilities):
def flowstone_mauler():
return AbilityNotImplemented
def flowstone_mauler():
return AbilityNotImplemented
return flowstone_mauler, flowstone_mauler,
@card("Shaman en-Kor")
def shaman_enkor(card, abilities):
def shaman_enkor():
return AbilityNotImplemented
def shaman_enkor():
return AbilityNotImplemented
return shaman_enkor, shaman_enkor,
@card("Calming Licid")
def calming_licid(card, abilities):
def calming_licid():
return AbilityNotImplemented
def calming_licid():
return AbilityNotImplemented
return calming_licid, calming_licid,
@card("Flowstone Hellion")
def flowstone_hellion(card, abilities):
def flowstone_hellion():
return AbilityNotImplemented
def flowstone_hellion():
return AbilityNotImplemented
return flowstone_hellion, flowstone_hellion,
@card("Wall of Souls")
def wall_of_souls(card, abilities):
def wall_of_souls():
return AbilityNotImplemented
def wall_of_souls():
return AbilityNotImplemented
return wall_of_souls, wall_of_souls,
@card("Flowstone Blade")
def flowstone_blade(card, abilities):
def flowstone_blade():
return AbilityNotImplemented
def flowstone_blade():
return AbilityNotImplemented
return flowstone_blade, flowstone_blade,
@card("Flowstone Shambler")
def flowstone_shambler(card, abilities):
def flowstone_shambler():
return AbilityNotImplemented
return flowstone_shambler,
@card("Bandage")
def bandage(card, abilities):
def bandage():
return AbilityNotImplemented
def bandage():
return AbilityNotImplemented
return bandage, bandage,
@card("Amok")
def amok(card, abilities):
def amok():
return AbilityNotImplemented
return amok,
@card("Spirit en-Kor")
def spirit_enkor(card, abilities):
def spirit_enkor():
return AbilityNotImplemented
def spirit_enkor():
return AbilityNotImplemented
return spirit_enkor, spirit_enkor,
@card("Sliver Queen")
def sliver_queen(card, abilities):
def sliver_queen():
return AbilityNotImplemented
return sliver_queen,
@card("Gliding Licid")
def gliding_licid(card, abilities):
def gliding_licid():
return AbilityNotImplemented
def gliding_licid():
return AbilityNotImplemented
return gliding_licid, gliding_licid,
@card("Stronghold Taskmaster")
def stronghold_taskmaster(card, abilities):
def stronghold_taskmaster():
return AbilityNotImplemented
return stronghold_taskmaster,
@card("Brush with Death")
def brush_with_death(card, abilities):
def brush_with_death():
return AbilityNotImplemented
def brush_with_death():
return AbilityNotImplemented
return brush_with_death, brush_with_death,
@card("Grave Pact")
def grave_pact(card, abilities):
def grave_pact():
return AbilityNotImplemented
return grave_pact,
@card("Wall of Razors")
def wall_of_razors(card, abilities):
def wall_of_razors():
return AbilityNotImplemented
def wall_of_razors():
return AbilityNotImplemented
return wall_of_razors, wall_of_razors,
@card("Temper")
def temper(card, abilities):
def temper():
return AbilityNotImplemented
return temper,
@card("Walking Dream")
def walking_dream(card, abilities):
def walking_dream():
return AbilityNotImplemented
def walking_dream():
return AbilityNotImplemented
return walking_dream, walking_dream,
@card("Invasion Plans")
def invasion_plans(card, abilities):
def invasion_plans():
return AbilityNotImplemented
def invasion_plans():
return AbilityNotImplemented
return invasion_plans, invasion_plans,
@card("Fling")
def fling(card, abilities):
def fling():
return AbilityNotImplemented
def fling():
return AbilityNotImplemented
return fling, fling,
@card("Sacred Ground")
def sacred_ground(card, abilities):
def sacred_ground():
return AbilityNotImplemented
return sacred_ground,
@card("Volrath's Stronghold")
def volraths_stronghold(card, abilities):
def volraths_stronghold():
return AbilityNotImplemented
def volraths_stronghold():
return AbilityNotImplemented
return volraths_stronghold, volraths_stronghold,
@card("Spike Colony")
def spike_colony(card, abilities):
def spike_colony():
return AbilityNotImplemented
def spike_colony():
return AbilityNotImplemented
return spike_colony, spike_colony,
@card("Thalakos Deceiver")
def thalakos_deceiver(card, abilities):
def thalakos_deceiver():
return AbilityNotImplemented
def thalakos_deceiver():
return AbilityNotImplemented
return thalakos_deceiver, thalakos_deceiver,
@card("Warrior Angel")
def warrior_angel(card, abilities):
def warrior_angel():
return AbilityNotImplemented
def warrior_angel():
return AbilityNotImplemented
return warrior_angel, warrior_angel,
@card("Furnace Spirit")
def furnace_spirit(card, abilities):
def furnace_spirit():
return AbilityNotImplemented
def furnace_spirit():
return AbilityNotImplemented
return furnace_spirit, furnace_spirit,
@card("Burgeoning")
def burgeoning(card, abilities):
def burgeoning():
return AbilityNotImplemented
return burgeoning,
@card("Bottomless Pit")
def bottomless_pit(card, abilities):
def bottomless_pit():
return AbilityNotImplemented
return bottomless_pit,
@card("Endangered Armodon")
def endangered_armodon(card, abilities):
def endangered_armodon():
return AbilityNotImplemented
return endangered_armodon,
@card("Acidic Sliver")
def acidic_sliver(card, abilities):
def acidic_sliver():
return AbilityNotImplemented
return acidic_sliver,
@card("Crossbow Ambush")
def crossbow_ambush(card, abilities):
def crossbow_ambush():
return AbilityNotImplemented
return crossbow_ambush,
@card("Shock")
def shock(card, abilities):
def shock():
return AbilityNotImplemented
return shock,
@card("Seething Anger")
def seething_anger(card, abilities):
def seething_anger():
return AbilityNotImplemented
def seething_anger():
return AbilityNotImplemented
    return seething_anger, seething_anger,
 | Julian/cardboard | cardboard/cards/sets/stronghold.py | Python | mit | 26,294 |
'''
Created on Oct 3, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
D = {'solverType':'splitField', 'flavor':'both', 'numRuns':500, 'expt':'intParameters', 'numProcs':16}
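# Sketch of the sweep indexing (assuming numRuns covers the 10x10 grid below):
# parseNumber % 100 picks a (rho, xi) point on the flattened grid, while
# int(parseNumber/100) + 100 picks the background number, e.g.
# parseNumber = 237 -> rho = rhos[37], xi = xis[37], bkgNo = 102.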
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
rhos, xis = np.meshgrid(np.logspace(2,4,10), np.logspace(-4,-2,10))
rhos = rhos.flatten()
xis = xis.flatten()
noFreqs = np.array(8)
bkg = 0
D['freqs'] = np.round(np.logspace(np.log10(1000), np.log10(50000), noFreqs))
D['inc'] = np.array([45*np.pi/180.0])
D['rho'] = rhos[parseNumber%100]
D['xi'] = xis[parseNumber%100]
D['bkgNo'] = int(parseNumber/100) + 100
return D
| daStrauss/subsurface | src/expts/paramSplitFieldBoth.py | Python | apache-2.0 | 1,344 |
#!/usr/bin/python
usage = "pointy2OmegaScan.py [--options] gps,pointy.out gps,pointy.out ..."
description = "builds an OmegaScan config file based off the channels with pvalues below --pvalueThr"
author = "reed.essick@ligo.org"
#-------------------------------------------------
import os
import subprocess as sp
import numpy as np
from optparse import OptionParser
#-------------------------------------------------
parser = OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option('-p', '--pvalueThr', default=1e-3, type='float')
parser.add_option("", "--frame-type", default="H1_R", type="string")
parser.add_option("", "--timeRange", default=64, type="int")
parser.add_option("", "--freq-map", default=None, type="string", help="the output of FrChannels, used to map channel names to sample frequencies")
parser.add_option("", "--gwchan", default="H1:CAL-DELTAL_EXTERNAL_DQ", type="string")
parser.add_option("", "--output-dir", default=".", type="string")
parser.add_option("", "--condor", default=False, action="store_true", help="write a condor_sub file instead of a shell script")
parser.add_option("", "--accounting-group", default="ligo.dev.o2.detchar.explore.test", type="string")
parser.add_option("", "--accounting-group-user", default="reed.essick", type="string")
parser.add_option("", "--request-memory", default=2000000, type="int", help="measured in kB")
opts, args = parser.parse_args()
if len(args)<1:
raise ValueError("Please supply exactly at least one input argument\n%s"%(usage))
pointys = []
gps = []
for arg in args:
g, p = arg.split(',')
pointys.append( p )
gps.append( float(g) )
if opts.freq_map==None:
opts.freq_map = raw_input("--freq-map=")
if not os.path.exists(opts.output_dir):
os.makedirs( opts.output_dir )
ifo = opts.frame_type[0]
#-------------------------------------------------
### set up commands and such
gwdf_cmd = "gw_data_find -o %s --type %s"%(ifo, opts.frame_type) + " -s %d -e %d -u file"
os_cmd = "/home/omega/opt/omega/bin/wpipeline scan %.9f -r -c %s -o %s -f %s"
header = """# Q Scan configuration file
# Automatically generated with wconfigure.sh
# by user bhughey on 2009-07-09 10:33:18 PDT
# from sample frame files:
# /archive/frames/S6/L1/LHO/H-H1_RDS_R_L1-9311/H-H1_RDS_R_L1-931194752-64.gwf
[Context,Context]
[Parameters,Parameter Estimation]
[Notes,Notes]
[Aux Channels,Identified interesting Aux channels]
"""
template = """{
channelName: '%s'
frameType: '%s'
sampleFrequency: %s
searchTimeRange: %d
searchFrequencyRange: [0 Inf]
searchQRange: [4 64]
searchMaximumEnergyLoss: 0.2
whiteNoiseFalseRate: 1e-3
searchWindowDuration: 0.5
plotTimeRanges: [0.1 1 4 16]
plotFrequencyRange: []
plotNormalizedEnergyRange: [0 25.5]
alwaysPlotFlag: 1
}"""%('%s', opts.frame_type, '%s', opts.timeRange)
if opts.condor:
cmd_file = "%s/run_Qscan.sub"%(opts.output_dir)
cmd_obj = open(cmd_file, "w")
print >> cmd_obj, """universe = vanilla
executable = /home/omega/opt/omega/bin/wpipeline
getenv = True
accounting_group = %s
accounting_group_user = %s
log = %s/Qscan.log
error = %s/Qscan-$(cluster)-$(process).err
output = %s/Qscan-$(cluster)-$(process).out
request_memory = %d KB
notification = never"""%(opts.accounting_group, opts.accounting_group_user, opts.output_dir, opts.output_dir, opts.output_dir, opts.request_memory)
else:
cmd_file = "%s/run_Qscan.sh"%(opts.output_dir)
cmd_obj = open(cmd_file, "w")
#-------------------------------------------------
if opts.verbose:
print "reading in sample frequencies from :"+opts.freq_map
file_obj = open(opts.freq_map, "r")
freq_map = dict( [l.strip().split() for l in file_obj] )
file_obj.close()
### extract relevant channels from pointy.out
for t, pointy in zip(gps, pointys):
if opts.verbose:
print "writing Qscan config files for: %d -> %s"%(t, pointy)
outdir = "%s/%.6f"%(opts.output_dir, t)
if not os.path.exists(outdir):
os.makedirs(outdir)
chans = []
if opts.verbose:
print "processing : %s"%(pointy)
file_obj = open(pointy, "r")
lines = file_obj.readlines()
file_obj.close()
nlines = len(lines)
ind = 0
while ind < nlines:
if "channel" in lines[ind]:
chan = lines[ind].strip().split("=")[-1]
nind = ind + 4
if "pvalue" not in lines[nind]:
nind += 4
pvalue = float(lines[nind].strip().split("=")[-1])
if pvalue <= opts.pvalueThr: ### only include if pvalue is small enough
chans.append( chan )
ind += 1
if opts.verbose:
print " found %d relevant KW channels"%(len(chans))
### convert to raw channel names
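    # e.g. a hypothetical KW name "H1_SUS-ETMY_L2_MASTER_OUT_UL_DQ_32_2048" becomes
    # the raw channel "H1:SUS-ETMY_L2_MASTER_OUT_UL_DQ" (drop the trailing band,
    # re-insert the IFO colon)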
channels = set()
for chan in chans:
chan = chan.split("_")
chan = "%s:%s"%(chan[0], "_".join(chan[1:-2]))
channels.add( chan )
if opts.verbose:
print " corresponds to %d raw channels"%(len(channels))
### set up channel frequency stuff
channels = dict( (chan, freq_map[chan]) for chan in channels )
### write config file
conf_file = "%s/Qscan.cnf"%(outdir)
if opts.verbose:
print " "+conf_file
conf_obj = open(conf_file, "w")
print >> conf_obj, header
print >> conf_obj, template%(opts.gwchan, freq_map[opts.gwchan])
for chan in sorted(channels.keys()): ### assumes KW naming conventions
print >> conf_obj, template%(chan, channels[chan])
conf_obj.close()
### set up command
this_cmd = gwdf_cmd%(int(t), int(t)+1)
if opts.verbose:
print " "+this_cmd
frame = sp.Popen( this_cmd.split(), stdout=sp.PIPE).communicate()[0].split()[0]
directory = os.path.dirname( frame.replace("file://localhost","") )
that_cmd = os_cmd%(t, conf_file, outdir, directory)
if opts.verbose:
print " "+that_cmd
if opts.condor:
print >> cmd_obj, "arguments = \" %s \"\nqueue 1"%(" ".join(that_cmd.split()[1:]))
else:
print >> cmd_obj, that_cmd
cmd_obj.close()
if opts.verbose:
if opts.condor:
print "now run :\ncondor_submit %s"%(cmd_file)
else:
print "now run :\n%s"%(cmd_file)
| reedessick/pointy-Poisson | pointy2OmegaScan.py | Python | mit | 6,424 |
import fem.geometry as g
import fem.model as m
import fem.material as mat
import fem.solver as s
import fem.mesh as me
import plot
import pickle
from fem.matrices import stiffness_matrix, mass_matrix, stiffness_matrix_nl
def generate_layers(thickness, layers_count, material):
layer_top = thickness / 2
layer_thickness = thickness / layers_count
layers = set()
for i in range(layers_count):
layer = m.Layer(layer_top - layer_thickness, layer_top, material, i)
layers.add(layer)
layer_top -= layer_thickness
return layers
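# Illustrative values: thickness=0.05 with layers_count=1 yields one layer spanning
# heights [-0.025, 0.025]; with layers_count=2 the layers would cover [0.0, 0.025]
# and [-0.025, 0.0].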
def solve(geometry, thickness, linear):
layers_count = 1
layers = generate_layers(thickness, layers_count, mat.IsotropicMaterial.steel())
model = m.Model(geometry, layers, m.Model.FIXED_BOTTOM_LEFT_RIGHT_POINTS)
mesh = me.Mesh.generate(width, layers, N, M, m.Model.FIXED_BOTTOM_LEFT_RIGHT_POINTS)
if (linear):
lam, vec = s.solve(model, mesh, stiffness_matrix, mass_matrix)
else:
lam, vec = s.solve_nl(model, mesh, stiffness_matrix, mass_matrix, stiffness_matrix_nl)
return lam, vec, mesh, geometry
# r=2
# width = r*2*3.14
# curvature = 1/r
width = 2
curvature = 0.8
thickness = 0.05
corrugation_amplitude = 0.03
corrugation_frequency = 20
#geometry = g.CorrugatedCylindricalPlate(width, curvature, corrugation_amplitude, corrugation_frequency)
#geometry = g.CylindricalPlate(width, curvature)
geometry = g.Plate(width)
N = 50
M = 4
toCalculate = True
#toCalculate = False
linear = True
def save_mesh(filename, mesh):
with open(filename + '.mesh', 'wb') as f:
pickle.dump(mesh, f)
def save_geometry(filename, geometry):
with open(filename + '.geom', 'wb') as f:
pickle.dump(geometry, f)
def load_geometry(filename):
with open(filename + '.geom', 'rb') as f:
return pickle.load(f)
def load_mesh(filename):
with open(filename + '.mesh', 'rb') as f:
return pickle.load(f)
def save_results(filename, results):
with open(filename + '.res', 'wb') as f:
pickle.dump(results, f)
def load_results(filename):
with open(filename + '.res', 'rb') as f:
return pickle.load(f)
lin_suf = "l"
if (not linear):
lin_suf = "nl"
filename = str(geometry) + "_{}x{}_{}".format(N,M,lin_suf)
if (toCalculate):
lam, vec, mesh, geometry = solve(geometry, thickness, linear)
results = s.convert_to_results(lam, vec, mesh, geometry)
# save_results(filename, results)
##
## save_mesh(meshfile, mesh)
##
## save_geometry(geometryfile, geometry)
#
#else:
# results = load_results(filename)
results_index = 0
# plot.plot_mesh(results[results_index].mesh, width, thickness)
# plot.plot_deformed_mesh(results[results_index], width, thickness)
plot.plot_init_and_deformed_geometry(results[results_index], 0, width, -thickness / 2, thickness / 2, 0)
# plot.plot_init_geometry(results[results_index].geometry, 0, width, -thickness / 2, thickness / 2, 0)
# for i in range(6):
# plot.plot_strain_2(results[results_index], N, M, 0, width, -thickness / 2, thickness / 2, 0, i)
# plot.plot_strain(results[results_index], 0, width, -thickness / 2, thickness / 2, 0, i)
to_print = 20
if (len(results) < to_print):
to_print = len(results)
for i in range(to_print):
print(results[i].freq)
| tarashor/vibrations | py/main3.py | Python | mit | 3,422 |
# coding=utf-8
"""
Base classes for data formats only
"""
import os
from abc import ABCMeta
from typing import Any, List, Dict
from ultros.core.storage.base import MutableFileStorageBase
__author__ = "Gareth Coles"
class DataFile(MutableFileStorageBase, metaclass=ABCMeta):
"""
Base class representing any data file
"""
def __init__(self, owner: Any, manager, path: str, *args: List[Any], **kwargs: Dict[Any, Any]):
super().__init__(owner, manager, path, *args, **kwargs)
self.path = os.path.join(self.manager.data_location, self.path)
| UltrosBot/Ultros3K | src/ultros/core/storage/data/base.py | Python | artistic-2.0 | 574 |
# -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the subversion repository browser dialog.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
import os
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QHeaderView, QLineEdit, QDialog, QApplication, \
QDialogButtonBox, QTreeWidgetItem
from PyQt5.QtCore import QTimer, QProcess, QRegExp, Qt, pyqtSlot
from E5Gui import E5MessageBox
from .Ui_SvnRepoBrowserDialog import Ui_SvnRepoBrowserDialog
import UI.PixmapCache
import Preferences
class SvnRepoBrowserDialog(QDialog, Ui_SvnRepoBrowserDialog):
"""
Class implementing the subversion repository browser dialog.
"""
def __init__(self, vcs, mode="browse", parent=None):
"""
Constructor
@param vcs reference to the vcs object
@param mode mode of the dialog (string, "browse" or "select")
@param parent parent widget (QWidget)
"""
super(SvnRepoBrowserDialog, self).__init__(parent)
self.setupUi(self)
self.setWindowFlags(Qt.Window)
self.repoTree.headerItem().setText(self.repoTree.columnCount(), "")
self.repoTree.header().setSortIndicator(0, Qt.AscendingOrder)
self.vcs = vcs
self.mode = mode
self.process = QProcess()
self.process.finished.connect(self.__procFinished)
self.process.readyReadStandardOutput.connect(self.__readStdout)
self.process.readyReadStandardError.connect(self.__readStderr)
if self.mode == "select":
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Close).hide()
else:
self.buttonBox.button(QDialogButtonBox.Ok).hide()
self.buttonBox.button(QDialogButtonBox.Cancel).hide()
self.__dirIcon = UI.PixmapCache.getIcon("dirClosed.png")
self.__fileIcon = UI.PixmapCache.getIcon("fileMisc.png")
self.__urlRole = Qt.UserRole
self.__ignoreExpand = False
self.intercept = False
self.__rx_dir = QRegExp(
r"""\s*([0-9]+)\s+(\w+)\s+"""
r"""((?:\w+\s+\d+|[0-9.]+\s+\w+)\s+[0-9:]+)\s+(.+)\s*""")
self.__rx_file = QRegExp(
r"""\s*([0-9]+)\s+(\w+)\s+([0-9]+)\s"""
r"""((?:\w+\s+\d+|[0-9.]+\s+\w+)\s+[0-9:]+)\s+(.+)\s*""")
def closeEvent(self, e):
"""
Protected slot implementing a close event handler.
@param e close event (QCloseEvent)
"""
if self.process is not None and \
self.process.state() != QProcess.NotRunning:
self.process.terminate()
QTimer.singleShot(2000, self.process.kill)
self.process.waitForFinished(3000)
e.accept()
def __resort(self):
"""
Private method to resort the tree.
"""
self.repoTree.sortItems(
self.repoTree.sortColumn(),
self.repoTree.header().sortIndicatorOrder())
def __resizeColumns(self):
"""
Private method to resize the tree columns.
"""
self.repoTree.header().resizeSections(QHeaderView.ResizeToContents)
self.repoTree.header().setStretchLastSection(True)
def __generateItem(self, repopath, revision, author, size, date,
nodekind, url):
"""
Private method to generate a tree item in the repository tree.
@param repopath path of the item (string)
@param revision revision info (string)
@param author author info (string)
@param size size info (string)
@param date date info (string)
@param nodekind node kind info (string, "dir" or "file")
@param url url of the entry (string)
@return reference to the generated item (QTreeWidgetItem)
"""
path = repopath
if revision == "":
rev = ""
else:
rev = int(revision)
if size == "":
sz = ""
else:
sz = int(size)
itm = QTreeWidgetItem(self.parentItem)
itm.setData(0, Qt.DisplayRole, path)
itm.setData(1, Qt.DisplayRole, rev)
itm.setData(2, Qt.DisplayRole, author)
itm.setData(3, Qt.DisplayRole, sz)
itm.setData(4, Qt.DisplayRole, date)
if nodekind == "dir":
itm.setIcon(0, self.__dirIcon)
itm.setChildIndicatorPolicy(QTreeWidgetItem.ShowIndicator)
elif nodekind == "file":
itm.setIcon(0, self.__fileIcon)
itm.setData(0, self.__urlRole, url)
itm.setTextAlignment(0, Qt.AlignLeft)
itm.setTextAlignment(1, Qt.AlignRight)
itm.setTextAlignment(2, Qt.AlignLeft)
itm.setTextAlignment(3, Qt.AlignRight)
itm.setTextAlignment(4, Qt.AlignLeft)
return itm
def __repoRoot(self, url):
"""
Private method to get the repository root using the svn info command.
        @param url the repository URL to browse (string)
@return repository root (string)
"""
ioEncoding = Preferences.getSystem("IOEncoding")
repoRoot = None
process = QProcess()
args = []
args.append('info')
self.vcs.addArguments(args, self.vcs.options['global'])
args.append('--xml')
args.append(url)
process.start('svn', args)
procStarted = process.waitForStarted(5000)
if procStarted:
finished = process.waitForFinished(30000)
if finished:
if process.exitCode() == 0:
output = str(process.readAllStandardOutput(), ioEncoding,
'replace')
for line in output.splitlines():
line = line.strip()
if line.startswith('<root>'):
repoRoot = line.replace('<root>', '')\
.replace('</root>', '')
break
else:
error = str(process.readAllStandardError(),
Preferences.getSystem("IOEncoding"),
'replace')
self.errors.insertPlainText(error)
self.errors.ensureCursorVisible()
else:
QApplication.restoreOverrideCursor()
E5MessageBox.critical(
self,
self.tr('Process Generation Error'),
self.tr(
'The process {0} could not be started. '
'Ensure, that it is in the search path.'
).format('svn'))
return repoRoot
def __listRepo(self, url, parent=None):
"""
Private method to perform the svn list command.
@param url the repository URL to browse (string)
@param parent reference to the item, the data should be appended to
(QTreeWidget or QTreeWidgetItem)
"""
self.errorGroup.hide()
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
self.repoUrl = url
if parent is None:
self.parentItem = self.repoTree
else:
self.parentItem = parent
if self.parentItem == self.repoTree:
repoRoot = self.__repoRoot(url)
if repoRoot is None:
self.__finish()
return
self.__ignoreExpand = True
itm = self.__generateItem(
repoRoot, "", "", "", "", "dir", repoRoot)
itm.setExpanded(True)
self.parentItem = itm
urlPart = repoRoot
for element in url.replace(repoRoot, "").split("/"):
if element:
urlPart = "{0}/{1}".format(urlPart, element)
itm = self.__generateItem(
element, "", "", "", "", "dir", urlPart)
itm.setExpanded(True)
self.parentItem = itm
itm.setExpanded(False)
self.__ignoreExpand = False
self.__finish()
return
self.intercept = False
self.process.kill()
args = []
args.append('list')
self.vcs.addArguments(args, self.vcs.options['global'])
if '--verbose' not in self.vcs.options['global']:
args.append('--verbose')
args.append(url)
self.process.start('svn', args)
procStarted = self.process.waitForStarted(5000)
if not procStarted:
self.__finish()
self.inputGroup.setEnabled(False)
self.inputGroup.hide()
E5MessageBox.critical(
self,
self.tr('Process Generation Error'),
self.tr(
'The process {0} could not be started. '
'Ensure, that it is in the search path.'
).format('svn'))
else:
self.inputGroup.setEnabled(True)
self.inputGroup.show()
def __normalizeUrl(self, url):
"""
        Private method to normalize the URL.
@param url the url to normalize (string)
@return normalized URL (string)
"""
if url.endswith("/"):
return url[:-1]
return url
def start(self, url):
"""
Public slot to start the svn info command.
        @param url the repository URL to browse (string)
"""
self.repoTree.clear()
self.url = ""
url = self.__normalizeUrl(url)
if self.urlCombo.findText(url) == -1:
self.urlCombo.addItem(url)
@pyqtSlot(str)
def on_urlCombo_currentIndexChanged(self, text):
"""
        Private slot called when a new repository URL is entered or selected.
@param text the text of the current item (string)
"""
url = self.__normalizeUrl(text)
if url != self.url:
self.url = url
self.repoTree.clear()
self.__listRepo(url)
@pyqtSlot(QTreeWidgetItem)
def on_repoTree_itemExpanded(self, item):
"""
Private slot called when an item is expanded.
@param item reference to the item to be expanded (QTreeWidgetItem)
"""
if not self.__ignoreExpand:
url = item.data(0, self.__urlRole)
self.__listRepo(url, item)
@pyqtSlot(QTreeWidgetItem)
def on_repoTree_itemCollapsed(self, item):
"""
Private slot called when an item is collapsed.
@param item reference to the item to be collapsed (QTreeWidgetItem)
"""
for child in item.takeChildren():
del child
@pyqtSlot()
def on_repoTree_itemSelectionChanged(self):
"""
Private slot called when the selection changes.
"""
if self.mode == "select":
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
def accept(self):
"""
Public slot called when the dialog is accepted.
"""
if self.focusWidget() == self.urlCombo:
return
super(SvnRepoBrowserDialog, self).accept()
def getSelectedUrl(self):
"""
Public method to retrieve the selected repository URL.
@return the selected repository URL (string)
"""
items = self.repoTree.selectedItems()
if len(items) == 1:
return items[0].data(0, self.__urlRole)
else:
return ""
def __finish(self):
"""
Private slot called when the process finished or the user pressed the
button.
"""
if self.process is not None and \
self.process.state() != QProcess.NotRunning:
self.process.terminate()
QTimer.singleShot(2000, self.process.kill)
self.process.waitForFinished(3000)
self.inputGroup.setEnabled(False)
self.inputGroup.hide()
self.__resizeColumns()
self.__resort()
QApplication.restoreOverrideCursor()
def __procFinished(self, exitCode, exitStatus):
"""
Private slot connected to the finished signal.
@param exitCode exit code of the process (integer)
@param exitStatus exit status of the process (QProcess.ExitStatus)
"""
self.__finish()
def __readStdout(self):
"""
Private slot to handle the readyReadStandardOutput signal.
It reads the output of the process, formats it and inserts it into
the contents pane.
"""
if self.process is not None:
self.process.setReadChannel(QProcess.StandardOutput)
while self.process.canReadLine():
s = str(self.process.readLine(),
Preferences.getSystem("IOEncoding"),
'replace')
if self.__rx_dir.exactMatch(s):
revision = self.__rx_dir.cap(1)
author = self.__rx_dir.cap(2)
date = self.__rx_dir.cap(3)
name = self.__rx_dir.cap(4).strip()
if name.endswith("/"):
name = name[:-1]
size = ""
nodekind = "dir"
if name == ".":
continue
elif self.__rx_file.exactMatch(s):
revision = self.__rx_file.cap(1)
author = self.__rx_file.cap(2)
size = self.__rx_file.cap(3)
date = self.__rx_file.cap(4)
name = self.__rx_file.cap(5).strip()
nodekind = "file"
else:
continue
url = "{0}/{1}".format(self.repoUrl, name)
self.__generateItem(
name, revision, author, size, date, nodekind, url)
def __readStderr(self):
"""
Private slot to handle the readyReadStandardError signal.
It reads the error output of the process and inserts it into the
error pane.
"""
if self.process is not None:
s = str(self.process.readAllStandardError(),
Preferences.getSystem("IOEncoding"),
'replace')
self.errors.insertPlainText(s)
self.errors.ensureCursorVisible()
self.errorGroup.show()
def on_passwordCheckBox_toggled(self, isOn):
"""
Private slot to handle the password checkbox toggled.
@param isOn flag indicating the status of the check box (boolean)
"""
if isOn:
self.input.setEchoMode(QLineEdit.Password)
else:
self.input.setEchoMode(QLineEdit.Normal)
@pyqtSlot()
def on_sendButton_clicked(self):
"""
Private slot to send the input to the subversion process.
"""
input = self.input.text()
input += os.linesep
if self.passwordCheckBox.isChecked():
self.errors.insertPlainText(os.linesep)
self.errors.ensureCursorVisible()
else:
self.errors.insertPlainText(input)
self.errors.ensureCursorVisible()
self.process.write(input)
self.passwordCheckBox.setChecked(False)
self.input.clear()
def on_input_returnPressed(self):
"""
Private slot to handle the press of the return key in the input field.
"""
self.intercept = True
self.on_sendButton_clicked()
def keyPressEvent(self, evt):
"""
Protected slot to handle a key press event.
@param evt the key press event (QKeyEvent)
"""
if self.intercept:
self.intercept = False
evt.accept()
return
super(SvnRepoBrowserDialog, self).keyPressEvent(evt)
| testmana2/test | Plugins/VcsPlugins/vcsSubversion/SvnRepoBrowserDialog.py | Python | gpl-3.0 | 16,503 |
import sys
import os
if sys.version < '3':
from .btcommon import *
else:
from bluetooth.btcommon import *
__version__ = 0.22
def _dbg(*args):
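    # Debugging is disabled by default; the early return below skips the writes.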
return
sys.stderr.write(*args)
sys.stderr.write("\n")
if sys.platform == "win32":
_dbg("trying widcomm")
have_widcomm = False
dll = "wbtapi.dll"
sysroot = os.getenv ("SystemRoot")
if os.path.exists (dll) or \
os.path.exists (os.path.join (sysroot, "system32", dll)) or \
os.path.exists (os.path.join (sysroot, dll)):
try:
from . import widcomm
if widcomm.inquirer.is_device_ready ():
# if the Widcomm stack is active and a Bluetooth device on that
# stack is detected, then use the Widcomm stack
from .widcomm import *
have_widcomm = True
except ImportError:
pass
if not have_widcomm:
# otherwise, fall back to the Microsoft stack
_dbg("Widcomm not ready. falling back to MS stack")
if sys.version < '3':
from .msbt import *
else:
from bluetooth.msbt import *
elif sys.platform.startswith("linux"):
if sys.version < '3':
from .bluez import *
else:
from bluetooth.bluez import *
elif sys.platform == "darwin":
from .osx import *
else:
raise Exception("This platform (%s) is currently not supported by pybluez." % sys.platform)
discover_devices.__doc__ = \
"""
performs a bluetooth device discovery using the first available bluetooth
resource.
if lookup_names is False, returns a list of bluetooth addresses.
if lookup_names is True, returns a list of (address, name) tuples
lookup_names=False
if set to True, then discover_devices also attempts to lookup the
display name of each detected device.
if lookup_class is True, the class of the device is added to the tuple
"""
lookup_name.__doc__ = \
"""
Tries to determine the friendly name (human readable) of the device with
the specified bluetooth address. Returns the name on success, and None
on failure.
"""
advertise_service.__doc__ = \
"""
Advertises a service with the local SDP server. sock must be a bound,
listening socket. name should be the name of the service, and service_id
(if specified) should be a string of the form
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", where each 'X' is a hexadecimal
digit.
    service_classes is a list of service classes that this service belongs to.
    Each service class is a 16-bit UUID in the form "XXXX", where each 'X' is a
hexadecimal digit, or a 128-bit UUID in the form
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX". There are some constants for
standard services, e.g. SERIAL_PORT_CLASS that equals to "1101". Some class
constants:
SERIAL_PORT_CLASS LAN_ACCESS_CLASS DIALUP_NET_CLASS
HEADSET_CLASS CORDLESS_TELEPHONY_CLASS AUDIO_SOURCE_CLASS
AUDIO_SINK_CLASS PANU_CLASS NAP_CLASS
GN_CLASS
    profiles is a list of service profiles that this service fulfills. Each
profile is a tuple with ( uuid, version). Most standard profiles use
standard classes as UUIDs. PyBluez offers a list of standard profiles,
for example SERIAL_PORT_PROFILE. All standard profiles have the same
name as the classes, except that _CLASS suffix is replaced by _PROFILE.
provider is a text string specifying the provider of the service
description is a text string describing the service
A note on working with Symbian smartphones:
bt_discover in Python for Series 60 will only detect service records
with service class SERIAL_PORT_CLASS and profile SERIAL_PORT_PROFILE
"""
stop_advertising.__doc__ = \
"""
Instructs the local SDP server to stop advertising the service associated
with sock. You should typically call this right before you close sock.
"""
find_service.__doc__ = \
"""
find_service (name = None, uuid = None, address = None)
Searches for SDP services that match the specified criteria and returns
the search results. If no criteria are specified, then returns a list of
all nearby services detected. If more than one is specified, then
the search results will match all the criteria specified. If uuid is
specified, it must be either a 16-bit UUID in the form "XXXX", where each
'X' is a hexadecimal digit, or as a 128-bit UUID in the form
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX". A special case of address is
"localhost", which will search for services on the local machine.
The search results will be a list of dictionaries. Each dictionary
represents a search match and will have the following key/value pairs:
host - the bluetooth address of the device advertising the
service
name - the name of the service being advertised
description - a description of the service being advertised
provider - the name of the person/organization providing the service
protocol - either 'RFCOMM', 'L2CAP', None if the protocol was not
specified, or 'UNKNOWN' if the protocol was specified but
unrecognized
port - the L2CAP PSM # if the protocol is 'L2CAP', the RFCOMM
channel # if the protocol is 'RFCOMM', or None if it
wasn't specified
service-classes - a list of service class IDs (UUID strings). possibly
empty
profiles - a list of profiles - (UUID, version) pairs - the
service claims to support. possibly empty.
service-id - the Service ID of the service. None if it wasn't set
See the Bluetooth spec for the difference between
Service ID and Service Class ID List
"""
| KHU-YoungBo/pybluez | bluetooth/__init__.py | Python | gpl-2.0 | 5,991 |
# -*- coding: utf-8 -*-
# pylint: disable=R0904,C0103
# R0904: Too many public methods (X/20)
# C0103: Invalid name "xX" (should match [a-z_][a-z0-9_]{2,30}$)
from __future__ import absolute_import, print_function, unicode_literals
import unittest
import email
from traceback import format_exc
from mailman.email.message import Message
from kittystore.scrub import Scrubber
from kittystore.test import get_test_file
class TestScrubber(unittest.TestCase):
def test_attachment_1(self):
with open(get_test_file("attachment-1.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertEqual(len(attachments), 1)
self.assertEqual(attachments[0], (
2, u'puntogil.vcf', u'text/x-vcard', u"utf-8",
'begin:vcard\r\nfn:gil\r\nn:;gil\r\nversion:2.1\r\n'
'end:vcard\r\n\r\n'))
self.assertEqual(contents,
"This is a test message.\r\n\r\n"
"\n-- \ndevel mailing list\ndevel@lists.fedoraproject.org\n"
"https://admin.fedoraproject.org/mailman/listinfo/devel\n"
)
def test_attachment_2(self):
with open(get_test_file("attachment-2.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertEqual(len(attachments), 1)
self.assertEqual(attachments[0], (
3, u'signature.asc', u'application/pgp-signature', None,
'-----BEGIN PGP SIGNATURE-----\r\nVersion: GnuPG v1.4.12 '
'(GNU/Linux)\r\nComment: Using GnuPG with Mozilla - '
'http://www.enigmail.net/\r\n\r\niEYEARECAAYFAlBhm3oACgkQhmBj'
'z394AnmMnQCcC+6tWcqE1dPQmIdRbLXgKGVp\r\nEeUAn2OqtaXaXaQV7rx+'
'SmOldmSzcFw4\r\n=OEJv\r\n-----END PGP SIGNATURE-----\r\n'))
self.assertEqual(contents,
u"This is a test message\r\nNon-ascii chars: Hofm\xfchlgasse\r\n"
u"\n-- \ndevel mailing list\ndevel@lists.fedoraproject.org\n"
u"https://admin.fedoraproject.org/mailman/listinfo/devel\n"
)
def test_attachment_3(self):
with open(get_test_file("attachment-3.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertEqual(len(attachments), 2)
# HTML part
self.assertEqual(attachments[0][0:4],
(3, u"attachment.html", "text/html", "iso-8859-1"))
self.assertEqual(len(attachments[0][4]), 3134)
# Image attachment
self.assertEqual(attachments[1][0:4],
(4, u"GeoffreyRoucourt.jpg", "image/jpeg", None))
self.assertEqual(len(attachments[1][4]), 282180)
# Scrubbed content
self.assertEqual(contents, u"This is a test message\r\n")
def test_html_email_1(self):
with open(get_test_file("html-email-1.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertEqual(len(attachments), 1)
# HTML part
self.assertEqual(attachments[0][0:4],
(2, u"attachment.html", "text/html", "iso-8859-1"))
self.assertEqual(len(attachments[0][4]), 2723)
# Scrubbed content
self.assertEqual(contents,
u"This is a test message\r\n"
u"Non-ASCII chars: r\xe9ponse fran\xe7ais \n")
def test_html_only_email(self):
# This email only has an HTML part, thus the scrubbed content will be
        # empty. It should be a unicode empty string, not str.
with open(get_test_file("html-email-2.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertTrue(isinstance(contents, unicode),
u"Scrubbed content should always be unicode")
def test_non_ascii_payload(self):
"""Scrubber must handle non-ascii messages"""
for enc in ["utf8", "iso8859"]:
with open(get_test_file("payload-%s.txt" % enc)) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertTrue(isinstance(contents, unicode))
self.assertEqual(contents, u'This message contains non-ascii '
u'characters:\n\xe9 \xe8 \xe7 \xe0 \xee \xef \xeb \u20ac\n')
def test_bad_content_type(self):
"""Scrubber must handle unknown content-types"""
with open(get_test_file("payload-unknown.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
try:
contents, attachments = scrubber.scrub()
except LookupError, e:
            print(format_exc())
self.fail(e) # codec not found
self.assertTrue(isinstance(contents, unicode))
def test_attachment_4(self):
with open(get_test_file("attachment-4.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertEqual(len(attachments), 2)
# HTML part
self.assertEqual(attachments[0][0:4],
(3, u"attachment.html", "text/html", "iso-8859-1"))
self.assertEqual(len(attachments[0][4]), 114)
# text attachment
self.assertEqual(attachments[1][0:4],
#(4, u"todo-déjeuner.txt", "text/plain", "utf-8"))
(4, u"todo-djeuner.txt", "text/plain", "utf-8"))
self.assertEqual(len(attachments[1][4]), 112)
# Scrubbed content
self.assertEqual(contents, u'This is a test, HTML message with '
u'accented letters : \xe9 \xe8 \xe7 \xe0.\r\nAnd an '
u'attachment with an accented filename\r\n')
def test_attachment_5(self):
with open(get_test_file("attachment-5.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.assertEqual(len(attachments), 1)
# text attachment
self.assertEqual(attachments[0][0:4],
(2, u"todo-djeuner.txt", "text/plain", "utf-8"))
self.assertEqual(len(attachments[0][4]), 112)
# Scrubbed content
self.assertEqual(contents, u'This is a test, HTML message with '
u'accented letters : \xe9 \xe8 \xe7 \xe0.\r\nAnd an '
u'attachment with an accented filename\r\n\r\n\r\n\r\n')
def test_attachment_name_badly_encoded(self):
msg = email.message.Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload(b"Dummy content")
msg.add_header(b'Content-Disposition', b'attachment', filename=b'non-ascii-\xb8\xb1\xb1\xbe.jpg')
scrubber = Scrubber("testlist@example.com", msg)
try:
contents, attachments = scrubber.scrub()
except UnicodeDecodeError:
print(format_exc())
self.fail("Could not decode the filename")
self.assertEqual(attachments,
[(0, u'attachment.bin', 'text/plain', None, b'Dummy content')])
def test_remove_next_part_from_content(self):
with open(get_test_file("pipermail_nextpart.txt")) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
self.failIf("-------------- next part --------------" in contents)
def test_name_unicode(self):
for num in range(1, 6):
with open(get_test_file("attachment-%d.txt" % num)) as email_file:
msg = email.message_from_file(email_file, _class=Message)
scrubber = Scrubber("testlist@example.com", msg)
contents, attachments = scrubber.scrub()
for attachment in attachments:
name = attachment[1]
self.assertTrue(isinstance(name, unicode),
"attachment %r must be unicode" % name)
| hyperkitty/kittystore | kittystore/test/test_scrub.py | Python | gpl-3.0 | 8,917 |
from pyTrivialFTP import pyTrivialFTP
| roberto-reale/pyTrivialFTP | pyTrivialFTP/__init__.py | Python | mit | 38 |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module makes HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import socket
import base64
import atom.url
import atom.http_interface
import atom.http_core
ssl_imported = False
ssl = None
try:
import ssl
ssl_imported = True
except ImportError:
pass
class ProxyError(atom.http_interface.Error):
pass
class TestConfigurationError(Exception):
pass
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
# Added to allow old v1 HttpClient objects to use the new
  # http_core.HttpClient. Used in unit tests to inject a mock client.
v2_http_client = None
def __init__(self, headers=None):
self.debug = False
self.headers = headers or {}
def request(self, operation, url, data=None, headers=None):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and
DELETE.
    Usage example, perform an HTTP GET on http://www.google.com/:
import atom.http
client = atom.http.HttpClient()
http_response = client.request('GET', 'http://www.google.com/')
Args:
operation: str The HTTP operation to be performed. This is usually one
of 'GET', 'POST', 'PUT', or 'DELETE'
data: filestream, list of parts, or other object which can be converted
to a string. Should be set to None when performing a GET or DELETE.
If data is a file-like object which can be read, this method will
read a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be
evaluated and sent.
url: The full URL to which the request should be sent. Can be a string
or atom.url.Url.
headers: dict of strings. HTTP headers which should be sent
in the request.
"""
all_headers = self.headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
if isinstance(data, types.StringTypes):
all_headers['Content-Length'] = str(len(data))
else:
raise atom.http_interface.ContentLengthRequired('Unable to calculate '
'the length of the data parameter. Specify a value for '
'Content-Length')
# Set the content type to the default value if none was set.
if 'Content-Type' not in all_headers:
all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
if self.v2_http_client is not None:
http_request = atom.http_core.HttpRequest(method=operation)
atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request)
http_request.headers = all_headers
if data:
http_request._body_parts.append(data)
return self.v2_http_client.request(http_request=http_request)
if not isinstance(url, atom.url.Url):
if isinstance(url, types.StringTypes):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
connection = self._prepare_connection(url, all_headers)
if self.debug:
connection.debuglevel = 1
connection.putrequest(operation, self._get_access_url(url),
skip_host=True)
if url.port is not None:
connection.putheader('Host', '%s:%s' % (url.host, url.port))
else:
connection.putheader('Host', url.host)
# Overcome a bug in Python 2.4 and 2.5
# httplib.HTTPConnection.putrequest adding
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', and thus resulting the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (url.protocol == 'https' and int(url.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % url.host
replacement_header_line = 'Host: %s' % url.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name in all_headers:
connection.putheader(header_name, all_headers[header_name])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
_send_data_part(data_part, connection)
else:
_send_data_part(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _prepare_connection(self, url, headers):
if not isinstance(url, atom.url.Url):
if isinstance(url, types.StringTypes):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
if url.protocol == 'https':
if not url.port:
return httplib.HTTPSConnection(url.host)
return httplib.HTTPSConnection(url.host, int(url.port))
else:
if not url.port:
return httplib.HTTPConnection(url.host)
return httplib.HTTPConnection(url.host, int(url.port))
def _get_access_url(self, url):
return url.to_string()
class ProxiedHttpClient(HttpClient):
"""Performs an HTTP request through a proxy.
  The proxy settings are obtained from environment variables. The URL of the
proxy server is assumed to be stored in the environment variables
'https_proxy' and 'http_proxy' respectively. If the proxy server requires
a Basic Auth authorization header, the username and password are expected to
be in the 'proxy-username' or 'proxy_username' variable and the
'proxy-password' or 'proxy_password' variable, or in 'http_proxy' or
'https_proxy' as "protocol://[username:password@]host:port".
After connecting to the proxy server, the request is completed as in
HttpClient.request.
"""
def _prepare_connection(self, url, headers):
proxy_settings = os.environ.get('%s_proxy' % url.protocol)
if not proxy_settings:
# The request was HTTP or HTTPS, but there was no appropriate proxy set.
return HttpClient._prepare_connection(self, url, headers)
else:
proxy_auth = _get_proxy_auth(proxy_settings)
proxy_netloc = _get_proxy_net_location(proxy_settings)
if url.protocol == 'https':
# Set any proxy auth headers
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = url.port
if not port:
port = '443'
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
# Set the user agent to send to the proxy
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
else:
user_agent = 'User-Agent: python\r\n'
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy_netloc)
if not proxy_url.port:
proxy_url.port = '80'
# Connect to the proxy server, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_url.host, int(proxy_url.port)))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != str(200):
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
sslobj = None
if ssl_imported:
sslobj = ssl.wrap_socket(p_sock, None, None)
else:
sock_ssl = socket.ssl(p_sock, None, None)
sslobj = httplib.FakeSocket(p_sock, sock_ssl)
        # Initialize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(proxy_url.host)
connection.sock = sslobj
return connection
else:
# If protocol was not https.
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy_netloc)
if not proxy_url.port:
proxy_url.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
def _get_access_url(self, url):
return url.to_string()
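# Editor's hedged usage sketch (not part of the original API): ProxiedHttpClient
# reads its settings from the environment, so a typical call looks like the
# helper below; the proxy address and credentials are purely illustrative.
def _example_proxied_get(url='http://www.example.com/'):
  os.environ.setdefault('http_proxy', 'http://user:secret@proxy.example.com:3128')
  client = ProxiedHttpClient()
  return client.request('GET', url)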
def _get_proxy_auth(proxy_settings):
"""Returns proxy authentication string for header.
Will check environment variables for proxy authentication info, starting with
proxy(_/-)username and proxy(_/-)password before checking the given
proxy_settings for a [protocol://]username:password@host[:port] string.
Args:
proxy_settings: String from http_proxy or https_proxy environment variable.
Returns:
Authentication string for proxy, or empty string if no proxy username was
found.
"""
proxy_username = None
proxy_password = None
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if not proxy_username:
if '@' in proxy_settings:
protocol_and_proxy_auth = proxy_settings.split('@')[0].split(':')
if len(protocol_and_proxy_auth) == 3:
# 3 elements means we have [<protocol>, //<user>, <password>]
proxy_username = protocol_and_proxy_auth[1].lstrip('/')
proxy_password = protocol_and_proxy_auth[2]
elif len(protocol_and_proxy_auth) == 2:
# 2 elements means we have [<user>, <password>]
proxy_username = protocol_and_proxy_auth[0]
proxy_password = protocol_and_proxy_auth[1]
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
def _get_proxy_net_location(proxy_settings):
"""Returns proxy host and port.
Args:
proxy_settings: String from http_proxy or https_proxy environment variable.
Must be in the form of protocol://[username:password@]host:port
Returns:
String in the form of protocol://host:port
"""
if '@' in proxy_settings:
protocol = proxy_settings.split(':')[0]
netloc = proxy_settings.split('@')[1]
return '%s://%s' % (protocol, netloc)
else:
return proxy_settings
def _send_data_part(data, connection):
if isinstance(data, types.StringTypes):
connection.send(data)
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
| Eforcers/inbox-cleaner | src/lib/atom/http.py | Python | mit | 12,803 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.base.login import BaseLoggedInPage
from cfme.utils.appliance import ViaREST, ViaUI
from cfme.utils.update import update
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [
test_requirements.generic_objects,
pytest.mark.uncollectif(lambda appliance: appliance.version < "5.9",
reason="5.8 appliance doesn't support generic objects")
]
@pytest.mark.sauce
@pytest.mark.parametrize('context', [ViaREST, ViaUI])
def test_generic_object_definition_crud(appliance, context, soft_assert):
with appliance.context.use(context):
definition = appliance.collections.generic_object_definitions.create(
name="{}_generic_class{}".format(context.name.lower(), fauxfactory.gen_alphanumeric()),
description="Generic Object Definition",
attributes={"addr01": "string"},
associations={"services": "Service"},
methods=["hello_world"])
if context.name == 'UI':
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(
'Generic Object Class "{}" has been successfully added.'.format(definition.name))
assert definition.exists
with update(definition):
definition.name = '{}_updated'.format(definition.name)
definition.attributes = {"new_address": "string"}
if context.name == 'UI':
view.flash.assert_success_message(
'Generic Object Class "{}" has been successfully saved.'.format(definition.name))
view = navigate_to(definition, 'Details')
soft_assert(view.summary('Attributes (2)').get_text_of('new_address'))
soft_assert(view.summary('Attributes (2)').get_text_of('addr01'))
soft_assert(view.summary('Associations (1)').get_text_of('services'))
else:
rest_definition = appliance.rest_api.collections.generic_object_definitions.get(
name=definition.name)
soft_assert("new_address" in rest_definition.properties['attributes'])
soft_assert("addr01" not in rest_definition.properties['attributes'])
definition.delete()
if context.name == 'UI':
view.flash.assert_success_message(
'Generic Object Class:"{}" was successfully deleted'.format(definition.name))
assert not definition.exists
| anurag03/integration_tests | cfme/tests/generic_objects/test_definitions.py | Python | gpl-2.0 | 2,521 |
from django.views.generic.detail import DetailView
from .base import GoodsSellBase
class GoodsDetailView(GoodsSellBase, DetailView):
template_name = "trades/goods_detail.html"
slug_field = "hash_id"
def dispatch(self, request, *args, **kwargs):
# from IPython import embed; embed()
        return super(GoodsDetailView, self).dispatch(request, *args, **kwargs)
| yevgnenll/but | but/trades/views/goods_detail.py | Python | mit | 385 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import logbook
import logbook.more
import structlog
class UnstructuredRenderer(structlog.processors.KeyValueRenderer):
def __call__(self, logger, method_name, event_dict):
event = None
if 'event' in event_dict:
event = event_dict.pop('event')
if event_dict or event is None:
# if there are other keys, use the parent class to render them
# and append to the event
rendered = super(UnstructuredRenderer, self).__call__(
logger, method_name, event_dict)
return f'{event} ({rendered})'
else:
return event
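# Editor's hedged illustration (not part of the original module): with this
# renderer a structured call such as
#   logger.info('user login', user_id=42)
# comes out as the flat string "user login (user_id=42)".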
def setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT):
'''
    Set up Papertrail logging using Taskcluster secrets
'''
# Setup papertrail
papertrail = logbook.SyslogHandler(
application_name=f'mozilla/release-services/{channel}/{project_name}',
address=(PAPERTRAIL_HOST, int(PAPERTRAIL_PORT)),
level=logbook.INFO,
format_string='{record.time} {record.channel}: {record.message}',
bubble=True,
)
papertrail.push_application()
def setup_sentry(project_name, channel, SENTRY_DSN):
'''
    Set up Sentry error reporting using Taskcluster secrets
'''
import raven
import raven.handlers.logbook
sentry_client = raven.Client(
dsn=SENTRY_DSN,
site=project_name,
name='mozilla/release-services',
environment=channel,
# TODO:
# release=read(VERSION) we need to promote that as well via secrets
# tags=...
# repos=...
)
sentry_handler = raven.handlers.logbook.SentryHandler(
sentry_client,
level=logbook.WARNING,
bubble=True,
)
sentry_handler.push_application()
def init_logger(project_name,
channel=None,
level=logbook.INFO,
PAPERTRAIL_HOST=None,
PAPERTRAIL_PORT=None,
SENTRY_DSN=None
):
if not channel:
channel = os.environ.get('APP_CHANNEL')
# Output logs on stderr, with color support on consoles
fmt = '{record.time} [{record.level_name:<8}] {record.channel}: {record.message}'
handler = logbook.more.ColorizedStderrHandler(level=level, format_string=fmt)
handler.push_application()
# Log to papertrail
if channel and PAPERTRAIL_HOST and PAPERTRAIL_PORT:
setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT)
    # Log to Sentry
if channel and SENTRY_DSN:
setup_sentry(project_name, channel, SENTRY_DSN)
def logbook_factory(*args, **kwargs):
# Logger given to structlog
logbook.compat.redirect_logging()
return logbook.Logger(level=level, *args, **kwargs)
# Setup structlog over logbook, with args list at the end
processors = [
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
UnstructuredRenderer(),
]
structlog.configure(
context_class=structlog.threadlocal.wrap_dict(dict),
processors=processors,
logger_factory=logbook_factory,
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
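if __name__ == '__main__':
    # Editor's hedged usage sketch: project and channel names are illustrative;
    # without Papertrail/Sentry settings only the colorized stderr handler is
    # installed.
    init_logger('example-project', channel='dev', level=logbook.DEBUG)
    logger = structlog.get_logger('example-project')
    logger.info('logger configured', backend='logbook')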
| La0/mozilla-relengapi | src/pulselistener/pulselistener/lib/log.py | Python | mpl-2.0 | 3,559 |
#!/usr/bin/env python3
from distutils.core import setup
setup(name="temsc",
description="message flow generator",
version="v1.1",
author="Wolfgang Beck",
author_email="beckw@telekom.de",
url="https://github.com/bewo001/temsc",
packages=["msc"])
| bewo001/temsc | setup.py | Python | gpl-2.0 | 272 |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import discriminator
except ImportError:
discriminator = sys.modules["onshape_client.oas.models.discriminator"]
try:
from onshape_client.oas.models import external_documentation
except ImportError:
external_documentation = sys.modules[
"onshape_client.oas.models.external_documentation"
]
try:
from onshape_client.oas.models import xml
except ImportError:
xml = sys.modules["onshape_client.oas.models.xml"]
class Schema(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"additional_properties": (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
), # noqa: E501
"default": (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
), # noqa: E501
"deprecated": (bool,), # noqa: E501
"description": (str,), # noqa: E501
"discriminator": (discriminator.Discriminator,), # noqa: E501
"enum": (
[bool, date, datetime, dict, float, int, list, str],
), # noqa: E501
"example": (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
), # noqa: E501
"exclusive_maximum": (bool,), # noqa: E501
"exclusive_minimum": (bool,), # noqa: E501
"extensions": (
{str: (bool, date, datetime, dict, float, int, list, str,)},
), # noqa: E501
"external_docs": (
external_documentation.ExternalDocumentation,
), # noqa: E501
"format": (str,), # noqa: E501
"getref": (str,), # noqa: E501
"max_items": (int,), # noqa: E501
"max_length": (int,), # noqa: E501
"max_properties": (int,), # noqa: E501
"maximum": (float,), # noqa: E501
"min_items": (int,), # noqa: E501
"min_length": (int,), # noqa: E501
"min_properties": (int,), # noqa: E501
"minimum": (float,), # noqa: E501
"multiple_of": (float,), # noqa: E501
"_not": (Schema,), # noqa: E501
"nullable": (bool,), # noqa: E501
"pattern": (str,), # noqa: E501
"properties": ({str: (Schema,)},), # noqa: E501
"read_only": (bool,), # noqa: E501
"required": ([str],), # noqa: E501
"title": (str,), # noqa: E501
"type": (str,), # noqa: E501
"unique_items": (bool,), # noqa: E501
"write_only": (bool,), # noqa: E501
"xml": (xml.XML,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"additional_properties": "additionalProperties", # noqa: E501
"default": "default", # noqa: E501
"deprecated": "deprecated", # noqa: E501
"description": "description", # noqa: E501
"discriminator": "discriminator", # noqa: E501
"enum": "enum", # noqa: E501
"example": "example", # noqa: E501
"exclusive_maximum": "exclusiveMaximum", # noqa: E501
"exclusive_minimum": "exclusiveMinimum", # noqa: E501
"extensions": "extensions", # noqa: E501
"external_docs": "externalDocs", # noqa: E501
"format": "format", # noqa: E501
"getref": "get$ref", # noqa: E501
"max_items": "maxItems", # noqa: E501
"max_length": "maxLength", # noqa: E501
"max_properties": "maxProperties", # noqa: E501
"maximum": "maximum", # noqa: E501
"min_items": "minItems", # noqa: E501
"min_length": "minLength", # noqa: E501
"min_properties": "minProperties", # noqa: E501
"minimum": "minimum", # noqa: E501
"multiple_of": "multipleOf", # noqa: E501
"_not": "not", # noqa: E501
"nullable": "nullable", # noqa: E501
"pattern": "pattern", # noqa: E501
"properties": "properties", # noqa: E501
"read_only": "readOnly", # noqa: E501
"required": "required", # noqa: E501
"title": "title", # noqa: E501
"type": "type", # noqa: E501
"unique_items": "uniqueItems", # noqa: E501
"write_only": "writeOnly", # noqa: E501
"xml": "xml", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""schema.Schema - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
additional_properties (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
default (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
deprecated (bool): [optional] # noqa: E501
description (str): [optional] # noqa: E501
discriminator (discriminator.Discriminator): [optional] # noqa: E501
enum ([bool, date, datetime, dict, float, int, list, str]): [optional] # noqa: E501
example (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
exclusive_maximum (bool): [optional] # noqa: E501
exclusive_minimum (bool): [optional] # noqa: E501
extensions ({str: (bool, date, datetime, dict, float, int, list, str,)}): [optional] # noqa: E501
external_docs (external_documentation.ExternalDocumentation): [optional] # noqa: E501
format (str): [optional] # noqa: E501
getref (str): [optional] # noqa: E501
max_items (int): [optional] # noqa: E501
max_length (int): [optional] # noqa: E501
max_properties (int): [optional] # noqa: E501
maximum (float): [optional] # noqa: E501
min_items (int): [optional] # noqa: E501
min_length (int): [optional] # noqa: E501
min_properties (int): [optional] # noqa: E501
minimum (float): [optional] # noqa: E501
multiple_of (float): [optional] # noqa: E501
_not (Schema): [optional] # noqa: E501
nullable (bool): [optional] # noqa: E501
pattern (str): [optional] # noqa: E501
properties ({str: (Schema,)}): [optional] # noqa: E501
read_only (bool): [optional] # noqa: E501
required ([str]): [optional] # noqa: E501
title (str): [optional] # noqa: E501
type (str): [optional] # noqa: E501
unique_items (bool): [optional] # noqa: E501
write_only (bool): [optional] # noqa: E501
xml (xml.XML): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
setattr(self, var_name, var_value)
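# Editor's hedged usage sketch (not part of the generated module): generated
# models are keyword-argument containers, so a minimal instance can be built
# like this (attribute names and values are illustrative).
#
#   schema = Schema(type="string", max_length=16, nullable=True)
#   schema.description = "a short identifier"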
| onshape-public/onshape-clients | python/onshape_client/oas/models/schema.py | Python | mit | 10,702 |
import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name()+'*'+pattern
files = glob(os.path.join(self.dist_dir,pattern))
files = [(os.path.getmtime(f),f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t,f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
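# Editor's hedged usage note (not part of the original module): the command is
# normally invoked from the command line after building distributions, e.g.
#
#   python setup.py sdist rotate --match=.tar.gz --keep=3
#
# which keeps the three newest matching files in dist/ and deletes the rest.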
| igoralmeida/tahoe-lafs | setuptools-0.6c16dev6.egg/setuptools/command/rotate.py | Python | gpl-2.0 | 1,985 |
import rsvg
from random import random
from livingthing import LivingThing
class Plant(LivingThing):
def __init__(self):
LivingThing.__init__(self)
self.x = (random() * 2.0) - 1
self.y = (random() * 2.0) - 1
size = random() * 0.02 + 0.02
self.width = size
self.height = size
self.rotation = random() * 360.0
# image
self.svg = rsvg.Handle('resources/plant.svg')
self.code = """\
if random() > 0.5:
self.right()
else:
self.left()"""
# default behaviour
def myfn(self, arg):
if random() > 0.5:
self.right()
else:
self.left()
def draw(self, context):
LivingThing.draw(self, context)
#context.set_line_width(0.02)
#context.set_source_rgb(0.0, 1.0, 0.0)
#context.rectangle(-0.5, -0.5, 1.0, 1.0)
#context.stroke()
| antoinevg/survival | ontology/plant.py | Python | gpl-2.0 | 825 |
import unittest
from unittest.mock import patch, MagicMock
from twitchcancer.storage.readonlystorage import ReadOnlyStorage
# ReadOnlyStorage.*()
class TestReadOnlyStorageNotImplemented(unittest.TestCase):
    # check that write-oriented calls (record, store) raise NotImplementedError
    # on the read-only storage
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
def test_not_implemented(self, init):
r = ReadOnlyStorage()
self.assertRaises(NotImplementedError, lambda: r.record())
self.assertRaises(NotImplementedError, lambda: r.store(None, None))
# ReadOnlyStorage.cancer()
class TestReadOnlyStorageCancer(unittest.TestCase):
# check that we request cancer from a live message store
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
def test_request(self, init):
r = ReadOnlyStorage()
r.socket = MagicMock()
r.socket.recv_pyobj = MagicMock(return_value="data")
r.poller = MagicMock()
result = r.cancer()
self.assertEqual(result, "data")
# check that we gracefully fail if the message store doesn't reply
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage._disconnect')
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage._connect')
def test_fail(self, connect, disconnect, init):
r = ReadOnlyStorage()
r.socket = MagicMock()
r.poller = MagicMock()
r.poller.poll = MagicMock(return_value=False)
result = r.cancer()
self.assertEqual(result, [])
self.assertEqual(connect.call_count, 1)
self.assertEqual(disconnect.call_count, 1)
# ReadOnlyStorage.leaderboards()
class TestReadOnlyStorageLeaderboards(unittest.TestCase):
# check that we transmit the call to a store
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
def test_default(self, init):
r = ReadOnlyStorage()
r._store = MagicMock()
r._store.leaderboards = MagicMock(return_value="data")
result = r.leaderboards("foo")
self.assertEqual(result, "data")
r._store.leaderboards.assert_called_once_with("foo")
# ReadOnlyStorage.leaderboard()
class TestReadOnlyStorageLeaderboard(unittest.TestCase):
# check that we transmit the call to a store
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
def test_default(self, init):
r = ReadOnlyStorage()
r._store = MagicMock()
r._store.leaderboard = MagicMock(return_value="data")
result = r.leaderboard("foo")
self.assertEqual(result, "data")
self.assertEqual(r._store.leaderboard.call_count, 1)
# ReadOnlyStorage.channel()
class TestReadOnlyStorageChannel(unittest.TestCase):
# check that we transmit the call to a store
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
def test_default(self, init):
r = ReadOnlyStorage()
r._store = MagicMock()
r._store.channel = MagicMock(return_value="data")
result = r.channel("channel")
self.assertEqual(result, "data")
r._store.channel.assert_called_once_with("channel")
# ReadOnlyStorage.status()
class TestReadOnlyStorageStatus(unittest.TestCase):
# check that we return historical stats and live status correctly
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
def test_default(self, init):
r = ReadOnlyStorage()
r._store = MagicMock()
r._store.status = MagicMock(return_value={
'channels': 1,
'messages': 2,
'cancer': 3,
})
r.cancer = MagicMock(return_value=[
{
'messages': 1,
'cancer': 2
},
{
'messages': 3,
'cancer': 4
},
])
expected = {
'total': {
'channels': 1,
'messages': 2,
'cancer': 3,
},
'live': {
'channels': 2,
'messages': 4,
'cancer': 6,
}
}
result = r.status()
self.assertEqual(result, expected)
# check that we gracefully fail if we don't get live data
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.cancer', return_value=[])
def test_fail(self, cancer, init):
r = ReadOnlyStorage()
r._store = MagicMock()
r._store.status = MagicMock(return_value={
'channels': 1,
'messages': 2,
'cancer': 3,
})
expected = {
'total': {
'channels': 1,
'messages': 2,
'cancer': 3,
},
'live': {
'channels': 0,
'messages': 0,
'cancer': 0,
}
}
result = r.status()
self.assertEqual(result, expected)
# ReadOnlyStorage.search()
class TestReadOnlyStorageSearch(unittest.TestCase):
# check that we transmit the call to a store
@patch('twitchcancer.storage.readonlystorage.ReadOnlyStorage.__init__', return_value=None)
def test_default(self, init):
r = ReadOnlyStorage()
r._store = MagicMock()
r._store.search = MagicMock(return_value="data")
result = r.search("foo")
self.assertEqual(result, "data")
self.assertEqual(r._store.search.call_count, 1)
# ReadOnlyStorage._connect()
class TestReadOnlyStorageConnect(unittest.TestCase):
pass
# ReadOnlyStorage._disconnect()
class TestReadOnlyStorageDisconnect(unittest.TestCase):
pass
| Benzhaomin/TwitchCancer | twitchcancer/storage/tests/test_readonlystorage.py | Python | gpl-3.0 | 5,982 |
#!/usr/bin/python
# glib-ginterface-gen.py: service-side interface generator
#
# Generate dbus-glib 0.x service GInterfaces from the Telepathy specification.
# The master copy of this program is in the telepathy-glib repository -
# please make any changes there.
#
# Copyright (C) 2006, 2007 Collabora Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import os.path
import xml.dom.minidom
from libglibcodegen import Signature, type_to_gtype, cmp_by_name, \
NS_TP, dbus_gutils_wincaps_to_uscore, \
signal_to_marshal_name, method_to_glue_marshal_name
NS_TP = "http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0"
class Generator(object):
def __init__(self, dom, prefix, basename, signal_marshal_prefix,
headers, end_headers, not_implemented_func,
allow_havoc):
self.dom = dom
self.__header = []
self.__body = []
assert prefix.endswith('_')
assert not signal_marshal_prefix.endswith('_')
# The main_prefix, sub_prefix thing is to get:
# FOO_ -> (FOO_, _)
# FOO_SVC_ -> (FOO_, _SVC_)
# but
# FOO_BAR/ -> (FOO_BAR_, _)
# FOO_BAR/SVC_ -> (FOO_BAR_, _SVC_)
if '/' in prefix:
main_prefix, sub_prefix = prefix.upper().split('/', 1)
prefix = prefix.replace('/', '_')
else:
main_prefix, sub_prefix = prefix.upper().split('_', 1)
self.MAIN_PREFIX_ = main_prefix + '_'
self._SUB_PREFIX_ = '_' + sub_prefix
self.Prefix_ = prefix
self.Prefix = prefix.replace('_', '')
self.prefix_ = prefix.lower()
self.PREFIX_ = prefix.upper()
self.basename = basename
self.signal_marshal_prefix = signal_marshal_prefix
self.headers = headers
self.end_headers = end_headers
self.not_implemented_func = not_implemented_func
self.allow_havoc = allow_havoc
def h(self, s):
self.__header.append(s)
def b(self, s):
self.__body.append(s)
def do_node(self, node):
node_name = node.getAttribute('name').replace('/', '')
node_name_mixed = self.node_name_mixed = node_name.replace('_', '')
node_name_lc = self.node_name_lc = node_name.lower()
node_name_uc = self.node_name_uc = node_name.upper()
interfaces = node.getElementsByTagName('interface')
assert len(interfaces) == 1, interfaces
interface = interfaces[0]
self.iface_name = interface.getAttribute('name')
tmp = interface.getAttribute('tp:implement-service')
if tmp == "no":
return
tmp = interface.getAttribute('tp:causes-havoc')
if tmp and not self.allow_havoc:
raise AssertionError('%s is %s' % (self.iface_name, tmp))
self.b('static const DBusGObjectInfo _%s%s_object_info;'
% (self.prefix_, node_name_lc))
self.b('')
methods = interface.getElementsByTagName('method')
signals = interface.getElementsByTagName('signal')
properties = interface.getElementsByTagName('property')
# Don't put properties in dbus-glib glue
glue_properties = []
self.b('struct _%s%sClass {' % (self.Prefix, node_name_mixed))
self.b(' GTypeInterface parent_class;')
for method in methods:
self.b(' %s %s;' % self.get_method_impl_names(method))
self.b('};')
self.b('')
if signals:
self.b('enum {')
for signal in signals:
self.b(' %s,' % self.get_signal_const_entry(signal))
self.b(' N_%s_SIGNALS' % node_name_uc)
self.b('};')
self.b('static guint %s_signals[N_%s_SIGNALS] = {0};'
% (node_name_lc, node_name_uc))
self.b('')
self.b('static void %s%s_base_init (gpointer klass);'
% (self.prefix_, node_name_lc))
self.b('')
self.b('GType')
self.b('%s%s_get_type (void)'
% (self.prefix_, node_name_lc))
self.b('{')
self.b(' static GType type = 0;')
self.b('')
self.b(' if (G_UNLIKELY (type == 0))')
self.b(' {')
self.b(' static const GTypeInfo info = {')
self.b(' sizeof (%s%sClass),' % (self.Prefix, node_name_mixed))
self.b(' %s%s_base_init, /* base_init */'
% (self.prefix_, node_name_lc))
self.b(' NULL, /* base_finalize */')
self.b(' NULL, /* class_init */')
self.b(' NULL, /* class_finalize */')
self.b(' NULL, /* class_data */')
self.b(' 0,')
self.b(' 0, /* n_preallocs */')
self.b(' NULL /* instance_init */')
self.b(' };')
self.b('')
self.b(' type = g_type_register_static (G_TYPE_INTERFACE,')
self.b(' "%s%s", &info, 0);' % (self.Prefix, node_name_mixed))
self.b(' }')
self.b('')
self.b(' return type;')
self.b('}')
self.b('')
self.h('/**')
self.h(' * %s%s:' % (self.Prefix, node_name_mixed))
self.h(' *')
self.h(' * Dummy typedef representing any implementation of this '
'interface.')
self.h(' */')
self.h('typedef struct _%s%s %s%s;'
% (self.Prefix, node_name_mixed, self.Prefix, node_name_mixed))
self.h('')
self.h('/**')
self.h(' * %s%sClass:' % (self.Prefix, node_name_mixed))
self.h(' *')
self.h(' * The class of %s%s.' % (self.Prefix, node_name_mixed))
self.h(' */')
self.h('typedef struct _%s%sClass %s%sClass;'
% (self.Prefix, node_name_mixed, self.Prefix, node_name_mixed))
self.h('')
self.h('GType %s%s_get_type (void);'
% (self.prefix_, node_name_lc))
gtype = self.current_gtype = \
self.MAIN_PREFIX_ + 'TYPE' + self._SUB_PREFIX_ + node_name_uc
classname = self.Prefix + node_name_mixed
self.h('#define %s \\\n (%s%s_get_type ())'
% (gtype, self.prefix_, node_name_lc))
self.h('#define %s%s(obj) \\\n'
' (G_TYPE_CHECK_INSTANCE_CAST((obj), %s, %s))'
% (self.PREFIX_, node_name_uc, gtype, classname))
self.h('#define %sIS%s%s(obj) \\\n'
' (G_TYPE_CHECK_INSTANCE_TYPE((obj), %s))'
% (self.MAIN_PREFIX_, self._SUB_PREFIX_, node_name_uc, gtype))
self.h('#define %s%s_GET_CLASS(obj) \\\n'
' (G_TYPE_INSTANCE_GET_INTERFACE((obj), %s, %sClass))'
% (self.PREFIX_, node_name_uc, gtype, classname))
self.h('')
self.h('')
base_init_code = []
for method in methods:
self.do_method(method)
for signal in signals:
base_init_code.extend(self.do_signal(signal))
self.b('static inline void')
self.b('%s%s_base_init_once (gpointer klass G_GNUC_UNUSED)'
% (self.prefix_, node_name_lc))
self.b('{')
if properties:
self.b(' static TpDBusPropertiesMixinPropInfo properties[%d] = {'
% (len(properties) + 1))
for m in properties:
access = m.getAttribute('access')
assert access in ('read', 'write', 'readwrite')
if access == 'read':
flags = 'TP_DBUS_PROPERTIES_MIXIN_FLAG_READ'
elif access == 'write':
flags = 'TP_DBUS_PROPERTIES_MIXIN_FLAG_WRITE'
else:
flags = ('TP_DBUS_PROPERTIES_MIXIN_FLAG_READ | '
'TP_DBUS_PROPERTIES_MIXIN_FLAG_WRITE')
self.b(' { 0, %s, "%s", 0, NULL, NULL }, /* %s */'
% (flags, m.getAttribute('type'), m.getAttribute('name')))
self.b(' { 0, 0, NULL, 0, NULL, NULL }')
self.b(' };')
self.b(' static TpDBusPropertiesMixinIfaceInfo interface =')
self.b(' { 0, properties, NULL, NULL };')
self.b('')
self.b(' dbus_g_object_type_install_info (%s%s_get_type (),'
% (self.prefix_, node_name_lc))
self.b(' &_%s%s_object_info);'
% (self.prefix_, node_name_lc))
self.b('')
if properties:
self.b(' interface.dbus_interface = g_quark_from_static_string '
'("%s");' % self.iface_name)
for i, m in enumerate(properties):
self.b(' properties[%d].name = g_quark_from_static_string ("%s");'
% (i, m.getAttribute('name')))
self.b(' properties[%d].type = %s;'
% (i, type_to_gtype(m.getAttribute('type'))[1]))
self.b(' tp_svc_interface_set_dbus_properties_info (%s, &interface);'
% self.current_gtype)
self.b('')
for s in base_init_code:
self.b(s)
self.b('}')
self.b('static void')
self.b('%s%s_base_init (gpointer klass)'
% (self.prefix_, node_name_lc))
self.b('{')
self.b(' static gboolean initialized = FALSE;')
self.b('')
self.b(' if (!initialized)')
self.b(' {')
self.b(' initialized = TRUE;')
self.b(' %s%s_base_init_once (klass);'
% (self.prefix_, node_name_lc))
self.b(' }')
# insert anything we need to do per implementation here
self.b('}')
self.h('')
self.b('static const DBusGMethodInfo _%s%s_methods[] = {'
% (self.prefix_, node_name_lc))
method_blob, offsets = self.get_method_glue(methods)
for method, offset in zip(methods, offsets):
self.do_method_glue(method, offset)
if len(methods) == 0:
# empty arrays are a gcc extension, so put in a dummy member
self.b(" { NULL, NULL, 0 }")
self.b('};')
self.b('')
self.b('static const DBusGObjectInfo _%s%s_object_info = {'
% (self.prefix_, node_name_lc))
self.b(' 0,') # version
self.b(' _%s%s_methods,' % (self.prefix_, node_name_lc))
self.b(' %d,' % len(methods))
self.b('"' + method_blob.replace('\0', '\\0') + '",')
self.b('"' + self.get_signal_glue(signals).replace('\0', '\\0') + '",')
self.b('"' +
self.get_property_glue(glue_properties).replace('\0', '\\0') +
'",')
self.b('};')
self.b('')
self.node_name_mixed = None
self.node_name_lc = None
self.node_name_uc = None
def get_method_glue(self, methods):
info = []
offsets = []
for method in methods:
offsets.append(len(''.join(info)))
info.append(self.iface_name + '\0')
info.append(method.getAttribute('name') + '\0')
info.append('A\0') # async
counter = 0
for arg in method.getElementsByTagName('arg'):
out = arg.getAttribute('direction') == 'out'
name = arg.getAttribute('name')
if not name:
assert out
name = 'arg%u' % counter
counter += 1
info.append(name + '\0')
if out:
info.append('O\0')
else:
info.append('I\0')
if out:
info.append('F\0') # not const
info.append('N\0') # not error or return
info.append(arg.getAttribute('type') + '\0')
info.append('\0')
return ''.join(info) + '\0', offsets
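    # Editor's hedged illustration: for a method DoStuff(in s name, in u count,
    # out i) on interface org.example.Thing, the fragment built above would look
    # roughly like
    #   "org.example.Thing\0DoStuff\0A\0name\0I\0s\0count\0I\0u\0arg0\0O\0F\0N\0i\0\0"
    # i.e. interface, member, the async marker "A", then one NUL-separated
    # record per argument, with out arguments carrying the extra "F" (non-const)
    # and "N" (ordinary value) flags.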
def do_method_glue(self, method, offset):
lc_name = method.getAttribute('tp:name-for-bindings')
if method.getAttribute('name') != lc_name.replace('_', ''):
raise AssertionError('Method %s tp:name-for-bindings (%s) does '
'not match' % (method.getAttribute('name'), lc_name))
lc_name = lc_name.lower()
marshaller = method_to_glue_marshal_name(method,
self.signal_marshal_prefix)
wrapper = self.prefix_ + self.node_name_lc + '_' + lc_name
self.b(" { (GCallback) %s, %s, %d }," % (wrapper, marshaller, offset))
def get_signal_glue(self, signals):
info = []
for signal in signals:
info.append(self.iface_name)
info.append(signal.getAttribute('name'))
return '\0'.join(info) + '\0\0'
# the implementation can be the same
get_property_glue = get_signal_glue
def get_method_impl_names(self, method):
dbus_method_name = method.getAttribute('name')
class_member_name = method.getAttribute('tp:name-for-bindings')
if dbus_method_name != class_member_name.replace('_', ''):
raise AssertionError('Method %s tp:name-for-bindings (%s) does '
'not match' % (dbus_method_name, class_member_name))
class_member_name = class_member_name.lower()
stub_name = (self.prefix_ + self.node_name_lc + '_' +
class_member_name)
return (stub_name + '_impl', class_member_name)
def do_method(self, method):
assert self.node_name_mixed is not None
in_class = []
# Examples refer to Thing.DoStuff (su) -> ii
# DoStuff
dbus_method_name = method.getAttribute('name')
# do_stuff
class_member_name = method.getAttribute('tp:name-for-bindings')
if dbus_method_name != class_member_name.replace('_', ''):
raise AssertionError('Method %s tp:name-for-bindings (%s) does '
'not match' % (dbus_method_name, class_member_name))
class_member_name = class_member_name.lower()
# void tp_svc_thing_do_stuff (TpSvcThing *, const char *, guint,
# DBusGMethodInvocation *);
stub_name = (self.prefix_ + self.node_name_lc + '_' +
class_member_name)
# typedef void (*tp_svc_thing_do_stuff_impl) (TpSvcThing *,
        #    const char *, guint, DBusGMethodInvocation *);
impl_name = stub_name + '_impl'
# void tp_svc_thing_return_from_do_stuff (DBusGMethodInvocation *,
# gint, gint);
ret_name = (self.prefix_ + self.node_name_lc + '_return_from_' +
class_member_name)
# Gather arguments
in_args = []
out_args = []
for i in method.getElementsByTagName('arg'):
name = i.getAttribute('name')
direction = i.getAttribute('direction') or 'in'
dtype = i.getAttribute('type')
assert direction in ('in', 'out')
if name:
name = direction + '_' + name
elif direction == 'in':
name = direction + str(len(in_args))
else:
name = direction + str(len(out_args))
ctype, gtype, marshaller, pointer = type_to_gtype(dtype)
if pointer:
ctype = 'const ' + ctype
struct = (ctype, name)
if direction == 'in':
in_args.append(struct)
else:
out_args.append(struct)
# Implementation type declaration (in header, docs in body)
self.b('/**')
self.b(' * %s:' % impl_name)
self.b(' * @self: The object implementing this interface')
for (ctype, name) in in_args:
self.b(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
self.b(' * @context: Used to return values or throw an error')
self.b(' *')
self.b(' * The signature of an implementation of the D-Bus method')
self.b(' * %s on interface %s.' % (dbus_method_name, self.iface_name))
self.b(' */')
self.h('typedef void (*%s) (%s%s *self,'
% (impl_name, self.Prefix, self.node_name_mixed))
for (ctype, name) in in_args:
self.h(' %s%s,' % (ctype, name))
self.h(' DBusGMethodInvocation *context);')
# Class member (in class definition)
in_class.append(' %s %s;' % (impl_name, class_member_name))
# Stub definition (in body only - it's static)
self.b('static void')
self.b('%s (%s%s *self,'
% (stub_name, self.Prefix, self.node_name_mixed))
for (ctype, name) in in_args:
self.b(' %s%s,' % (ctype, name))
self.b(' DBusGMethodInvocation *context)')
self.b('{')
self.b(' %s impl = (%s%s_GET_CLASS (self)->%s);'
% (impl_name, self.PREFIX_, self.node_name_uc, class_member_name))
self.b('')
self.b(' if (impl != NULL)')
tmp = ['self'] + [name for (ctype, name) in in_args] + ['context']
self.b(' {')
self.b(' (impl) (%s);' % ',\n '.join(tmp))
self.b(' }')
self.b(' else')
self.b(' {')
if self.not_implemented_func:
self.b(' %s (context);' % self.not_implemented_func)
else:
self.b(' GError e = { DBUS_GERROR, ')
self.b(' DBUS_GERROR_UNKNOWN_METHOD,')
self.b(' "Method not implemented" };')
self.b('')
self.b(' dbus_g_method_return_error (context, &e);')
self.b(' }')
self.b('}')
self.b('')
# Implementation registration (in both header and body)
self.h('void %s%s_implement_%s (%s%sClass *klass, %s impl);'
% (self.prefix_, self.node_name_lc, class_member_name,
self.Prefix, self.node_name_mixed, impl_name))
self.b('/**')
self.b(' * %s%s_implement_%s:'
% (self.prefix_, self.node_name_lc, class_member_name))
self.b(' * @klass: A class whose instances implement this interface')
self.b(' * @impl: A callback used to implement the %s D-Bus method'
% dbus_method_name)
self.b(' *')
self.b(' * Register an implementation for the %s method in the vtable'
% dbus_method_name)
self.b(' * of an implementation of this interface. To be called from')
self.b(' * the interface init function.')
self.b(' */')
self.b('void')
self.b('%s%s_implement_%s (%s%sClass *klass, %s impl)'
% (self.prefix_, self.node_name_lc, class_member_name,
self.Prefix, self.node_name_mixed, impl_name))
self.b('{')
self.b(' klass->%s = impl;' % class_member_name)
self.b('}')
self.b('')
# Return convenience function (static inline, in header)
self.h('/**')
self.h(' * %s:' % ret_name)
self.h(' * @context: The D-Bus method invocation context')
for (ctype, name) in out_args:
self.h(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
self.h(' *')
self.h(' * Return successfully by calling dbus_g_method_return().')
self.h(' * This inline function exists only to provide type-safety.')
self.h(' */')
tmp = (['DBusGMethodInvocation *context'] +
[ctype + name for (ctype, name) in out_args])
self.h('static inline')
self.h('/* this comment is to stop gtkdoc realising this is static */')
self.h(('void %s (' % ret_name) + (',\n '.join(tmp)) + ');')
self.h('static inline void')
self.h(('%s (' % ret_name) + (',\n '.join(tmp)) + ')')
self.h('{')
tmp = ['context'] + [name for (ctype, name) in out_args]
self.h(' dbus_g_method_return (' + ',\n '.join(tmp) + ');')
self.h('}')
self.h('')
return in_class
def get_signal_const_entry(self, signal):
assert self.node_name_uc is not None
return ('SIGNAL_%s_%s'
% (self.node_name_uc, signal.getAttribute('name')))
def do_signal(self, signal):
assert self.node_name_mixed is not None
in_base_init = []
# for signal: Thing::StuffHappened (s, u)
# we want to emit:
# void tp_svc_thing_emit_stuff_happened (gpointer instance,
# const char *arg0, guint arg1);
dbus_name = signal.getAttribute('name')
ugly_name = signal.getAttribute('tp:name-for-bindings')
if dbus_name != ugly_name.replace('_', ''):
raise AssertionError('Signal %s tp:name-for-bindings (%s) does '
'not match' % (dbus_name, ugly_name))
stub_name = (self.prefix_ + self.node_name_lc + '_emit_' +
ugly_name.lower())
const_name = self.get_signal_const_entry(signal)
# Gather arguments
args = []
for i in signal.getElementsByTagName('arg'):
name = i.getAttribute('name')
dtype = i.getAttribute('type')
tp_type = i.getAttribute('tp:type')
if name:
name = 'arg_' + name
else:
name = 'arg' + str(len(args))
ctype, gtype, marshaller, pointer = type_to_gtype(dtype)
if pointer:
ctype = 'const ' + ctype
struct = (ctype, name, gtype)
args.append(struct)
tmp = (['gpointer instance'] +
[ctype + name for (ctype, name, gtype) in args])
self.h(('void %s (' % stub_name) + (',\n '.join(tmp)) + ');')
# FIXME: emit docs
self.b('/**')
self.b(' * %s:' % stub_name)
self.b(' * @instance: The object implementing this interface')
for (ctype, name, gtype) in args:
self.b(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
self.b(' *')
self.b(' * Type-safe wrapper around g_signal_emit to emit the')
self.b(' * %s signal on interface %s.'
% (dbus_name, self.iface_name))
self.b(' */')
self.b('void')
self.b(('%s (' % stub_name) + (',\n '.join(tmp)) + ')')
self.b('{')
self.b(' g_assert (instance != NULL);')
self.b(' g_assert (G_TYPE_CHECK_INSTANCE_TYPE (instance, %s));'
% (self.current_gtype))
tmp = (['instance', '%s_signals[%s]' % (self.node_name_lc, const_name),
'0'] + [name for (ctype, name, gtype) in args])
self.b(' g_signal_emit (' + ',\n '.join(tmp) + ');')
self.b('}')
self.b('')
signal_name = dbus_gutils_wincaps_to_uscore(dbus_name).replace('_',
'-')
in_base_init.append(' /**')
in_base_init.append(' * %s%s::%s:'
% (self.Prefix, self.node_name_mixed, signal_name))
for (ctype, name, gtype) in args:
in_base_init.append(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
in_base_init.append(' *')
in_base_init.append(' * The %s D-Bus signal is emitted whenever '
'this GObject signal is.' % dbus_name)
in_base_init.append(' */')
in_base_init.append(' %s_signals[%s] ='
% (self.node_name_lc, const_name))
in_base_init.append(' g_signal_new ("%s",' % signal_name)
in_base_init.append(' G_OBJECT_CLASS_TYPE (klass),')
in_base_init.append(' G_SIGNAL_RUN_LAST|G_SIGNAL_DETAILED,')
in_base_init.append(' 0,')
in_base_init.append(' NULL, NULL,')
in_base_init.append(' %s,'
% signal_to_marshal_name(signal, self.signal_marshal_prefix))
in_base_init.append(' G_TYPE_NONE,')
tmp = ['%d' % len(args)] + [gtype for (ctype, name, gtype) in args]
in_base_init.append(' %s);' % ',\n '.join(tmp))
in_base_init.append('')
return in_base_init
def have_properties(self, nodes):
for node in nodes:
interface = node.getElementsByTagName('interface')[0]
if interface.getElementsByTagName('property'):
return True
return False
def __call__(self):
nodes = self.dom.getElementsByTagName('node')
nodes.sort(cmp_by_name)
self.h('#include <glib-object.h>')
self.h('#include <dbus/dbus-glib.h>')
if self.have_properties(nodes):
self.h('#include <telepathy-glib/dbus-properties-mixin.h>')
self.h('')
self.h('G_BEGIN_DECLS')
self.h('')
self.b('#include "%s.h"' % self.basename)
self.b('')
for header in self.headers:
self.b('#include %s' % header)
self.b('')
for node in nodes:
self.do_node(node)
self.h('')
self.h('G_END_DECLS')
self.b('')
for header in self.end_headers:
self.b('#include %s' % header)
self.h('')
self.b('')
open(self.basename + '.h', 'w').write('\n'.join(self.__header))
open(self.basename + '.c', 'w').write('\n'.join(self.__body))
def cmdline_error():
print """\
usage:
gen-ginterface [OPTIONS] xmlfile Prefix_
options:
--include='<header.h>' (may be repeated)
--include='"header.h"' (ditto)
--include-end='"header.h"' (ditto)
Include extra headers in the generated .c file
--signal-marshal-prefix='prefix'
Use the given prefix on generated signal marshallers (default is
prefix.lower()).
--filename='BASENAME'
Set the basename for the output files (default is prefix.lower()
+ 'ginterfaces')
--not-implemented-func='symbol'
Set action when methods not implemented in the interface vtable are
called. symbol must have signature
void symbol (DBusGMethodInvocation *context)
and return some sort of "not implemented" error via
dbus_g_method_return_error (context, ...)
"""
sys.exit(1)
if __name__ == '__main__':
from getopt import gnu_getopt
options, argv = gnu_getopt(sys.argv[1:], '',
['filename=', 'signal-marshal-prefix=',
'include=', 'include-end=',
'allow-unstable',
'not-implemented-func='])
try:
prefix = argv[1]
except IndexError:
cmdline_error()
basename = prefix.lower() + 'ginterfaces'
signal_marshal_prefix = prefix.lower().rstrip('_')
headers = []
end_headers = []
not_implemented_func = ''
allow_havoc = False
for option, value in options:
if option == '--filename':
basename = value
elif option == '--signal-marshal-prefix':
signal_marshal_prefix = value
elif option == '--include':
if value[0] not in '<"':
value = '"%s"' % value
headers.append(value)
elif option == '--include-end':
if value[0] not in '<"':
value = '"%s"' % value
end_headers.append(value)
elif option == '--not-implemented-func':
not_implemented_func = value
elif option == '--allow-unstable':
allow_havoc = True
try:
dom = xml.dom.minidom.parse(argv[0])
except IndexError:
cmdline_error()
Generator(dom, prefix, basename, signal_marshal_prefix, headers,
end_headers, not_implemented_func, allow_havoc)()
| GNOME/libsocialweb | tools/glib-ginterface-gen.py | Python | lgpl-2.1 | 28,402 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name = 'django-supertools',
version = "0.3",
description = "Generic tools for django apps.",
long_description = "",
keywords = 'django',
author = 'Jesús Espino García & Andrey Antukh',
author_email = 'jespinog@gmail.com, niwi@niwi.be',
url = 'https://github.com/kaleidos/django-supertools',
license = 'BSD',
include_package_data = True,
packages = find_packages(),
install_requires=[
'pytz',
],
classifiers = [
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Framework :: Django',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
]
)
| kaleidos/django-supertools | setup.py | Python | bsd-3-clause | 1,195 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 13:46:03 2015
@author: sthomp
"""
import numpy as np
import nufft as nu
#import matplotlib.pyplot as plt
#from scipy import interpolate
#from scipy import signal
#Fit a sine wave at a series of harmonics.
def dofft(time,flux, over):
"""Take a fourier transform using nufft
"""
    dt = (time[3] - time[1]) / 2.0
    n = len(time)
    endf = 1 / (dt * 2.0)  # Nyquist frequency
    step = endf / (n * over)
    freq = np.arange(0, endf, step)
    fft = nu.nufft3(time, flux, freq)
    amp = 2 * np.sqrt(fft.real**2 + fft.imag**2)
return(freq,amp)
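# A minimal usage sketch, assuming the nufft package imported above is
# available; the sine frequency and sampling values below are made up for
# illustration only.
def _dofft_example():
    t = np.linspace(0.0, 10.0, 500)
    flux = np.sin(2.0 * np.pi * 1.5 * t)
    freq, amp = dofft(t, flux, over=4)
    return freq, amp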
| barentsen/dave | susanplay/otherTools.py | Python | mit | 624 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def colorize(text, color):
colors = { "black" : 30,
"red" : 31,
"green" : 32,
"yellow" : 33,
"blue" : 34,
"purple" : 35,
"cyan" : 36,
"white" : 37,
"bold" : 1,
"underline" : 4,
"blink" : 5,
"inverse" : 6 }
return "\033[0;0;%dm%s\033[0m" % (colors[color], text)
def levenshtein(word1, word2, distance):
pass
| weezel/BandEventNotifier | utils.py | Python | isc | 578 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from app_metrics.compat import AUTH_USER_MODEL
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MetricItem.created'
db.alter_column('app_metrics_metricitem', 'created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'MetricItem.created'
db.alter_column('app_metrics_metricitem', 'created', self.gf('django.db.models.fields.DateField')())
models = {
'app_metrics.metric': {
'Meta': {'object_name': 'Metric'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60'})
},
'app_metrics.metricday': {
'Meta': {'object_name': 'MetricDay'},
'created': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['app_metrics.Metric']"}),
'num': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'app_metrics.metricitem': {
'Meta': {'object_name': 'MetricItem'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['app_metrics.Metric']"}),
'num': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'app_metrics.metricmonth': {
'Meta': {'object_name': 'MetricMonth'},
'created': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['app_metrics.Metric']"}),
'num': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'app_metrics.metricset': {
'Meta': {'object_name': 'MetricSet'},
'email_recipients': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm[AUTH_USER_MODEL]", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metrics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['app_metrics.Metric']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'no_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'send_daily': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'send_monthly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'send_weekly': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'app_metrics.metricweek': {
'Meta': {'object_name': 'MetricWeek'},
'created': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['app_metrics.Metric']"}),
'num': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'app_metrics.metricyear': {
'Meta': {'object_name': 'MetricYear'},
'created': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['app_metrics.Metric']"}),
'num': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['app_metrics']
| frankwiles/django-app-metrics | app_metrics/migrations/0002_alter_created_to_datetime.py | Python | bsd-3-clause | 7,469 |
# -*- coding: UTF-8 -*-
class WikipediaServerError(Exception):
def __init__(self, resp):
self.msg = u"Server error %s: \"%s\" on %s" % (
resp.status_code, resp.reason, resp.url)
super(WikipediaServerError, self).__init__(self.msg)
self.response = resp
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
class WikiError(Exception):
"""
    Base exception class for WikiMD errors.
"""
pass
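# A hedged usage sketch: any object exposing status_code, reason and url can be
# wrapped by WikipediaServerError; the response values below are made up.
class _FakeResponse(object):
    status_code = 503
    reason = "Service Unavailable"
    url = "https://en.wikipedia.org/w/api.php"
def _server_error_example():
    try:
        raise WikipediaServerError(_FakeResponse())
    except WikipediaServerError as err:
        return err.msg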
| tsalmon/WikiMD | wikimd/exceptions.py | Python | mit | 490 |
#!/usr/bin/env python
from collections import OrderedDict
import inspect
import json
import os
import re
import shutil
import io
from subprocess import call, Popen, PIPE
import sys, getopt
import pkg_resources
import subprocess
from jinja2 import Environment, FileSystemLoader
from drafter_postprocessing.json_processing import postprocess_drafter_json
from apib_extra_parse_utils import preprocess_apib_parameters_lines, start_apib_section, get_indentation
def print_api_spec_title_to_extra_file(input_file_path, extra_sections_file_path):
"""Extracts the title of the API specification and writes it to the extra sections file.
Arguments:
input_file_path -- File with the API specification
extra_sections_file_path -- File where we will write the extra sections
"""
with open(input_file_path, 'rU') as input_file_path, open(extra_sections_file_path, 'w') as extra_sections_file:
line = input_file_path.readline()
while (line != "" and not line.startswith("# ")):
line = input_file_path.readline()
extra_sections_file.write( line )
def separate_extra_sections_and_api_blueprint(input_file_path, extra_sections_file_path, API_blueprint_file_path):
"""Divides a Fiware API specification into extra sections and its API blueprint.
Arguments:
input_file_path -- A Fiware API specification file.
extra_sections_file_path -- Resulting file containing extra information about the API specification.
API_blueprint_file_path -- Resulting file containing the API blueprint of the Fiware API.
"""
print_api_spec_title_to_extra_file(input_file_path, extra_sections_file_path)
with open(input_file_path, 'rU') as input_file, open(extra_sections_file_path, 'a') as extra_sections_file, open(API_blueprint_file_path, 'w') as API_blueprint_file:
line_counter = 0
title_line_end = -1
apib_line_start = -1
metadata_section = True
apib_part = False
title_section = False
parameters_section = False
data_structures_section = 0
for line in input_file:
line_counter += 1
copy = False
if metadata_section and len(line.split(':')) == 1:
metadata_section = False
title_section = True
if metadata_section:
copy = False
else:
if title_section and line.startswith('##'):
title_section = False
if title_section:
copy = False
else:
if not apib_part:
apib_part = start_apib_section(line)
if title_line_end < 0:
title_line_end = line_counter
if not apib_part:
copy = True
else:
copy = False
if apib_line_start < 0:
apib_line_start = line_counter
if copy:
extra_sections_file.write(line)
else:
line = line.replace('\t',' ')
(line, parameters_section, data_structures_section) = preprocess_apib_parameters_lines(line,
parameters_section,
data_structures_section)
API_blueprint_file.write(line)
return (title_line_end, apib_line_start)
def convert_message_error_lines(drafter_output, title_line_end, apib_line_start):
"""Convert the error lines to match the extended FIWARE APIB file format
Arguments:
drafter_output -- Text with drafter postprocessing output
title_line_end -- Line where the specification title ends
apib_line_start -- Line where the specification of the API starts
"""
line_error_regex = re.compile( "line (\d+)," )
line_error_matches = line_error_regex.findall(drafter_output)
if line_error_matches:
line_error_set = set(line_error_matches)
for line_error in line_error_set:
if line_error >= apib_line_start:
line_error_substitute = int(line_error) - title_line_end + apib_line_start
drafter_output = drafter_output.replace("line {},".format(line_error), "line {},".format(line_error_substitute))
return drafter_output
def parse_api_blueprint_with_drafter(API_blueprint_file_path, API_blueprint_JSON_file_path, title_line_end, apib_line_start):
"""Parse the API Blueprint file with the API specification and save the output to a JSON file
Arguments:
API_blueprint_file_path -- An API Blueprint definition file
API_blueprint_JSON_file_path -- Path to JSON file
title_line_end -- Line where the specification title ends. Needed to reconvert error messages from drafter.
apib_line_start -- Line where the specification of the API starts. Needed to reconvert error messages from drafter.
"""
command_call = ["drafter", API_blueprint_file_path, "--output", API_blueprint_JSON_file_path, "--format", "json", "--use-line-num"]
[_, execution_error_output] = Popen(command_call, stderr=PIPE).communicate()
print convert_message_error_lines(execution_error_output, title_line_end, apib_line_start)
def generate_metadata_dictionary(metadata_section):
"""Generates a metadata section as a dictionary from a non-dictionary section
Arguments:
metadata_section -- Source metadata section
"""
metadata_section_dict = {}
metadata_section_dict['id'] = metadata_section['id']
metadata_section_dict['name'] = metadata_section['name']
metadata_section_dict['body'] = metadata_section['body']
metadata_section_dict['subsections'] = OrderedDict()
for subsection in metadata_section['subsections']:
metadata_section_dict['subsections'][subsection['name']] = generate_metadata_dictionary(subsection)
return metadata_section_dict
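# A hedged illustration of the section layout the helper above expects; the ids
# and names are made up, real sections come from the APIB parsing pipeline.
def _metadata_dictionary_example():
    section = {
        'id': 'introduction',
        'name': 'Introduction',
        'body': 'Some text.',
        'subsections': [
            {'id': 'scope', 'name': 'Scope', 'body': '', 'subsections': []},
        ],
    }
    return generate_metadata_dictionary(section)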
def copy_static_files(template_dir_path, dst_dir_path):
"""Copies the static files used by the resulting rendered site
Arguments:
template_dir_path -- path to the template directory
dst_dir_path -- destination directory
"""
subdirectories = ['/css', '/js', '/img', '/font']
for subdirectory in subdirectories:
if os.path.exists(dst_dir_path + subdirectory):
shutil.rmtree(dst_dir_path + subdirectory)
shutil.copytree(template_dir_path + subdirectory, dst_dir_path + subdirectory, ignore=shutil.ignore_patterns('*.pyc', '*.py'))
def render_api_blueprint(template_file_path, context_file_path, dst_dir_path):
"""Renders an API Blueprint context file with a Jinja2 template.
Arguments:
template_file_path -- The Jinja2 template path
context_file_path -- Path to the context file
dst_dir_path -- Path to save the compiled site
"""
env = Environment(extensions=["jinja2.ext.do",], loader=FileSystemLoader(os.path.dirname(template_file_path)))
env.filters['sort_payload_parameters'] = sort_payload_parameters
template = env.get_template(os.path.basename(template_file_path))
output = ""
with open(context_file_path, "rU") as contextFile:
output = template.render(json.load(contextFile))
rendered_HTML_filename = os.path.splitext(os.path.basename(context_file_path))[0]
rendered_HTML_path = os.path.join(dst_dir_path, rendered_HTML_filename + ".html")
with open(rendered_HTML_path, 'w') as output_file:
output_file.write(output.encode('utf-8'))
copy_static_files(os.path.dirname(template_file_path), dst_dir_path)
def create_directory_if_not_exists(dir_path):
"""Creates a directory with the given path if it doesn't exists yet"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def clear_directory(dir_path):
"""Removes all the files on a directory given its path"""
for file in os.listdir(dir_path):
file_path = os.path.join(dir_path, file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
print e
def compare_payload_parameter(paramA, paramB):
"""Returns a boolean indicating whether paramA < paramB (alphabetically)
Arguments:
paramA - first operand of the comparison
paramB - second operand of the comparison"""
    if (paramA['class'] == "property" and
            paramB['class'] == "property"):
        if paramA['content']['name']['literal'] < paramB['content']['name']['literal']:
            return -1
        else:
            return 1
    else:
        return 0
def sort_payload_parameters(parameters_list):
"""Jinja2 custom filter for ordering a list of parameters
Arguments:
parameters_list - list of payload parameters given by Drafter"""
return sorted(parameters_list, cmp=compare_payload_parameter)
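# A hedged illustration of the Drafter-style payload entries the comparator
# above expects; the property names are made up.
def _sort_payload_parameters_example():
    params = [
        {'class': 'property', 'content': {'name': {'literal': 'weight'}}},
        {'class': 'property', 'content': {'name': {'literal': 'colour'}}},
    ]
    return sort_payload_parameters(params)  # 'colour' sorts before 'weight'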
def render_api_specification(API_specification_path, template_path, dst_dir_path, clear_temporal_dir=True, cover=None):
"""Renders an API specification using a template and saves it to destination directory.
Arguments:
API_specification_path -- Path to API Blueprint specification
template_path -- The Jinja2 template path
dst_dir_path -- Path to save the compiled site
clear_temporal_dir -- Flag to clear temporary files generated by the script
"""
temp_dir_path = "/var/tmp/fiware_api_blueprint_renderer_tmp"
API_specification_file_name = os.path.splitext(os.path.basename(API_specification_path))[0]
API_extra_sections_file_path = os.path.join(temp_dir_path, API_specification_file_name + '.extras')
API_blueprint_file_path = os.path.join(temp_dir_path + '/' + API_specification_file_name + '.apib')
API_blueprint_JSON_file_path = os.path.join(temp_dir_path + '/' + API_specification_file_name + '.json')
create_directory_if_not_exists(temp_dir_path)
(title_line_end, apib_line_start) = separate_extra_sections_and_api_blueprint(API_specification_path,
API_extra_sections_file_path,
API_blueprint_file_path)
parse_api_blueprint_with_drafter(API_blueprint_file_path, API_blueprint_JSON_file_path, title_line_end, apib_line_start)
is_PDF = cover is not None
postprocess_drafter_json(API_blueprint_JSON_file_path,API_blueprint_file_path,API_extra_sections_file_path, is_PDF)
render_api_blueprint(template_path, API_blueprint_JSON_file_path, dst_dir_path)
if is_PDF: #cover needed for pdf
cover_json_path = os.path.join( dst_dir_path + '/' + 'cover' + '.json' )
shutil.move(API_blueprint_JSON_file_path, cover_json_path)
render_api_blueprint( cover, cover_json_path, dst_dir_path )
shutil.move(cover_json_path, API_blueprint_JSON_file_path)
return
if clear_temporal_dir == True:
clear_directory( temp_dir_path )
def print_package_dependencies():
"""Print the dependencies of package Fabre"""
print "\nPIP dependencies\n"
dependencies_matrix = [["Package", "Required version", "Installed version"]]
for package in pkg_resources.get_distribution("fiware_api_blueprint_renderer").requires():
package_header = str(package).split('>=')
package_name = package_header[0]
package_required_version = ">= " + package_header[1]
package_installed_info = subprocess.check_output(['pip', 'show', package_name])
version_regex = re.compile("Version: (.*)")
package_installed_version = version_regex.search(package_installed_info).group(1)
dependencies_matrix.append([package_name, package_required_version, package_installed_version])
pretty_print_matrix(dependencies_matrix)
system_dependencies_matrix = [["Package", "Required version", "Installed version"]]
system_dependencies = [('drafter', 'v0.1.9'), ('wkhtmltopdf', '0.12.2.1 (with patched qt)')]
for (package_name, package_required_version) in system_dependencies:
row = []
row.append(package_name)
row.append(package_required_version)
if package_name != 'wkhtmltopdf':
row.append(subprocess.check_output([package_name, '--version'])[0:-1])
else:
row.append(subprocess.check_output([package_name, '--version'])[0:-1].split(' ',1)[1])
system_dependencies_matrix.append(row)
print "\nSystem dependencies\n"
pretty_print_matrix(system_dependencies_matrix)
print "\n"
def pretty_print_matrix(matrix):
"""Pretty print the given matrix (as a table)"""
# Retrieve the size of the matrix longest element
longest_matrix_string_size = 0
for row in matrix:
longest_row_string_size = len(max(row, key=len))
if longest_row_string_size > longest_matrix_string_size:
longest_matrix_string_size = longest_row_string_size
# Print the matrix as a table
row_format = "{:<%i}" % (longest_matrix_string_size + 2)
row_format = row_format * len(matrix[0])
for row in matrix:
print "\t" + row_format.format(*row)
def main():
usage = "Usage: \n\t" + sys.argv[0] + " -i <api-spec-path> -o <dst-dir> [--pdf] [--no-clear-temp-dir] [--template]"
version = "fabre " + pkg_resources.require("fiware_api_blueprint_renderer")[0].version
default_theme = os.path.dirname(__file__)+"/../themes/default_theme/api-specification.tpl"
pdf_template_path= os.path.dirname(__file__)+"/../themes/default_theme/api-specification.tpl"
cover_template_path= os.path.dirname(__file__)+"/../themes/default_theme/cover.tpl"
template_path= default_theme
clear_temporal_dir = True
API_specification_path = None
dst_dir_path = None
temp_pdf_path = "/var/tmp/fiware_api_blueprint_renderer_tmp_pdf/"
pdf = False
try:
opts, args = getopt.getopt(sys.argv[1:],"hvi:o:ct:",["version","ifile=","odir=","no-clear-temp-dir","template=","pdf","version-dependencies"])
except getopt.GetoptError:
print usage
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print usage
sys.exit()
elif opt in ("-v", "--version"):
print version
sys.exit()
elif opt == '--version-dependencies':
print version
print_package_dependencies()
sys.exit()
elif opt in ("-i", "--input"):
API_specification_path = arg
elif opt in ("-o", "--output"):
dst_dir_path = arg
elif opt in ("-t", "--template"):
template_path = arg
elif opt in ("-c", "--no-clear-temp-dir"):
clear_temporal_dir = False
elif opt in ("--pdf"):
pdf = True
#if no template is specified, uses the default pdf template
if not ('-t' in zip(*opts)[0] or '--template' in zip(*opts)[0]):
template_path = pdf_template_path
if API_specification_path is None:
print "API specification file must be specified"
print usage
sys.exit(3)
if dst_dir_path is None:
print "Destination directory must be specified"
print usage
sys.exit(4)
if pdf:
create_directory_if_not_exists(temp_pdf_path)
rendered_HTML_filename = os.path.splitext(os.path.basename(API_specification_path))[0]
rendered_HTML_path = os.path.join(temp_pdf_path, rendered_HTML_filename + ".html")
rendered_HTML_cover = os.path.join(temp_pdf_path, "cover" + ".html")
if ".pdf" not in dst_dir_path:
create_directory_if_not_exists(dst_dir_path)
dst_dir_path = os.path.join(dst_dir_path, rendered_HTML_filename + ".pdf")
render_api_specification(API_specification_path, template_path, temp_pdf_path, clear_temporal_dir, cover_template_path)
call( ["wkhtmltopdf", '-d', '125', '--page-size','A4', "page", "file://"+rendered_HTML_cover ,"toc" ,"page", "file://"+rendered_HTML_path, '--footer-center', "Page [page]",'--footer-font-size', '8', '--footer-spacing', '3','--run-script', "setInterval(function(){if(document.readyState=='complete') window.status='done';},100)", "--window-status", "done", dst_dir_path ])
else:
create_directory_if_not_exists( dst_dir_path )
render_api_specification( API_specification_path, template_path, dst_dir_path, clear_temporal_dir, None)
sys.exit(0)
if __name__ == "__main__":
main()
| Lenijas/test-travisci | fiware_api_blueprint_renderer/src/renderer.py | Python | bsd-3-clause | 16,781 |
# -*- coding: utf-8 -*-
import collections
import datetime
import tavi.documents
class MongoCommand(object):
def __init__(self, target, **kwargs):
self.target = target
self.kwargs = kwargs
@property
def name(self):
raise "Not Implemented"
def execute(self):
self._now = datetime.datetime.utcnow()
if hasattr(self.target, "last_modified_at"):
self.old_last_modified_at = self.target.last_modified_at
self._update_field("last_modified_at", self._now)
def reset_fields(self):
if hasattr(self.target, "last_modified_at"):
self._update_field("last_modified_at", self.old_last_modified_at)
def _update_field(self, name, timestamp):
for field in self.target.fields:
value = getattr(self.target, field)
if name == field:
setattr(self.target, name, timestamp)
elif isinstance(value, collections.Iterable):
for item in value:
if isinstance(item, tavi.documents.EmbeddedDocument):
setattr(item, name, timestamp)
elif isinstance(value, tavi.documents.EmbeddedDocument):
if hasattr(value, name):
setattr(value, name, timestamp)
class Insert(MongoCommand):
@property
def name(self):
return "INSERT"
def execute(self):
super(Insert, self).execute()
if hasattr(self.target, "created_at"):
self.old_created_at = self.target.created_at
self._update_field("created_at", self._now)
collection = self.target.__class__.collection
values = self.target.mongo_field_values
self.target._id = collection.insert(values, **self.kwargs)
def reset_fields(self):
super(Insert, self).reset_fields()
if hasattr(self.target, "created_at"):
self._update_field("created_at", self.old_created_at)
class Update(MongoCommand):
@property
def name(self):
return "UPDATE"
def execute(self):
super(Update, self).execute()
self.kwargs["upsert"] = True
self.target.__class__.collection.update(
{"_id": self.target._id},
{"$set": self.target.mongo_field_values},
**self.kwargs)
| bnadlerjr/tavi | tavi/commands.py | Python | mit | 2,311 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class OriginationUrlList(ListResource):
""" """
def __init__(self, version, trunk_sid):
"""
Initialize the OriginationUrlList
:param Version version: Version that contains the resource
:param trunk_sid: The SID of the Trunk that owns the Origination URL
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlList
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlList
"""
super(OriginationUrlList, self).__init__(version)
# Path Solution
self._solution = {'trunk_sid': trunk_sid, }
self._uri = '/Trunks/{trunk_sid}/OriginationUrls'.format(**self._solution)
def create(self, weight, priority, enabled, friendly_name, sip_url):
"""
Create a new OriginationUrlInstance
:param unicode weight: The value that determines the relative load the URI should receive compared to others with the same priority
:param unicode priority: The relative importance of the URI
:param bool enabled: Whether the URL is enabled
:param unicode friendly_name: A string to describe the resource
:param unicode sip_url: The SIP address you want Twilio to route your Origination calls to
:returns: Newly created OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
"""
data = values.of({
'Weight': weight,
'Priority': priority,
'Enabled': enabled,
'FriendlyName': friendly_name,
'SipUrl': sip_url,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return OriginationUrlInstance(self._version, payload, trunk_sid=self._solution['trunk_sid'], )
def stream(self, limit=None, page_size=None):
"""
Streams OriginationUrlInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists OriginationUrlInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of OriginationUrlInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return OriginationUrlPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of OriginationUrlInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return OriginationUrlPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a OriginationUrlContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
"""
return OriginationUrlContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a OriginationUrlContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
"""
return OriginationUrlContext(self._version, trunk_sid=self._solution['trunk_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Trunking.V1.OriginationUrlList>'
class OriginationUrlPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the OriginationUrlPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param trunk_sid: The SID of the Trunk that owns the Origination URL
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlPage
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlPage
"""
super(OriginationUrlPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of OriginationUrlInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
"""
return OriginationUrlInstance(self._version, payload, trunk_sid=self._solution['trunk_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Trunking.V1.OriginationUrlPage>'
class OriginationUrlContext(InstanceContext):
""" """
def __init__(self, version, trunk_sid, sid):
"""
Initialize the OriginationUrlContext
:param Version version: Version that contains the resource
:param trunk_sid: The SID of the Trunk from which to fetch the OriginationUrl
:param sid: The unique string that identifies the resource
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
"""
super(OriginationUrlContext, self).__init__(version)
# Path Solution
self._solution = {'trunk_sid': trunk_sid, 'sid': sid, }
self._uri = '/Trunks/{trunk_sid}/OriginationUrls/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a OriginationUrlInstance
:returns: Fetched OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return OriginationUrlInstance(
self._version,
payload,
trunk_sid=self._solution['trunk_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the OriginationUrlInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, weight=values.unset, priority=values.unset,
enabled=values.unset, friendly_name=values.unset,
sip_url=values.unset):
"""
Update the OriginationUrlInstance
:param unicode weight: The value that determines the relative load the URI should receive compared to others with the same priority
:param unicode priority: The relative importance of the URI
:param bool enabled: Whether the URL is enabled
:param unicode friendly_name: A string to describe the resource
:param unicode sip_url: The SIP address you want Twilio to route your Origination calls to
:returns: Updated OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
"""
data = values.of({
'Weight': weight,
'Priority': priority,
'Enabled': enabled,
'FriendlyName': friendly_name,
'SipUrl': sip_url,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return OriginationUrlInstance(
self._version,
payload,
trunk_sid=self._solution['trunk_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Trunking.V1.OriginationUrlContext {}>'.format(context)
class OriginationUrlInstance(InstanceResource):
""" """
def __init__(self, version, payload, trunk_sid, sid=None):
"""
Initialize the OriginationUrlInstance
:returns: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
"""
super(OriginationUrlInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'sid': payload.get('sid'),
'trunk_sid': payload.get('trunk_sid'),
'weight': deserialize.integer(payload.get('weight')),
'enabled': payload.get('enabled'),
'sip_url': payload.get('sip_url'),
'friendly_name': payload.get('friendly_name'),
'priority': deserialize.integer(payload.get('priority')),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'trunk_sid': trunk_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: OriginationUrlContext for this OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlContext
"""
if self._context is None:
self._context = OriginationUrlContext(
self._version,
trunk_sid=self._solution['trunk_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def trunk_sid(self):
"""
:returns: The SID of the Trunk that owns the Origination URL
:rtype: unicode
"""
return self._properties['trunk_sid']
@property
def weight(self):
"""
:returns: The value that determines the relative load the URI should receive compared to others with the same priority
:rtype: unicode
"""
return self._properties['weight']
@property
def enabled(self):
"""
:returns: Whether the URL is enabled
:rtype: bool
"""
return self._properties['enabled']
@property
def sip_url(self):
"""
:returns: The SIP address you want Twilio to route your Origination calls to
:rtype: unicode
"""
return self._properties['sip_url']
@property
def friendly_name(self):
"""
:returns: The string that you assigned to describe the resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def priority(self):
"""
:returns: The relative importance of the URI
:rtype: unicode
"""
return self._properties['priority']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a OriginationUrlInstance
:returns: Fetched OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the OriginationUrlInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, weight=values.unset, priority=values.unset,
enabled=values.unset, friendly_name=values.unset,
sip_url=values.unset):
"""
Update the OriginationUrlInstance
:param unicode weight: The value that determines the relative load the URI should receive compared to others with the same priority
:param unicode priority: The relative importance of the URI
:param bool enabled: Whether the URL is enabled
:param unicode friendly_name: A string to describe the resource
:param unicode sip_url: The SIP address you want Twilio to route your Origination calls to
:returns: Updated OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
"""
return self._proxy.update(
weight=weight,
priority=priority,
enabled=enabled,
friendly_name=friendly_name,
sip_url=sip_url,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Trunking.V1.OriginationUrlInstance {}>'.format(context)
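# A hedged usage sketch (not part of this generated module): with a configured
# twilio.rest.Client the list resource above is usually reached through the
# trunking domain; the trunk SID wiring is assumed and the argument values are
# placeholders.
def _origination_url_example(client, trunk_sid):
    return client.trunking.trunks(trunk_sid).origination_urls.create(
        weight=10,
        priority=20,
        enabled=True,
        friendly_name='example origination URL',
        sip_url='sip:example@sip.example.com',
    )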
| tysonholub/twilio-python | twilio/rest/trunking/v1/trunk/origination_url.py | Python | mit | 17,613 |
from src.fflag import FeatureFlag
import pytest
class ConfigStub():
def __init__(self, input_dict):
self.conf = input_dict
def get(self, sec_name, key_name=None):
if self.conf is None:
raise Exception
        if sec_name in self.conf:
if key_name is None:
return self.conf[sec_name]
else:
if key_name in self.conf[sec_name]:
return self.conf[sec_name][key_name]
else:
return None
else:
return None
def test_init_redis_client():
redis_config = ConfigStub({
'ConnectionSection':{
'redis.host':'localhost',
'redis.port':6379
}
})
redis = FeatureFlag._init_redis_client(redis_config, True)
if redis is None:
pytest.fail('Unable to init local redis client')
def test_get_feature_flag():
ff = FeatureFlag(True)
ff._redis_client = ConfigStub({
'foo':'bar'
})
assert(ff.get_feature_flag('foo') is not None)
| rchakra3/simple-flask-app | test/test_ff.py | Python | gpl-2.0 | 865 |
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import sys
sys.path.append("../..")
import pyalgotrade.logger
import lxml.html
import symbolsxml
# pyalgotrade.logger.file_log = "get_sp500_symbols.log"
logger = pyalgotrade.logger.getLogger("get_sp500_symbols")
TICKER_SYMBOL_COL = 0
COMPANY_COL = 1
GICS_COL = 3
GICS_SUB_INDUSTRY_COL = 4
def get_html():
logger.info("Getting S&P 500 Component Stocks from Wikipedia")
ret = lxml.html.parse("http://en.wikipedia.org/wiki/List_of_S%26P_500_companies")
return ret
def find_table(htmlTree):
logger.info("Finding the right table")
ret = None
tables = htmlTree.xpath("//table[@class='wikitable sortable']")
for table in tables:
headers = table.xpath("tr[1]/th")
if len(headers) > 5:
if headers[TICKER_SYMBOL_COL].xpath("a[1]")[0].text != "Ticker symbol":
continue
if headers[COMPANY_COL].text != "Company":
continue
if headers[GICS_COL].xpath("a[1]")[0].text != "GICS":
continue
if headers[GICS_SUB_INDUSTRY_COL].text != "GICS Sub Industry":
continue
ret = table
break
return ret
def parse_results(table):
ret = symbolsxml.Writer()
logger.info("Parsing table")
rows = table.xpath("tr")
for row in rows[1:]:
cols = row.xpath("td")
tickerSymbol = cols[TICKER_SYMBOL_COL].xpath("a[1]")[0].text
company = cols[COMPANY_COL].xpath("a[1]")[0].text
gics = cols[GICS_COL].text
gicsSubIndustry = cols[GICS_SUB_INDUSTRY_COL].text
if gicsSubIndustry is None:
gicsSubIndustry = ""
ret.addStock(tickerSymbol, company, gics, gicsSubIndustry)
return ret
def main():
try:
htmlTree = get_html()
table = find_table(htmlTree)
if table is None:
raise Exception("S&P 500 Component Stocks table not found")
symbolsXML = parse_results(table)
logger.info("Writing sp500.xml")
symbolsXML.write("sp500.xml")
except Exception, e:
logger.error(str(e))
if __name__ == "__main__":
main()
| cgqyh/pyalgotrade-mod | tools/symbols/get_sp500_symbols.py | Python | apache-2.0 | 2,820 |
import datetime
import sys
import pdb
from directory import directory
if False:
pdb.set_trace() # avoid warning message from pyflakes
class Logger(object):
    # from Stack Overflow: how do I duplicate sys.stdout to a log file in Python
def __init__(self, logfile_path=None, logfile_mode='w', base_name=None):
def path(s):
return directory('log') + s + datetime.datetime.now().isoformat('T') + '.log'
self.terminal = sys.stdout
clean_path = logfile_path.replace(':', '-') if base_name is None else path(base_name)
self.log = open(clean_path, logfile_mode)
def write(self, message):
self.terminal.write(message)
self.log.write(message)
    def flush(self):
        # required so callers treating this as a file-like object can call flush()
        pass
if False:
# usage example
sys.stdout = Logger('path/to/log/file')
# now print statements write on both stdout and the log file
| rlowrance/re-local-linear | Logger.py | Python | mit | 886 |
"""
Configuration for datadog Django app
"""
from django.apps import AppConfig
from django.conf import settings
from dogapi import dog_http_api, dog_stats_api
class DatadogConfig(AppConfig):
"""
Configuration class for datadog Django app
"""
name = 'openedx.core.djangoapps.datadog'
verbose_name = "Datadog"
def ready(self):
"""
Initialize connection to datadog during django startup.
Configure using DATADOG dictionary in the django project settings.
"""
# By default use the statsd agent
options = {'statsd': True}
if hasattr(settings, 'DATADOG'):
options.update(settings.DATADOG)
# Not all arguments are documented.
# Look at the source code for details.
dog_stats_api.start(**options)
dog_http_api.api_key = options.get('api_key')
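# A hedged illustration (not read by Django from here): the DATADOG dict this
# app expects in project settings. 'api_key' and 'statsd' are the keys used in
# ready() above; the api_key value is a placeholder.
EXAMPLE_DATADOG_SETTINGS = {
    'statsd': True,  # keep routing metrics through the local statsd agent
    'api_key': 'replace-with-your-datadog-api-key',
}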
| BehavioralInsightsTeam/edx-platform | openedx/core/djangoapps/datadog/apps.py | Python | agpl-3.0 | 866 |
#!/usr/bin/env python
'''
UBlox binary protocol handling
Copyright Andrew Tridgell, October 2012
Released under GNU GPL version 3 or later
'''
import struct
from datetime import datetime
import time, os
import sys
# specify Python version
if sys.version_info[0] < 3: # we're on python 2.x.x
PYTHON_VERSION = 2
else:
PYTHON_VERSION = 3
# protocol constants
PREAMBLE1 = 0xb5
PREAMBLE2 = 0x62
# message classes
CLASS_NAV = 0x01
CLASS_RXM = 0x02
CLASS_INF = 0x04
CLASS_ACK = 0x05
CLASS_CFG = 0x06
CLASS_MON = 0x0A
CLASS_AID = 0x0B
CLASS_TIM = 0x0D
CLASS_ESF = 0x10
# ACK messages
MSG_ACK_NACK = 0x00
MSG_ACK_ACK = 0x01
# NAV messages
MSG_NAV_POSECEF = 0x1
MSG_NAV_POSLLH = 0x2
MSG_NAV_STATUS = 0x3
MSG_NAV_DOP = 0x4
MSG_NAV_SOL = 0x6
MSG_NAV_POSUTM = 0x8
MSG_NAV_VELNED = 0x12
MSG_NAV_VELECEF = 0x11
MSG_NAV_TIMEGPS = 0x20
MSG_NAV_TIMEUTC = 0x21
MSG_NAV_CLOCK = 0x22
MSG_NAV_SVINFO = 0x30
MSG_NAV_AOPSTATUS = 0x60
MSG_NAV_DGPS = 0x31
MSG_NAV_DOP = 0x04
MSG_NAV_EKFSTATUS = 0x40
MSG_NAV_SBAS = 0x32
MSG_NAV_SOL = 0x06
# RXM messages
MSG_RXM_RAW = 0x10
MSG_RXM_SFRB = 0x11
MSG_RXM_SVSI = 0x20
MSG_RXM_EPH = 0x31
MSG_RXM_ALM = 0x30
MSG_RXM_PMREQ = 0x41
# AID messages
MSG_AID_ALM = 0x30
MSG_AID_EPH = 0x31
MSG_AID_ALPSRV = 0x32
MSG_AID_AOP = 0x33
MSG_AID_DATA = 0x10
MSG_AID_ALP = 0x50
MSG_AID_DATA = 0x10
MSG_AID_HUI = 0x02
MSG_AID_INI = 0x01
MSG_AID_REQ = 0x00
# CFG messages
MSG_CFG_PRT = 0x00
MSG_CFG_ANT = 0x13
MSG_CFG_DAT = 0x06
MSG_CFG_EKF = 0x12
MSG_CFG_ESFGWT = 0x29
MSG_CFG_CFG = 0x09
MSG_CFG_USB = 0x1b
MSG_CFG_RATE = 0x08
MSG_CFG_SET_RATE = 0x01
MSG_CFG_NAV5 = 0x24
MSG_CFG_FXN = 0x0E
MSG_CFG_INF = 0x02
MSG_CFG_ITFM = 0x39
MSG_CFG_MSG = 0x01
MSG_CFG_NAVX5 = 0x23
MSG_CFG_NMEA = 0x17
MSG_CFG_NVS = 0x22
MSG_CFG_PM2 = 0x3B
MSG_CFG_PM = 0x32
MSG_CFG_RINV = 0x34
MSG_CFG_RST = 0x04
MSG_CFG_RXM = 0x11
MSG_CFG_SBAS = 0x16
MSG_CFG_TMODE2 = 0x3D
MSG_CFG_TMODE = 0x1D
MSG_CFG_TPS = 0x31
MSG_CFG_TP = 0x07
MSG_CFG_GNSS = 0x3E
# ESF messages
MSG_ESF_MEAS = 0x02
MSG_ESF_STATUS = 0x10
# INF messages
MSG_INF_DEBUG = 0x04
MSG_INF_ERROR = 0x00
MSG_INF_NOTICE = 0x02
MSG_INF_TEST = 0x03
MSG_INF_WARNING= 0x01
# MON messages
MSG_MON_SCHD = 0x01
MSG_MON_HW = 0x09
MSG_MON_HW2 = 0x0B
MSG_MON_IO = 0x02
MSG_MON_MSGPP = 0x06
MSG_MON_RXBUF = 0x07
MSG_MON_RXR = 0x21
MSG_MON_TXBUF = 0x08
MSG_MON_VER = 0x04
# TIM messages
MSG_TIM_TP = 0x01
MSG_TIM_TM2 = 0x03
MSG_TIM_SVIN = 0x04
MSG_TIM_VRFY = 0x06
# port IDs
PORT_DDC =0
PORT_SERIAL1=1
PORT_SERIAL2=2
PORT_USB =3
PORT_SPI =4
# dynamic models
DYNAMIC_MODEL_PORTABLE = 0
DYNAMIC_MODEL_STATIONARY = 2
DYNAMIC_MODEL_PEDESTRIAN = 3
DYNAMIC_MODEL_AUTOMOTIVE = 4
DYNAMIC_MODEL_SEA = 5
DYNAMIC_MODEL_AIRBORNE1G = 6
DYNAMIC_MODEL_AIRBORNE2G = 7
DYNAMIC_MODEL_AIRBORNE4G = 8
#reset items
RESET_HOT = 0
RESET_WARM = 1
RESET_COLD = 0xFFFF
RESET_HW = 0
RESET_SW = 1
RESET_SW_GPS = 2
RESET_HW_GRACEFUL = 4
RESET_GPS_STOP = 8
RESET_GPS_START = 9
class UBloxError(Exception):
'''Ublox error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class UBloxAttrDict(dict):
'''allow dictionary members as attributes'''
def __init__(self):
dict.__init__(self)
def __getattr__(self, name):
try:
return self.__getitem__(name)
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
        if name in self.__dict__:
# allow set on normal attributes
dict.__setattr__(self, name, value)
else:
self.__setitem__(name, value)
def ArrayParse(field):
'''parse an array descriptor'''
arridx = field.find('[')
if arridx == -1:
return (field, -1)
alen = int(field[arridx+1:-1])
fieldname = field[:arridx]
return (fieldname, alen)
class UBloxDescriptor:
'''class used to describe the layout of a UBlox message'''
def __init__(self, name, msg_format, fields=[], count_field=None, format2=None, fields2=None):
self.name = name
self.msg_format = msg_format
self.fields = fields
self.count_field = count_field
self.format2 = format2
self.fields2 = fields2
def getf(self, fmt, buf, size):
f = list(struct.unpack(fmt, buf[:size]))
return f
def unpack(self, msg):
'''unpack a UBloxMessage, creating the .fields and ._recs attributes in msg'''
msg._fields = {}
        # unpack main message blocks. A comma in msg_format separates the blocks.
formats = self.msg_format.split(',')
buf = msg._buf[6:-2]
count = 0
msg._recs = []
fields = self.fields[:]
for fmt in formats:
size1 = struct.calcsize(fmt)
if size1 > len(buf):
raise UBloxError("%s INVALID_SIZE1=%u" % (self.name, len(buf)))
f1 = self.getf(fmt, buf, size1)
i = 0
while i < len(f1):
field = fields.pop(0)
(fieldname, alen) = ArrayParse(field)
if alen == -1:
msg._fields[fieldname] = f1[i]
if self.count_field == fieldname:
count = int(f1[i])
i += 1
else:
msg._fields[fieldname] = [0]*alen
for a in range(alen):
msg._fields[fieldname][a] = f1[i]
i += 1
buf = buf[size1:]
if len(buf) == 0:
break
if self.count_field == '_remaining':
            count = len(buf) // struct.calcsize(self.format2)
if count == 0:
msg._unpacked = True
if len(buf) != 0:
raise UBloxError("EXTRA_BYTES=%u" % len(buf))
return
size2 = struct.calcsize(self.format2)
for c in range(count):
r = UBloxAttrDict()
if size2 > len(buf):
raise UBloxError("INVALID_SIZE=%u, " % len(buf))
f2 = self.getf(self.format2, buf, size2)
for i in range(len(self.fields2)):
r[self.fields2[i]] = f2[i]
buf = buf[size2:]
msg._recs.append(r)
if len(buf) != 0:
raise UBloxError("EXTRA_BYTES=%u" % len(buf))
msg._unpacked = True
def pack(self, msg, msg_class=None, msg_id=None):
'''pack a UBloxMessage from the .fields and ._recs attributes in msg'''
f1 = []
if msg_class is None:
msg_class = msg.msg_class()
if msg_id is None:
msg_id = msg.msg_id()
msg._buf = ''
fields = self.fields[:]
for f in fields:
(fieldname, alen) = ArrayParse(f)
if not fieldname in msg._fields:
break
if alen == -1:
f1.append(msg._fields[fieldname])
else:
for a in range(alen):
f1.append(msg._fields[fieldname][a])
try:
# try full length message
fmt = self.msg_format.replace(',', '')
msg._buf = struct.pack(fmt, *tuple(f1))
except Exception as e:
# try without optional part
fmt = self.msg_format.split(',')[0]
msg._buf = struct.pack(fmt, *tuple(f1))
length = len(msg._buf)
if msg._recs:
length += len(msg._recs) * struct.calcsize(self.format2)
header = struct.pack('<BBBBH', PREAMBLE1, PREAMBLE2, msg_class, msg_id, length)
msg._buf = header + msg._buf
for r in msg._recs:
f2 = []
for f in self.fields2:
f2.append(r[f])
msg._buf += struct.pack(self.format2, *tuple(f2))
msg._buf += struct.pack('<BB', *msg.checksum(data=msg._buf[2:]))
def format(self, msg):
'''return a formatted string for a message'''
if not msg._unpacked:
self.unpack(msg)
ret = self.name + ': '
for f in self.fields:
(fieldname, alen) = ArrayParse(f)
if not fieldname in msg._fields:
continue
v = msg._fields[fieldname]
if isinstance(v, list):
ret += '%s=[' % fieldname
for a in range(alen):
ret += '%s, ' % v[a]
ret = ret[:-2] + '], '
elif isinstance(v, str):
ret += '%s="%s", ' % (f, v.rstrip(' \0'))
else:
ret += '%s=%s, ' % (f, v)
for r in msg._recs:
ret += '[ '
for f in self.fields2:
v = r[f]
ret += '%s=%s, ' % (f, v)
ret = ret[:-2] + ' ], '
return ret[:-2]
# list of supported message types.
msg_types = {
(CLASS_ACK, MSG_ACK_ACK) : UBloxDescriptor('ACK_ACK',
'<BB',
['clsID', 'msgID']),
(CLASS_ACK, MSG_ACK_NACK) : UBloxDescriptor('ACK_NACK',
'<BB',
['clsID', 'msgID']),
(CLASS_CFG, MSG_CFG_USB) : UBloxDescriptor('CFG_USB',
'<HHHHHH32s32s32s',
['vendorID', 'productID', 'reserved1', 'reserved2', 'powerConsumption',
'flags', 'vendorString', 'productString', 'serialNumber']),
(CLASS_CFG, MSG_CFG_PRT) : UBloxDescriptor('CFG_PRT',
'<BBHIIHHHH',
['portID', 'reserved0', 'txReady', 'mode', 'baudRate', 'inProtoMask',
'outProtoMask', 'reserved4', 'reserved5']),
(CLASS_CFG, MSG_CFG_CFG) : UBloxDescriptor('CFG_CFG',
'<III,B',
['clearMask', 'saveMask', 'loadMask', 'deviceMask']),
(CLASS_CFG, MSG_CFG_RST) : UBloxDescriptor('CFG_RST',
'<HBB',
['navBbrMask ', 'resetMode', 'reserved1']),
(CLASS_CFG, MSG_CFG_SBAS) : UBloxDescriptor('CFG_SBAS',
'<BBBBI',
['mode', 'usage', 'maxSBAS', 'scanmode2', 'scanmode1']),
(CLASS_CFG, MSG_CFG_GNSS) : UBloxDescriptor('CFG_GNSS',
'<BBBBBBBBI',
['msgVer', 'numTrkChHw', 'numTrkChUse', 'numConfigBlocks', 'gnssId',
'resTrkCh', 'maxTrkCh', 'resetved1', 'flags']),
(CLASS_CFG, MSG_CFG_RATE) : UBloxDescriptor('CFG_RATE',
'<HHH',
['measRate', 'navRate', 'timeRef']),
(CLASS_CFG, MSG_CFG_MSG) : UBloxDescriptor('CFG_MSG',
'<BB6B',
['msgClass', 'msgId', 'rates[6]']),
(CLASS_NAV, MSG_NAV_POSLLH) : UBloxDescriptor('NAV_POSLLH',
'<IiiiiII',
['iTOW', 'Longitude', 'Latitude', 'height', 'hMSL', 'hAcc', 'vAcc']),
(CLASS_NAV, MSG_NAV_VELNED) : UBloxDescriptor('NAV_VELNED',
'<IiiiIIiII',
['iTOW', 'velN', 'velE', 'velD', 'speed', 'gSpeed', 'heading',
'sAcc', 'cAcc']),
(CLASS_NAV, MSG_NAV_DOP) : UBloxDescriptor('NAV_DOP',
'<IHHHHHHH',
['iTOW', 'gDOP', 'pDOP', 'tDOP', 'vDOP', 'hDOP', 'nDOP', 'eDOP']),
(CLASS_NAV, MSG_NAV_STATUS) : UBloxDescriptor('NAV_STATUS',
'<IBBBBII',
['iTOW', 'gpsFix', 'flags', 'fixStat', 'flags2', 'ttff', 'msss']),
(CLASS_NAV, MSG_NAV_SOL) : UBloxDescriptor('NAV_SOL',
'<IihBBiiiIiiiIHBBI',
['iTOW', 'fTOW', 'week', 'gpsFix', 'flags', 'ecefX', 'ecefY', 'ecefZ',
'pAcc', 'ecefVX', 'ecefVY', 'ecefVZ', 'sAcc', 'pDOP', 'reserved1',
'numSV', 'reserved2']),
(CLASS_NAV, MSG_NAV_POSUTM) : UBloxDescriptor('NAV_POSUTM',
'<Iiiibb',
['iTOW', 'East', 'North', 'Alt', 'Zone', 'Hem']),
(CLASS_NAV, MSG_NAV_SBAS) : UBloxDescriptor('NAV_SBAS',
'<IBBbBBBBB',
['iTOW', 'geo', 'mode', 'sys', 'service', 'cnt', 'reserved01', 'reserved02', 'reserved03' ],
'cnt',
'BBBBBBhHh',
['svid', 'flags', 'udre', 'svSys', 'svService', 'reserved1',
'prc', 'reserved2', 'ic']),
(CLASS_NAV, MSG_NAV_POSECEF): UBloxDescriptor('NAV_POSECEF',
'<IiiiI',
['iTOW', 'ecefX', 'ecefY', 'ecefZ', 'pAcc']),
(CLASS_NAV, MSG_NAV_VELECEF): UBloxDescriptor('NAV_VELECEF',
'<IiiiI',
['iTOW', 'ecefVX', 'ecefVY', 'ecefVZ', 'sAcc']),
(CLASS_NAV, MSG_NAV_TIMEGPS): UBloxDescriptor('NAV_TIMEGPS',
'<IihbBI',
['iTOW', 'fTOW', 'week', 'leapS', 'valid', 'tAcc']),
(CLASS_NAV, MSG_NAV_TIMEUTC): UBloxDescriptor('NAV_TIMEUTC',
'<IIiHBBBBBB',
['iTOW', 'tAcc', 'nano', 'year', 'month', 'day', 'hour', 'min', 'sec', 'valid']),
(CLASS_NAV, MSG_NAV_CLOCK) : UBloxDescriptor('NAV_CLOCK',
'<IiiII',
['iTOW', 'clkB', 'clkD', 'tAcc', 'fAcc']),
(CLASS_NAV, MSG_NAV_DGPS) : UBloxDescriptor('NAV_DGPS',
'<IihhBBH',
['iTOW', 'age', 'baseId', 'baseHealth', 'numCh', 'status', 'reserved1'],
'numCh',
'<BBHff',
['svid', 'flags', 'ageC', 'prc', 'prrc']),
(CLASS_NAV, MSG_NAV_SVINFO) : UBloxDescriptor('NAV_SVINFO',
'<IBBH',
['iTOW', 'numCh', 'globalFlags', 'reserved2'],
'numCh',
'<BBBBBbhi',
['chn', 'svid', 'flags', 'quality', 'cno', 'elev', 'azim', 'prRes']),
(CLASS_RXM, MSG_RXM_SVSI) : UBloxDescriptor('RXM_SVSI',
'<IhBB',
['iTOW', 'week', 'numVis', 'numSV'],
'numSV',
'<BBhbB',
['svid', 'svFlag', 'azim', 'elev', 'age']),
(CLASS_RXM, MSG_RXM_EPH) : UBloxDescriptor('RXM_EPH',
'<II , 8I 8I 8I',
['svid', 'how',
'sf1d[8]', 'sf2d[8]', 'sf3d[8]']),
(CLASS_AID, MSG_AID_EPH) : UBloxDescriptor('AID_EPH',
'<II , 8I 8I 8I',
['svid', 'how',
'sf1d[8]', 'sf2d[8]', 'sf3d[8]']),
(CLASS_AID, MSG_AID_AOP) : UBloxDescriptor('AID_AOP',
'<B47B , 48B 48B 48B',
        ['svid', 'data[47]', 'optional0[48]', 'optional1[48]', 'optional2[48]']),
(CLASS_RXM, MSG_RXM_RAW) : UBloxDescriptor('RXM_RAW',
'<ihBB',
['iTOW', 'week', 'numSV', 'reserved1'],
'numSV',
'<ddfBbbB',
['cpMes', 'prMes', 'doMes', 'sv', 'mesQI', 'cno', 'lli']),
(CLASS_RXM, MSG_RXM_SFRB) : UBloxDescriptor('RXM_SFRB',
'<BB10I',
['chn', 'svid', 'dwrd[10]']),
(CLASS_AID, MSG_AID_ALM) : UBloxDescriptor('AID_ALM',
'<II',
'_remaining',
'I',
['dwrd']),
(CLASS_RXM, MSG_RXM_ALM) : UBloxDescriptor('RXM_ALM',
'<II , 8I',
['svid', 'week', 'dwrd[8]']),
(CLASS_CFG, MSG_CFG_NAV5) : UBloxDescriptor('CFG_NAV5',
'<HBBiIbBHHHHBBIII',
['mask', 'dynModel', 'fixMode', 'fixedAlt', 'fixedAltVar', 'minElev',
'drLimit', 'pDop', 'tDop', 'pAcc', 'tAcc', 'staticHoldThresh',
'dgpsTimeOut', 'reserved2', 'reserved3', 'reserved4']),
(CLASS_CFG, MSG_CFG_NAVX5) : UBloxDescriptor('CFG_NAVX5',
'<HHIBBBBBBBBBBHIBBBBBBHII',
['version', 'mask1', 'reserved0', 'reserved1', 'reserved2',
'minSVs', 'maxSVs', 'minCNO', 'reserved5', 'iniFix3D',
'reserved6', 'reserved7', 'reserved8', 'wknRollover',
'reserved9', 'reserved10', 'reserved11',
'usePPP', 'useAOP', 'reserved12', 'reserved13',
'aopOrbMaxErr', 'reserved3', 'reserved4']),
(CLASS_MON, MSG_MON_HW) : UBloxDescriptor('MON_HW',
'<IIIIHHBBBBIB25BHIII',
['pinSel', 'pinBank', 'pinDir', 'pinVal', 'noisePerMS', 'agcCnt', 'aStatus',
'aPower', 'flags', 'reserved1', 'usedMask',
'VP[25]',
'jamInd', 'reserved3', 'pinInq',
'pullH', 'pullL']),
(CLASS_MON, MSG_MON_HW2) : UBloxDescriptor('MON_HW2',
'<bBbBB3BI8BI4B',
['ofsI', 'magI', 'ofsQ', 'magQ', 'cfgSource', 'reserved1[3]',
'lowLevCfg', 'reserved2[8]', 'postStatus', 'reserved3[4]']),
(CLASS_MON, MSG_MON_SCHD) : UBloxDescriptor('MON_SCHD',
'<IIIIHHHBB',
['tskRun', 'tskSchd', 'tskOvrr', 'tskReg', 'stack',
'stackSize', 'CPUIdle', 'flySly', 'ptlSly']),
(CLASS_MON, MSG_MON_VER) : UBloxDescriptor('MON_VER',
'<30s10s,30s',
['swVersion', 'hwVersion', 'romVersion'],
'_remaining',
'30s',
['extension']),
(CLASS_TIM, MSG_TIM_TP) : UBloxDescriptor('TIM_TP',
'<IIiHBB',
['towMS', 'towSubMS', 'qErr', 'week', 'flags', 'reserved1']),
(CLASS_TIM, MSG_TIM_TM2) : UBloxDescriptor('TIM_TM2',
'<BBHHHIIIII',
['ch', 'flags', 'count', 'wnR', 'wnF', 'towMsR', 'towSubMsR',
'towMsF', 'towSubMsF', 'accEst']),
(CLASS_TIM, MSG_TIM_SVIN) : UBloxDescriptor('TIM_SVIN',
'<IiiiIIBBH',
['dur', 'meanX', 'meanY', 'meanZ', 'meanV',
'obs', 'valid', 'active', 'reserved1']),
(CLASS_INF, MSG_INF_ERROR) : UBloxDescriptor('INF_ERR', '<18s', ['str']),
(CLASS_INF, MSG_INF_DEBUG) : UBloxDescriptor('INF_DEBUG', '<18s', ['str'])
}
class UBloxMessage:
'''UBlox message class - holds a UBX binary message'''
def __init__(self):
self._buf = b""
self._fields = {}
self._recs = []
self._unpacked = False
self.debug_level = 0
def __str__(self):
'''format a message as a string'''
if not self.valid():
return 'UBloxMessage(INVALID)'
type = self.msg_type()
if type in msg_types:
return msg_types[type].format(self)
return 'UBloxMessage(UNKNOWN %s, %u)' % (str(type), self.msg_length())
def __getattr__(self, name):
'''allow access to message fields'''
try:
return self._fields[name]
except KeyError:
if name == 'recs':
return self._recs
raise AttributeError(name)
def __setattr__(self, name, value):
'''allow access to message fields'''
if name.startswith('_'):
self.__dict__[name] = value
else:
self._fields[name] = value
def have_field(self, name):
'''return True if a message contains the given field'''
return name in self._fields
def debug(self, level, msg):
'''write a debug message'''
if self.debug_level >= level:
print(msg)
def unpack(self):
'''unpack a message'''
if not self.valid():
raise UBloxError('INVALID MESSAGE')
type = self.msg_type()
if not type in msg_types:
raise UBloxError('Unknown message %s length=%u' % (str(type), len(self._buf)))
msg_types[type].unpack(self)
def pack(self):
'''pack a message'''
if not self.valid():
            raise UBloxError('INVALID MESSAGE')
type = self.msg_type()
if not type in msg_types:
raise UBloxError('Unknown message %s' % str(type))
msg_types[type].pack(self)
def name(self):
'''return the short string name for a message'''
if not self.valid():
            raise UBloxError('INVALID MESSAGE')
type = self.msg_type()
if not type in msg_types:
raise UBloxError('Unknown message %s length=%u' % (str(type), len(self._buf)))
return msg_types[type].name
if PYTHON_VERSION == 2:
def msg_class(self):
'''return the message class'''
return ord(self._buf[2])
def msg_id(self):
'''return the message id within the class'''
return ord(self._buf[3])
else:
def msg_class(self):
'''return the message class'''
return (self._buf[2])
def msg_id(self):
'''return the message id within the class'''
return (self._buf[3])
def msg_type(self):
'''return the message type tuple (class, id)'''
return (self.msg_class(), self.msg_id())
def msg_length(self):
'''return the payload length'''
(payload_length,) = struct.unpack('<H', self._buf[4:6])
return payload_length
def valid_so_far(self):
'''check if the message is valid so far'''
if PYTHON_VERSION == 2:
if len(self._buf) > 0 and ord(self._buf[0]) != PREAMBLE1:
return False
if len(self._buf) > 1 and ord(self._buf[1]) != PREAMBLE2:
self.debug(1, "bad pre2")
return False
else:
if len(self._buf) > 0 and (self._buf[0]) != PREAMBLE1:
return False
if len(self._buf) > 1 and (self._buf[1]) != PREAMBLE2:
self.debug(1, "bad pre2")
return False
if self.needed_bytes() == 0 and not self.valid():
if len(self._buf) > 8:
self.debug(1, "bad checksum len=%u needed=%u" % (len(self._buf), self.needed_bytes()))
else:
self.debug(1, "bad len len=%u needed=%u" % (len(self._buf), self.needed_bytes()))
return False
return True
def add(self, bytes):
'''add some bytes to a message'''
self._buf += bytes
while not self.valid_so_far() and len(self._buf) > 0:
            # handle corrupted streams
self._buf = self._buf[1:]
if self.needed_bytes() < 0:
self._buf = b""
def checksum(self, data=None):
'''return a checksum tuple for a message'''
if data is None:
data = self._buf[2:-2]
cs = 0
ck_a = 0
ck_b = 0
for i in data:
if type(i) is str:
ck_a = (ck_a + ord(i)) & 0xFF
else:
ck_a = (ck_a + i) & 0xFF
ck_b = (ck_b + ck_a) & 0xFF
return (ck_a, ck_b)
def valid_checksum(self):
'''check if the checksum is OK'''
(ck_a, ck_b) = self.checksum()
d = self._buf[2:-2]
(ck_a2, ck_b2) = struct.unpack('<BB', self._buf[-2:])
return ck_a == ck_a2 and ck_b == ck_b2
def needed_bytes(self):
'''return number of bytes still needed'''
if len(self._buf) < 6:
return 8 - len(self._buf)
return self.msg_length() + 8 - len(self._buf)
def valid(self):
'''check if a message is valid'''
return len(self._buf) >= 8 and self.needed_bytes() == 0 and self.valid_checksum()
class UBlox:
'''main UBlox control class.
port can be a file (for reading only) or a serial device
'''
def __init__(self, port, baudrate=115200, timeout=0):
self.serial_device = port
self.baudrate = baudrate
self.use_sendrecv = False
self.read_only = False
self.use_xfer = False
self.debug_level = 0
if self.serial_device.startswith("tcp:"):
import socket
a = self.serial_device.split(':')
destination_addr = (a[1], int(a[2]))
self.dev = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dev.connect(destination_addr)
self.dev.setblocking(1)
self.dev.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.use_sendrecv = True
elif os.path.isfile(self.serial_device):
self.read_only = True
self.dev = open(self.serial_device, mode='rb')
        elif self.serial_device.startswith("spi:"):
import spidev
bus, cs = map(int, self.serial_device.split(':')[1].split('.'))
#print(bus, cs)
self.use_xfer = True
self.dev = spidev.SpiDev()
self.dev.open(bus, cs)
            # We reuse the baudrate parameter, but it's difficult to get the default parameters right, so it's better to specify them explicitly.
self.dev.max_speed_hz = baudrate
else:
import serial
self.dev = serial.Serial(self.serial_device, baudrate=self.baudrate,
dsrdtr=False, rtscts=False, xonxoff=False, timeout=timeout)
self.logfile = None
self.log = None
self.preferred_dynamic_model = None
self.preferred_usePPP = None
self.preferred_dgps_timeout = None
def close(self):
'''close the device'''
self.dev.close()
self.dev = None
def set_debug(self, debug_level):
'''set debug level'''
self.debug_level = debug_level
def debug(self, level, msg):
'''write a debug message'''
if self.debug_level >= level:
print(msg)
def set_logfile(self, logfile, append=False):
'''setup logging to a file'''
if self.log is not None:
self.log.close()
self.log = None
self.logfile = logfile
if self.logfile is not None:
if append:
mode = 'ab'
else:
mode = 'wb'
self.log = open(self.logfile, mode=mode)
def set_preferred_dynamic_model(self, model):
'''set the preferred dynamic model for receiver'''
self.preferred_dynamic_model = model
if model is not None:
self.configure_poll(CLASS_CFG, MSG_CFG_NAV5)
def set_preferred_dgps_timeout(self, timeout):
'''set the preferred DGPS timeout for receiver'''
self.preferred_dgps_timeout = timeout
if timeout is not None:
self.configure_poll(CLASS_CFG, MSG_CFG_NAV5)
def set_preferred_usePPP(self, usePPP):
'''set the preferred usePPP setting for the receiver'''
if usePPP is None:
self.preferred_usePPP = None
return
self.preferred_usePPP = int(usePPP)
self.configure_poll(CLASS_CFG, MSG_CFG_NAVX5)
def nmea_checksum(self, msg):
d = msg[1:]
cs = 0
for i in d:
cs ^= ord(i)
return cs
def write(self, buf):
'''write some bytes'''
if not self.read_only:
if self.use_sendrecv:
return self.dev.send(buf)
elif self.use_xfer:
spiBuf = [] # form buf
for b in buf:
if type(b) is str:
spiBuf.append(ord(b))
else:
spiBuf.append(b)
return self.dev.xfer2(spiBuf)
return self.dev.write(buf)
def read(self, n):
'''read some bytes'''
if self.use_sendrecv:
import socket
try:
buf = self.dev.recv(n)
return buf
except socket.error as e:
return b''
if self.use_xfer:
buf = self.dev.readbytes(n)
return buf
buf = self.dev.read(n)
return buf
def send_nmea(self, msg):
if not self.read_only:
s = msg + "*%02X" % self.nmea_checksum(msg)
if PYTHON_VERSION == 2:
b = bytearray()
b.extend(s)
else:
b = bytearray()
b.extend(map(ord, s))
self.write(b)
def set_binary(self):
'''put a UBlox into binary mode using a NMEA string'''
if not self.read_only:
print("try set binary at %u" % self.baudrate)
self.send_nmea("$PUBX,41,0,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,1,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,2,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,3,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,4,0007,0001,%u,0" % self.baudrate)
self.send_nmea("$PUBX,41,5,0007,0001,%u,0" % self.baudrate)
def seek_percent(self, pct):
'''seek to the given percentage of a file'''
self.dev.seek(0, 2)
filesize = self.dev.tell()
        self.dev.seek(int(pct*0.01*filesize))
def special_handling(self, msg):
'''handle automatic configuration changes'''
if msg.name() == 'CFG_NAV5':
msg.unpack()
sendit = False
pollit = False
if self.preferred_dynamic_model is not None and msg.dynModel != self.preferred_dynamic_model:
msg.dynModel = self.preferred_dynamic_model
sendit = True
pollit = True
if self.preferred_dgps_timeout is not None and msg.dgpsTimeOut != self.preferred_dgps_timeout:
msg.dgpsTimeOut = self.preferred_dgps_timeout
self.debug(2, "Setting dgpsTimeOut=%u" % msg.dgpsTimeOut)
sendit = True
# we don't re-poll for this one, as some receivers refuse to set it
if sendit:
msg.pack()
self.send(msg)
if pollit:
self.configure_poll(CLASS_CFG, MSG_CFG_NAV5)
if msg.name() == 'CFG_NAVX5' and self.preferred_usePPP is not None:
msg.unpack()
if msg.usePPP != self.preferred_usePPP:
msg.usePPP = self.preferred_usePPP
msg.mask = 1<<13
msg.pack()
self.send(msg)
self.configure_poll(CLASS_CFG, MSG_CFG_NAVX5)
def receive_message_nonblocking(self, seconds=5):
'''nonblocking receive of one ublox message'''
with Timeout(seconds=seconds):
return self.receive_message()
def receive_message(self, ignore_eof=False):
'''blocking receive of one ublox message'''
msg = UBloxMessage()
while True:
n = msg.needed_bytes()
b = self.read(n)
if not b:
if ignore_eof:
time.sleep(0.01)
continue
return None
if self.use_xfer:
if PYTHON_VERSION == 3:
bb = bytearray()
for c in b:
bb.append(c)
b = bb
else:
b = "".join([chr(c) for c in b]) # here str
msg.add(b)
if self.log is not None:
self.log.write(b)
self.log.flush()
if msg.valid():
self.special_handling(msg)
return msg
def receive_message_noerror(self, ignore_eof=False):
'''blocking receive of one ublox message, ignoring errors'''
try:
return self.receive_message(ignore_eof=ignore_eof)
except UBloxError as e:
print(e)
return None
except OSError as e:
# Occasionally we get hit with 'resource temporarily unavailable'
# messages here on the serial device, catch them too.
print(e)
return None
def send(self, msg):
'''send a preformatted ublox message'''
if not msg.valid():
self.debug(1, "invalid send")
return
if not self.read_only:
self.write(msg._buf)
def send_message(self, msg_class, msg_id, payload):
'''send a ublox message with class, id and payload'''
msg = UBloxMessage()
msg._buf = struct.pack('<BBBBH', 0xb5, 0x62, msg_class, msg_id, len(payload))
msg._buf += payload
(ck_a, ck_b) = msg.checksum(msg._buf[2:])
msg._buf += struct.pack('<BB', ck_a, ck_b)
self.send(msg)
def configure_solution_rate(self, rate_ms=200, nav_rate=1, timeref=0):
'''configure the solution rate in milliseconds'''
payload = struct.pack('<HHH', rate_ms, nav_rate, timeref)
self.send_message(CLASS_CFG, MSG_CFG_RATE, payload)
def configure_message_rate(self, msg_class, msg_id, rate):
'''configure the message rate for a given message'''
payload = struct.pack('<BBB', msg_class, msg_id, rate)
self.send_message(CLASS_CFG, MSG_CFG_SET_RATE, payload)
def configure_port(self, port=1, inMask=3, outMask=3, mode=2240, baudrate=None):
'''configure a IO port'''
if baudrate is None:
baudrate = self.baudrate
payload = struct.pack('<BBHIIHHHH', port, 0xff, 0, mode, baudrate, inMask, outMask, 0xFFFF, 0xFFFF)
self.send_message(CLASS_CFG, MSG_CFG_PRT, payload)
def configure_loadsave(self, clearMask=0, saveMask=0, loadMask=0, deviceMask=0):
'''configure configuration load/save'''
payload = struct.pack('<IIIB', clearMask, saveMask, loadMask, deviceMask)
self.send_message(CLASS_CFG, MSG_CFG_CFG, payload)
def configure_poll(self, msg_class, msg_id, payload=b''):
'''poll a configuration message'''
self.send_message(msg_class, msg_id, payload)
def configure_poll_port(self, portID=None):
'''poll a port configuration'''
if portID is None:
self.configure_poll(CLASS_CFG, MSG_CFG_PRT)
else:
self.configure_poll(CLASS_CFG, MSG_CFG_PRT, struct.pack('<B', portID))
def configure_min_max_sats(self, min_sats=4, max_sats=32):
'''Set the minimum/maximum number of satellites for a solution in the NAVX5 message'''
payload = struct.pack('<HHIBBBBBBBBBBHIBBBBBBHII', 0, 4, 0, 0, 0, min_sats, max_sats, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
self.send_message(CLASS_CFG, MSG_CFG_NAVX5, payload)
def module_reset(self, set, mode):
''' Reset the module for hot/warm/cold start'''
payload = struct.pack('<HBB', set, mode, 0)
self.send_message(CLASS_CFG, MSG_CFG_RST, payload)
class TimeoutError(Exception):
pass
import signal
class Timeout:
def __init__(self, seconds=1, msg='Timeout'):
self.seconds = seconds
self.msg = msg
def handle_timeout(self, signum, frame):
raise TimeoutError(self.msg)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
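if False:
    # usage sketch (not executed): the device path, baud rate and solution
    # rate below are illustrative values, not defaults taken from this module
    dev = UBlox('/dev/ttyACM0', baudrate=115200, timeout=2)
    dev.configure_solution_rate(rate_ms=200)
    dev.configure_message_rate(CLASS_NAV, MSG_NAV_POSLLH, 1)
    while True:
        msg = dev.receive_message()
        if msg is None:
            break
        print(msg)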
| shawn1231/snowflakex-iii | Python/navio2/ublox.py | Python | bsd-3-clause | 39,252 |
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import odlclient.tests.data as test_data
import odlclient.datatypes as datatypes
class JsonObjectTests(unittest.TestCase):
""" Tests the JsonObject Class """
def setUp(self):
self.json_object = datatypes.JsonObject()
self.json_object.a = 0
self.json_object.b = [1, 2, 3, 4]
self.json_object.c = {"d": 5, "e": "six", "f": [7, "eight", 9]}
metric_app = datatypes.JsonObjectFactory.create('MetricApp',
test_data.METRIC_APP)
self.json_object.metric_app = metric_app
self.string = ('{\n'
' "a": 0,\n'
' "b": [\n'
' 1,\n'
' 2,\n'
' 3,\n'
' 4\n'
' ],\n'
' "c": {\n'
' "d": 5,\n'
' "e": "six",\n'
' "f": [\n'
' 7,\n'
' "eight",\n'
' 9\n'
' ]\n'
' },\n'
' "metric_app": {\n'
' "app_id": "com.hp.sdn.cloud",\n'
' "app_name": "HP VAN SDN Cloud Controller"\n'
' }\n'
'}')
def test_to_json_string(self):
result = self.json_object.to_json_string()
expected = self.string
self.assertEquals(result, expected)
def test_to_dict(self):
result = self.json_object.to_dict()
expected = {"a": 0,
"b": [1, 2, 3, 4],
"c": {"d": 5, "e": "six", "f": [7, "eight", 9]},
"metric_app": {
"app_id": "com.hp.sdn.cloud",
"app_name": "HP VAN SDN Cloud Controller",
}
}
self.assertEquals(result, expected)
# Omitted test case for test_factory....
#factory method is tested by the child classes in the suite below
class FactoryTests(unittest.TestCase):
""" Tests the JsonObjectFactory """
def _test_type(self, data, datatype):
""" Tests that the provided data is cast to the correct class.
If attributes within the class are also mapped to Python objects,
these are also checked """
type_name = datatype.__name__
obj = datatypes.JsonObjectFactory.create(type_name, data)
self.assertTrue(isinstance(obj, datatype))
try:
class_map = datatypes.CLASS_MAP[type_name]
for key in class_map:
if eval('obj.%s' % key) is None:
continue
else:
attribute = eval('obj.%s' % key)
if type(attribute) is None:
break
elif type(attribute) == list:
for item in attribute:
cls = eval('datatypes.%s' % class_map[key])
self.assertTrue(isinstance(item, cls))
else:
cls = eval('datatypes.%s' % class_map[key])
self.assertTrue(isinstance(attribute, cls))
except KeyError:
pass
return obj
def test_add_factory(self):
datatypes.JsonObjectFactory.add_factory('Datapath', datatypes.Datapath)
self.assertIn('Datapath', datatypes.JsonObjectFactory.factories)
self.assertEquals(datatypes.JsonObjectFactory.factories['Datapath'],
datatypes.Datapath)
def test_factory_create(self):
obj = self._test_type(test_data.SYSTEM, datatypes.System)
self.assertIn('self_', dir(obj))
self.assertIn('System', datatypes.JsonObjectFactory.factories)
def test_create_license(self):
self._test_type(test_data.LICENSE, datatypes.License)
def test_create_app(self):
self._test_type(test_data.APP, datatypes.App)
def test_create_app_health(self):
self._test_type(test_data.APP_HEALTH, datatypes.AppHealth)
def test_create_audit_log(self):
self._test_type(test_data.AUDIT_LOG, datatypes.AuditLogEntry)
def test_create_system(self):
self._test_type(test_data.SYSTEM, datatypes.System)
def test_create_region(self):
self._test_type(test_data.REGION, datatypes.Region)
def test_create_team(self):
self._test_type(test_data.TEAM, datatypes.Team)
def test_create_alert(self):
self._test_type(test_data.ALERT, datatypes.Alert)
def test_create_alert_topic(self):
self._test_type(test_data.ALERT_TOPIC, datatypes.AlertTopic)
def test_create_alert_topic_listener(self):
self._test_type(test_data.ALERT_TOPIC_LISTENER,
datatypes.AlertTopicListener)
def test_create_metric_app(self):
self._test_type(test_data.METRIC_APP, datatypes.MetricApp)
def test_create_metric(self):
self._test_type(test_data.METRIC, datatypes.Metric)
def test_create_metric_values(self):
self._test_type(test_data.METRIC_VALUES, datatypes.MetricValues)
def test_create_controller_stats(self):
self._test_type(test_data.CONTROLLER_STATS, datatypes.ControllerStats)
def test_create_stats(self):
self._test_type(test_data.STATS, datatypes.Stats)
def test_create_port_stats(self):
self._test_type(test_data.PORT_STATS, datatypes.PortStats)
def test_create_group_stats(self):
self._test_type(test_data.GROUP_STATS, datatypes.GroupStats)
def test_create_meter_stats(self):
self._test_type(test_data.METER_STATS, datatypes.MeterStats)
def test_create_datapath(self):
self._test_type(test_data.DATAPATH, datatypes.Datapath)
def test_create_meter_features(self):
self._test_type(test_data.METER_FEATURES, datatypes.MeterFeatures)
def test_create_group_features(self):
self._test_type(test_data.GROUP_FEATURES, datatypes.GroupFeatures)
def test_create_port(self):
self._test_type(test_data.PORT, datatypes.Port)
def test_create_meter(self):
self._test_type(test_data.METER, datatypes.Meter)
def test_create_group(self):
self._test_type(test_data.GROUP, datatypes.Group)
def test_create_flow(self):
obj = self._test_type(test_data.FLOW, datatypes.Flow)
self.assertEquals(obj.actions.output, 2)
def test_create_flow_multiple_action(self):
obj = self._test_type(test_data.FLOW_MA, datatypes.Flow)
self.assertEquals(obj.actions.output, [1,2,3])
def test_create_cluster(self):
self._test_type(test_data.CLUSTER, datatypes.Cluster)
def test_create_link(self):
self._test_type(test_data.LINK, datatypes.Link)
def test_create_path(self):
self._test_type(test_data.PATH, datatypes.Path)
def test_create_node(self):
self._test_type(test_data.NODE, datatypes.Node)
def test_create_lldp(self):
self._test_type(test_data.LLDP, datatypes.LldpProperties)
def test_create_observation(self):
self._test_type(test_data.OBSERVATION, datatypes.Observation)
def test_create_packet(self):
self._test_type(test_data.PACKET, datatypes.Packet)
def test_create_next_hop(self):
self._test_type(test_data.NEXT_HOP, datatypes.NextHop)
| chrissmall22/odl-client | odlclient/tests/unit/test_datatypes.py | Python | apache-2.0 | 8,243 |
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all exception's which search services may raise."""
from search.common import utils
class Error(Exception):
"""Generic error."""
def ToString(self, error_prefix):
"""Builds error message string escaping it for HTML.
Args:
error_prefix: an error prefix.
Returns:
HTML escaped error message.
"""
if error_prefix:
return utils.HtmlEscape(
"{0}: {1}".format(error_prefix, str("\n".join(self.args))))
else:
return utils.HtmlEscape("Error: {0}".format(str("\n".join(self.args))))
def __str__(self):
return self.ToString("Error")
class BadQueryException(Error):
"""BadQueryException error."""
def __str__(self):
return self.ToString("BadQueryException")
# Places search service pool exception.
class PoolConnectionException(Error):
"""PoolConnectionException error."""
def __str__(self):
return self.ToString("PoolConnectionException")
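# Illustrative behaviour (a sketch, not executed): formatting one of these
# exceptions yields a prefixed, HTML-escaped message; the exact escaping
# depends on utils.HtmlEscape, which is defined elsewhere.
#
#   str(BadQueryException("empty search term"))
#   # -> "BadQueryException: empty search term" (HTML-escaped)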
def main():
pass
if __name__ == "__main__":
main()
| tst-mswartz/earthenterprise | earth_enterprise/src/server/wsgi/search/common/exceptions.py | Python | apache-2.0 | 1,602 |
__author__ = "Mohammad Dabiri"
__copyright__ = "Free to use, copy and modify"
__credits__ = ["Mohammad Dabiri"]
__license__ = "MIT Licence"
__version__ = "0.0.1"
__maintainer__ = "Mohammad Dabiri"
__email__ = "moddabiri@yahoo.com"
from servo_control.ABE_ServoPi import PWM
from servo_control.ABE_helpers import ABEHelpers
import os
import time
i2c_helper = ABEHelpers()
bus = i2c_helper.get_smbus()
pwm = PWM(bus, 0x40)
servo_pan_min = 250
servo_pan_max = 450
servo_tilt_min = 350
servo_tilt_max = 450
pwm.set_pwm_freq(60)
pwm.output_enable()
tilt_state = servo_tilt_min+int((servo_tilt_max-servo_tilt_min)/2)
pan_state = servo_pan_min+int((servo_pan_max-servo_pan_min)/2)
def move_head(direction, steps):
global tilt_state
global pan_state
if 'up' in direction:
new_tilt_state = tilt_state + steps
elif 'down' in direction:
new_tilt_state = tilt_state - steps
else:
new_tilt_state = tilt_state
if 'right' in direction:
new_pan_state = pan_state - steps
elif 'left' in direction:
new_pan_state = pan_state + steps
else:
new_pan_state = pan_state
my_print("Moving head to (%d,%d) current: (%d,%d) - Direction: %s"%(new_tilt_state, new_pan_state, tilt_state, pan_state, direction))
move_head_to(new_tilt_state, new_pan_state)
def move_head_to(tilt_target, pan_target):
global tilt_state
global pan_state
global pwm
tilt_target = max(min(tilt_target,servo_tilt_max), servo_tilt_min)
pan_target = max(min(pan_target,servo_pan_max), servo_pan_min)
while(True):
if tilt_target == tilt_state and pan_target == pan_state:
break
if not tilt_state == tilt_target:
if tilt_target > tilt_state:
tilt_state +=1
else:
tilt_state -=1
pwm.set_pwm(0, 0, tilt_state)
if not pan_state == pan_target:
if pan_target > pan_state:
pan_state +=1
else:
pan_state -=1
pwm.set_pwm(1, 0, pan_state)
time.sleep(0.005)
def move_to_center(immediate=False):
global pwm
global servo_tilt_min
global servo_tilt_max
global servo_pan_min
global servo_pan_max
new_tilt_state = servo_tilt_min+int((servo_tilt_max-servo_tilt_min)/2)
new_pan_state = servo_pan_min+int((servo_pan_max-servo_pan_min)/2)
my_print("Moving to %d,%d"%(new_tilt_state, new_pan_state))
if immediate:
pwm.set_pwm(0, 0, new_tilt_state)
pwm.set_pwm(1, 0, new_pan_state)
else:
move_head_to(new_tilt_state, new_pan_state)
def move_by_pixel(x, y):
global tilt_state
global pan_state
tilt = int(y/3.8)
pan = int(x/3.8)
print("Moving from (%d,%d) to (%d,%d)"%(tilt_state, pan_state, tilt_state-tilt, pan_state+pan))
move_head_to(tilt_state-tilt, pan_state+pan)
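# Illustrative usage sketch (not executed; the step and pixel values are
# arbitrary examples, not calibrated for any particular servo setup):
#
#   move_to_center(immediate=True)
#   move_head('up left', 20)   # tilt up and pan left by 20 servo steps
#   move_by_pixel(38, -38)     # re-aim by a pixel offset from the current position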
def look_around(onLook):
global tilt_state
global pan_state
while tilt_state < servo_tilt_max:
move_head('up', 50)
result = onLook()
if result:
return result
while pan_state < servo_pan_max:
move_head('left', 50)
result = onLook()
if result:
return result
while pan_state > servo_pan_min:
move_head('right', 50)
result = onLook()
if result:
return result
while tilt_state > servo_tilt_min:
move_head('down', 50)
result = onLook()
if result:
return result
while pan_state < servo_pan_max:
move_head('left', 50)
result = onLook()
if result:
return result
while pan_state > servo_pan_min:
move_head('right', 50)
result = onLook()
if result:
return result
    return None
 | moddabiri/simple_talking_robot | controllers/head/neck/motor.py | Python | mit | 3,834
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from treemap.tests.base import OTMTestCase
from treemap.models import Role, Plot, Tree
from treemap.lib import perms
from treemap.tests import make_instance, make_officer_user, make_admin_user
class PermTestCase(OTMTestCase):
def test_none_perm(self):
self.assertEqual(False,
perms._allows_perm(Role(),
'NonExistentModel',
any, 'allows_reads'))
class UserCanDeleteTestCase(OTMTestCase):
def setUp(self):
instance = make_instance()
self.creator_user = make_officer_user(instance)
self.admin_user = make_admin_user(instance)
self.other_user = make_officer_user(instance, username='other')
self.plot = Plot(geom=instance.center, instance=instance)
self.plot.save_with_user(self.creator_user)
self.tree = Tree(plot=self.plot, instance=instance)
self.tree.save_with_user(self.creator_user)
def assert_can_delete(self, user, deletable, should_be_able_to_delete):
can = deletable.user_can_delete(user)
self.assertEqual(can, should_be_able_to_delete)
def test_user_can_delete(self):
self.assert_can_delete(self.creator_user, self.plot, True)
self.assert_can_delete(self.admin_user, self.plot, True)
self.assert_can_delete(self.other_user, self.plot, False)
self.assert_can_delete(self.creator_user, self.tree, True)
self.assert_can_delete(self.admin_user, self.tree, True)
self.assert_can_delete(self.other_user, self.tree, False)
| RickMohr/otm-core | opentreemap/treemap/tests/test_perms.py | Python | agpl-3.0 | 1,734 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_resourcegroup import CommandShowResourceGroup
class CommandShowResourcegroupResourcegroup(CommandShowResourceGroup):
required_parameters = ["resourcegroup"]
| jrha/aquilon | lib/python2.6/aquilon/worker/commands/show_resourcegroup_resourcegroup.py | Python | apache-2.0 | 981 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Ryan Roden-Corrent (rcorre) <ryan@rcorre.net>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Provides access to an in-memory sqlite database."""
import collections
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtSql import QSqlDatabase, QSqlQuery, QSqlError
from qutebrowser.utils import log, debug
class SqlError(Exception):
"""Raised on an error interacting with the SQL database.
Attributes:
environmental: Whether the error is likely caused by the environment
and not a qutebrowser bug.
"""
def __init__(self, msg, environmental=False):
super().__init__(msg)
self.environmental = environmental
def text(self):
"""Get a short text to display."""
return str(self)
class SqliteError(SqlError):
"""A SQL error with a QSqlError available.
Attributes:
error: The QSqlError object.
"""
def __init__(self, msg, error):
super().__init__(msg)
self.error = error
log.sql.debug("SQL error:")
log.sql.debug("type: {}".format(
debug.qenum_key(QSqlError, error.type())))
log.sql.debug("database text: {}".format(error.databaseText()))
log.sql.debug("driver text: {}".format(error.driverText()))
log.sql.debug("error code: {}".format(error.nativeErrorCode()))
# https://sqlite.org/rescode.html
# https://github.com/qutebrowser/qutebrowser/issues/2930
# https://github.com/qutebrowser/qutebrowser/issues/3004
environmental_errors = [
'5', # SQLITE_BUSY ("database is locked")
'8', # SQLITE_READONLY
'13', # SQLITE_FULL
]
self.environmental = error.nativeErrorCode() in environmental_errors
def text(self):
return self.error.databaseText()
@classmethod
def from_query(cls, what, query, error):
"""Construct an error from a failed query.
Arguments:
what: What we were doing when the error happened.
query: The query which was executed.
error: The QSqlError object.
"""
msg = 'Failed to {} query "{}": "{}"'.format(what, query, error.text())
return cls(msg, error)
def init(db_path):
"""Initialize the SQL database connection."""
database = QSqlDatabase.addDatabase('QSQLITE')
if not database.isValid():
raise SqlError('Failed to add database. '
'Are sqlite and Qt sqlite support installed?',
environmental=True)
database.setDatabaseName(db_path)
if not database.open():
error = database.lastError()
raise SqliteError("Failed to open sqlite database at {}: {}"
.format(db_path, error.text()), error)
def close():
"""Close the SQL connection."""
QSqlDatabase.removeDatabase(QSqlDatabase.database().connectionName())
def version():
"""Return the sqlite version string."""
try:
if not QSqlDatabase.database().isOpen():
init(':memory:')
ver = Query("select sqlite_version()").run().value()
close()
return ver
return Query("select sqlite_version()").run().value()
except SqlError as e:
return 'UNAVAILABLE ({})'.format(e)
class Query(QSqlQuery):
"""A prepared SQL Query."""
def __init__(self, querystr, forward_only=True):
"""Prepare a new sql query.
Args:
querystr: String to prepare query from.
forward_only: Optimization for queries that will only step forward.
Must be false for completion queries.
"""
super().__init__(QSqlDatabase.database())
log.sql.debug('Preparing SQL query: "{}"'.format(querystr))
if not self.prepare(querystr):
raise SqliteError.from_query('prepare', querystr, self.lastError())
self.setForwardOnly(forward_only)
def __iter__(self):
if not self.isActive():
raise SqlError("Cannot iterate inactive query")
rec = self.record()
fields = [rec.fieldName(i) for i in range(rec.count())]
rowtype = collections.namedtuple('ResultRow', fields)
while self.next():
rec = self.record()
yield rowtype(*[rec.value(i) for i in range(rec.count())])
def run(self, **values):
"""Execute the prepared query."""
log.sql.debug('Running SQL query: "{}"'.format(self.lastQuery()))
for key, val in values.items():
self.bindValue(':{}'.format(key), val)
log.sql.debug('query bindings: {}'.format(self.boundValues()))
if not self.exec_():
raise SqliteError.from_query('exec', self.lastQuery(),
self.lastError())
return self
def value(self):
"""Return the result of a single-value query (e.g. an EXISTS)."""
if not self.next():
raise SqlError("No result for single-result query")
return self.record().value(0)
class SqlTable(QObject):
"""Interface to a sql table.
Attributes:
_name: Name of the SQL table this wraps.
Signals:
changed: Emitted when the table is modified.
"""
changed = pyqtSignal()
def __init__(self, name, fields, constraints=None, parent=None):
"""Create a new table in the sql database.
Does nothing if the table already exists.
Args:
name: Name of the table.
fields: A list of field names.
constraints: A dict mapping field names to constraint strings.
"""
super().__init__(parent)
self._name = name
constraints = constraints or {}
column_defs = ['{} {}'.format(field, constraints.get(field, ''))
for field in fields]
q = Query("CREATE TABLE IF NOT EXISTS {name} ({column_defs})"
.format(name=name, column_defs=', '.join(column_defs)))
q.run()
def create_index(self, name, field):
"""Create an index over this table.
Args:
name: Name of the index, should be unique.
field: Name of the field to index.
"""
q = Query("CREATE INDEX IF NOT EXISTS {name} ON {table} ({field})"
.format(name=name, table=self._name, field=field))
q.run()
def __iter__(self):
"""Iterate rows in the table."""
q = Query("SELECT * FROM {table}".format(table=self._name))
q.run()
return iter(q)
def contains_query(self, field):
"""Return a prepared query that checks for the existence of an item.
Args:
field: Field to match.
"""
return Query(
"SELECT EXISTS(SELECT * FROM {table} WHERE {field} = :val)"
.format(table=self._name, field=field))
def __len__(self):
"""Return the count of rows in the table."""
q = Query("SELECT count(*) FROM {table}".format(table=self._name))
q.run()
return q.value()
def delete(self, field, value):
"""Remove all rows for which `field` equals `value`.
Args:
field: Field to use as the key.
value: Key value to delete.
Return:
The number of rows deleted.
"""
q = Query("DELETE FROM {table} where {field} = :val"
.format(table=self._name, field=field))
q.run(val=value)
if not q.numRowsAffected():
raise KeyError('No row with {} = "{}"'.format(field, value))
self.changed.emit()
def _insert_query(self, values, replace):
params = ', '.join(':{}'.format(key) for key in values)
verb = "REPLACE" if replace else "INSERT"
return Query("{verb} INTO {table} ({columns}) values({params})".format(
verb=verb, table=self._name, columns=', '.join(values),
params=params))
def insert(self, values, replace=False):
"""Append a row to the table.
Args:
values: A dict with a value to insert for each field name.
replace: If set, replace existing values.
"""
q = self._insert_query(values, replace)
q.run(**values)
self.changed.emit()
def insert_batch(self, values, replace=False):
"""Performantly append multiple rows to the table.
Args:
values: A dict with a list of values to insert for each field name.
replace: If true, overwrite rows with a primary key match.
"""
q = self._insert_query(values, replace)
for key, val in values.items():
q.bindValue(':{}'.format(key), val)
db = QSqlDatabase.database()
db.transaction()
if not q.execBatch():
raise SqliteError.from_query('exec', q.lastQuery(), q.lastError())
db.commit()
self.changed.emit()
def delete_all(self):
"""Remove all rows from the table."""
Query("DELETE FROM {table}".format(table=self._name)).run()
self.changed.emit()
def select(self, sort_by, sort_order, limit=-1):
"""Prepare, run, and return a select statement on this table.
Args:
sort_by: name of column to sort by.
sort_order: 'asc' or 'desc'.
limit: max number of rows in result, defaults to -1 (unlimited).
Return: A prepared and executed select query.
"""
q = Query("SELECT * FROM {table} ORDER BY {sort_by} {sort_order} "
"LIMIT :limit"
.format(table=self._name, sort_by=sort_by,
sort_order=sort_order))
q.run(limit=limit)
return q
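if False:
    # usage sketch (not executed); the table and field names are invented
    # for illustration and do not come from this module
    init(':memory:')
    table = SqlTable('History', ['url', 'title', 'atime'],
                     constraints={'url': 'PRIMARY KEY'})
    table.insert({'url': 'https://example.org', 'title': 'Example', 'atime': 1})
    for row in table.select(sort_by='atime', sort_order='desc', limit=10):
        print(row.url, row.title)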
| NoctuaNivalis/qutebrowser | qutebrowser/misc/sql.py | Python | gpl-3.0 | 10,463 |
__author__ = 'Maximilian Bisani'
__version__ = '$LastChangedRevision: 1667 $'
__date__ = '$LastChangedDate: 2007-06-02 16:32:35 +0200 (Sat, 02 Jun 2007) $'
__copyright__ = 'Copyright (c) 2004-2005 RWTH Aachen University'
__license__ = """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 2 (June
1991) as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, you will find it at
http://www.gnu.org/licenses/gpl.html, or write to the Free Software
Foundation, Inc., 51 Franlin Street, Fifth Floor, Boston, MA 02110,
USA.
Should a provision of no. 9 and 10 of the GNU General Public License
be invalid or become invalid, a valid provision is deemed to have been
agreed upon which comes closest to what the parties intended
commercially. In any case guarantee/warranty shall be limited to gross
negligent actions or intended actions or fraudulent concealment.
"""
import copy, itertools, heapq
from misc import restartable
# ===========================================================================
if __debug__:
class assertIsSorted:
def __init__(self, seq):
self.seq = seq
def __iter__(self):
it = iter(self.seq)
previous = it.next()
yield previous
for item in it:
if previous[0] > item[0]:
raise ValueError('sequence must be sorted', previous, item)
yield item
previous = item
class assertIsSortedAndConsolidated:
def __init__(self, seq):
self.seq = seq
def __iter__(self):
it = iter(self.seq)
previous = it.next()
yield previous
for item in it:
if previous[0] >= item[0]:
raise ValueError('sequence must be sorted and consolidated')
yield item
previous = item
assertIsConsolidated = assertIsSortedAndConsolidated
else:
def assertIsSorted(seq):
return seq
def assertIsSortedAndConsolidated(seq):
return seq
def assertIsConsolidated(seq):
return seq
# ===========================================================================
def mergeSort(seqs):
"""
perform merge sort on a list of sorted iterators
"""
queue = []
for s in seqs:
s = assertIsSorted(s)
it = iter(s)
try:
queue.append((it.next(), it.next))
except StopIteration:
pass
heapq.heapify(queue)
while queue:
item, it = queue[0]
yield item
try:
heapq.heapreplace(queue, (it(), it))
except StopIteration:
heapq.heappop(queue)
# ---------------------------------------------------------------------------
def consolidateInPlaceAdd(seq):
"""
    merge the values of equal keys in a sorted iterator using in-place addition
"""
seq = assertIsSorted(seq)
it = iter(seq)
key, value = it.next()
ownsValue = False
for k, v in it:
if k == key:
if not ownsValue:
value = copy.copy(value)
ownsValue = True
value += v
else:
yield key, value
key, value = k, v
ownsValue = False
yield key, value
consolidate = consolidateInPlaceAdd
def aggregate(seq):
"""
    group the values of equal keys in a sorted iterator into lists
"""
seq = assertIsSorted(seq)
it = iter(seq)
key, value = it.next()
current = [value]
for k, value in it:
if k == key:
current.append(value)
else:
yield key, current
key = k
current = [value]
yield key, current
aggregate = restartable(aggregate)
# ===========================================================================
def leftJoin(seqA, seqB):
seqA = assertIsSortedAndConsolidated(seqA)
seqB = assertIsSortedAndConsolidated(seqB)
aIter = iter(seqA)
bIter = iter(seqB)
bKey = None
try:
for aKey, aValue in aIter:
while aKey > bKey:
bKey, bValue = bIter.next()
if aKey == bKey:
yield aKey, aValue, bValue
else:
yield aKey, aValue, None
except StopIteration:
for aKey, aValue in aIter:
yield aKey, aValue, None
def innerJoin(seqA, seqB):
    seqA = assertIsSortedAndConsolidated(seqA)
    seqB = assertIsSortedAndConsolidated(seqB)
aIter = iter(seqA)
bIter = iter(seqB)
bKey = None
for aKey, aValue in aIter:
while aKey > bKey:
try:
bKey, bValue = bIter.next()
except StopIteration:
return
if aKey == bKey:
yield aKey, aValue, bValue
def outerJoin(seqA, seqB):
seqA = assertIsSorted(seqA)
seqB = assertIsSorted(seqB)
aIter = iter(seqA)
bIter = iter(seqB)
try:
aKey, aValue = aIter.next()
except StopIteration:
aIter = None
try:
bKey, bValue = bIter.next()
except StopIteration:
bIter = None
while (aIter is not None) and (bIter is not None):
aNext = bNext = False
if aKey < bKey:
yield aKey, aValue, None
aNext = True
elif aKey > bKey:
yield bKey, None, bValue
bNext = True
elif aKey == bKey:
yield aKey, aValue, bValue
aNext = bNext = True
else:
raise ValueError('tertium non datur')
if aNext:
try:
aKey, aValue = aIter.next()
except StopIteration:
aIter = None
if bNext:
try:
bKey, bValue = bIter.next()
except StopIteration:
bIter = None
if aIter is not None:
yield aKey, aValue, None
for aKey, aValue in aIter:
yield aKey, aValue, None
if bIter is not None:
yield bKey, None, bValue
for bKey, bValue in bIter:
yield bKey, None, bValue
def outerJoinMany(*seqs):
front = []
for ii, s in enumerate(seqs):
s = assertIsSorted(s)
it = iter(s)
try:
key, value = it.next()
front.append([ii+1, key, value, it])
except StopIteration:
pass
row = [None] + len(seqs) * [None]
while front:
minKey = min([ key for ii, key, value, it in front ])
row[0] = minKey
remove = []
for f, (ii, key, value, it) in enumerate(front):
if key == minKey:
row[ii] = value
try:
front[f][1:3] = it.next()
except StopIteration:
remove.append(ii)
else:
row[ii] = None
yield tuple(row)
if remove:
for ii in remove:
row[ii] = None
front = [ f for f in front if f[0] not in remove ]
# ===========================================================================
class monodict(object):
def __init__(self, seq):
seq = assertIsSortedAndConsolidated(seq)
self.it = iter(seq)
self.recentKey = None
try:
self.key, self.value = self.it.next()
except StopIteration:
self.key = None
def __getitem__(self, key):
if key != self.key:
if key < self.recentKey:
raise ValueError('access not monotonous', self.recentKey, key)
self.recentKey = key
while key > self.key:
try:
self.key, self.value = self.it.next()
except StopIteration:
raise KeyError(key)
if key != self.key:
raise KeyError(key)
return self.value
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
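# Illustrative examples (a doctest-style sketch, not part of the original module):
#
#     >>> list(mergeSort([[(1, 'a'), (3, 'c')], [(2, 'b')]]))
#     [(1, 'a'), (2, 'b'), (3, 'c')]
#     >>> list(outerJoin([(1, 'a'), (2, 'b')], [(2, 'x'), (3, 'y')]))
#     [(1, 'a', None), (2, 'b', 'x'), (3, None, 'y')]
#     >>> monodict([(1, 'a'), (2, 'b')])[2]
#     'b'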
| Louiiiss/ros_asr | src/grammar/mit_g2p_tools/g2p/IterMap.py | Python | gpl-2.0 | 7,066 |
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from django.template import RequestContext, loader
#import models from www if there are any
def index(request):
return render(request, 'site_flow/index.html')
def about(request):
return render(request, 'site_flow/about.html')
def faq(request):
return render(request, 'site_flow/faq.html')
| RKD314/yumstat | site_flow/views.py | Python | mit | 497 |
import sys
import time
import uuid
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec
aSrvID = None
prodUserNameDefault = 'unknown-user'
prodUserName = None
prodUserNameDP = None
prodUserNamePipeline = None
site = 'ANALY_BNL-LSST'
PIPELINE_TASK = None
PIPELINE_PROCESSINSTANCE = None
PIPELINE_EXECUTIONNUMBER = None
PIPELINE_STREAM = None
lsstJobParams = ""
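# Illustrative invocation (a sketch; every value below is made up, only the
# option names come from the argument-parsing loop that follows):
#
#   python lsstSubmit.py --site ANALY_BNL-LSST -PIPELINE_USER jdoe \
#       -PIPELINE_TASK processCcd -PIPELINE_PROCESSINSTANCE 1234 \
#       -PIPELINE_EXECUTIONNUMBER 1 -PIPELINE_STREAM 7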
for idx,argv in enumerate(sys.argv):
if argv == '--site':
try:
site = sys.argv[idx + 1]
except Exception:
site = 'ANALY_BNL-LSST'
if argv == '-DP_USER':
try:
prodUserNameDP = sys.argv[idx + 1]
if len(lsstJobParams):
lsstJobParams += "|"
lsstJobParams += "%(key)s=%(value)s" % \
{'key': 'DP_USER', \
'value': str(prodUserNameDP)}
except Exception:
prodUserNameDP = None
if argv == '-PIPELINE_USER':
try:
prodUserNamePipeline = sys.argv[idx + 1]
if len(lsstJobParams):
lsstJobParams += "|"
lsstJobParams += "%(key)s=%(value)s" % \
{'key': 'PIPELINE_USER', \
'value': str(prodUserNamePipeline)}
except Exception:
prodUserNamePipeline = None
if argv == '-PIPELINE_TASK':
try:
PIPELINE_TASK = sys.argv[idx + 1]
if len(lsstJobParams):
lsstJobParams += "|"
lsstJobParams += "%(key)s=%(value)s" % \
{'key': 'PIPELINE_TASK', \
'value': str(PIPELINE_TASK)}
except Exception:
PIPELINE_TASK = None
if argv == '-PIPELINE_PROCESSINSTANCE':
try:
PIPELINE_PROCESSINSTANCE = int(sys.argv[idx + 1])
if len(lsstJobParams):
lsstJobParams += "|"
lsstJobParams += "%(key)s=%(value)s" % \
{'key': 'PIPELINE_PROCESSINSTANCE', \
'value': str(PIPELINE_PROCESSINSTANCE)}
except Exception:
PIPELINE_PROCESSINSTANCE = None
if argv == '-PIPELINE_EXECUTIONNUMBER':
try:
PIPELINE_EXECUTIONNUMBER = int(sys.argv[idx + 1])
if len(lsstJobParams):
lsstJobParams += "|"
lsstJobParams += "%(key)s=%(value)s" % \
{'key': 'PIPELINE_EXECUTIONNUMBER', \
'value': str(PIPELINE_EXECUTIONNUMBER)}
except Exception:
PIPELINE_EXECUTIONNUMBER = None
if argv == '-PIPELINE_STREAM':
try:
PIPELINE_STREAM = int(sys.argv[idx + 1])
if len(lsstJobParams):
lsstJobParams += "|"
lsstJobParams += "%(key)s=%(value)s" % \
{'key': 'PIPELINE_STREAM', \
'value': str(PIPELINE_STREAM)}
except Exception:
PIPELINE_STREAM = None
if argv == '-s':
aSrvID = sys.argv[idx+1]
sys.argv = sys.argv[:idx]
break
### DP_USER and PIPELINE_USER preference
if prodUserNameDP is not None:
prodUserName = prodUserNameDP
elif prodUserNamePipeline is not None:
prodUserName = prodUserNamePipeline
#site = sys.argv[1]
#site = 'ANALY_BNL-LSST' #orig
#site = 'BNL-LSST'
#site = 'SWT2_CPB-LSST'
#site = 'UTA_SWT2-LSST'
#site = 'ANALY_SWT2_CPB-LSST'
destName = None
if prodUserName is not None \
and PIPELINE_TASK is not None \
and PIPELINE_PROCESSINSTANCE is not None:
datasetName = 'panda.lsst.user.%(PIPELINE_PROCESSINSTANCE)s.%(PIPELINE_TASK)s.%(prodUserName)s' % \
{'prodUserName': str(prodUserName), \
'PIPELINE_TASK': str(PIPELINE_TASK), \
'PIPELINE_PROCESSINSTANCE': str(PIPELINE_PROCESSINSTANCE) \
}
else:
datasetName = 'panda.lsst.user.jschovan.%s' % str(uuid.uuid4())
if prodUserName is not None \
and PIPELINE_TASK is not None \
and PIPELINE_EXECUTIONNUMBER is not None \
and PIPELINE_STREAM is not None:
jobName = 'job.%(PIPELINE_PROCESSINSTANCE)s.%(PIPELINE_TASK)s.%(PIPELINE_EXECUTIONNUMBER)s.%(prodUserName)s.%(PIPELINE_STREAM)s' % \
{'prodUserName': str(prodUserName), \
'PIPELINE_TASK': str(PIPELINE_TASK), \
'PIPELINE_EXECUTIONNUMBER': str(PIPELINE_EXECUTIONNUMBER), \
'PIPELINE_STREAM': str(PIPELINE_STREAM), \
'PIPELINE_PROCESSINSTANCE': str(PIPELINE_PROCESSINSTANCE) \
}
else:
jobName = "%s" % str(uuid.uuid4())
if PIPELINE_STREAM is not None:
jobDefinitionID = PIPELINE_STREAM
else:
jobDefinitionID = int(time.time()) % 10000
job = JobSpec()
job.jobDefinitionID = jobDefinitionID
job.jobName = jobName
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf.sh'
job.destinationDBlock = datasetName
job.destinationSE = 'local'
job.currentPriority = 1000
job.prodSourceLabel = 'panda'
job.jobParameters = ' --lsstJobParams="%s" ' % lsstJobParams
if prodUserName is not None:
job.prodUserName = prodUserName
else:
job.prodUserName = prodUserNameDefault
if PIPELINE_PROCESSINSTANCE is not None:
job.taskID = PIPELINE_PROCESSINSTANCE
if PIPELINE_EXECUTIONNUMBER is not None:
job.attemptNr = PIPELINE_EXECUTIONNUMBER
if PIPELINE_TASK is not None:
job.processingType = PIPELINE_TASK
job.computingSite = site
job.VO = "lsst"
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
s,o = Client.submitJobs([job],srvID=aSrvID)
print(s)
for x in o:
print("PandaID=%s" % x[0])
| PanDAWMS/panda-server | pandaserver/test/lsst/lsstSubmit.py | Python | apache-2.0 | 5,693 |
"""
To set up the OpenShift driver you need
* a working OpenShift instance
* a user in the OpenShift instance (a separate machine to machine account is
recommended)
1. Start by adding the base URL, username, password and subdomain in the creds
   file. The keys are named "OSD_XXX_BASE_URL", "OSD_XXX_SUBDOMAIN", "OSD_XXX_USER"
   and "OSD_XXX_PASSWORD", where XXX is the id of your installation
   (there can be multiple installations)
2. Restart Pebbles
3. Check out https://github.com/cscfi/notebook-images/
4. Log in as the M2M user using the *oc* command line utility
5. run build_openshift.sh to build and publish images to the OpenShift Docker registry
6. Enable OpenShiftDriver in the Admin UI
"""
import base64
import json
import time
import uuid
from pprint import pprint
import requests
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import urlparse, parse_qs
from pebbles.client import PBClient
from pebbles.drivers.provisioning import base_driver
from pebbles.utils import parse_maximum_lifetime
# maximum time to wait for pod creation before failing
MAX_POD_SPAWN_WAIT_TIME_SEC = 900
# maximum time to wait for pod (down) scaling
MAX_POD_SCALE_WAIT_TIME_SEC = 120
# refresh the token if it is this close to expiration
TOKEN_REFRESH_DELTA = 600
class OpenShiftClient(object):
"""
An abstraction of accessing an OpenShift cluster
"""
def __init__(self, base_url, subdomain, user, password):
"""
Constructor
:param base_url: url to access the api, like https://oso.example.org:8443/
:param subdomain: the subdomain for creating the routes, like osoapps.example.org
:param user:
:param password:
"""
if base_url[-1] == '/':
base_url = base_url[:-1]
self.base_url = base_url
self.subdomain = subdomain
self.oapi_base_url = base_url + '/oapi/v1'
self.kube_base_url = base_url + '/api/v1'
self.template_base_url = base_url + '/apis/template.openshift.io/v1'
self.user = user
self.password = password
# token_data caches the token to access the API. See _request_token() for details.
self.token_data = None
self._session = requests.session()
@staticmethod
def make_base_kube_object(kind, name=None):
return dict(
kind=kind,
apiVersion="v1",
metadata=dict(
name=name
)
)
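    # For example (the name below is illustrative), make_base_kube_object('ProjectRequest', 'demo-proj')
    # returns {'kind': 'ProjectRequest', 'apiVersion': 'v1', 'metadata': {'name': 'demo-proj'}}.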
@staticmethod
def print_response(resp):
if resp.ok:
print('success: %s' % resp.status_code)
pprint(resp.json())
else:
print('error in response: %s %s %s' % (resp.status_code, resp.reason, resp.text))
def _request_token(self, current_ts=None):
"""
Requests an access token for the cluster
:param current_ts: current timestamp
:return: dict containing access_token, lifetime and expiry time
"""
url = self.base_url + '/oauth/authorize'
auth_encoded = base64.b64encode(bytes('%s:%s' % (self.user, self.password)))
headers = {
'Authorization': 'Basic %s' % str(auth_encoded),
'X-Csrf-Token': '1'
}
params = {
'response_type': 'token',
'client_id': 'openshift-challenging-client'
}
resp = requests.get(url, headers=headers, verify=False, params=params, allow_redirects=False)
location = resp.headers.get('location')
if not current_ts:
current_ts = int(time.time())
parsed_data = urlparse(location)
parsed_query = parse_qs(parsed_data.fragment)
return {
'access_token': parsed_query['access_token'][0],
'lifetime': int(parsed_query['expires_in'][0]),
'expires_at': int(parsed_query['expires_in'][0]) + current_ts,
}
def _get_token(self, current_ts=None):
"""
Caching version of _request_token
"""
if not self.token_data:
self.token_data = self._request_token(current_ts)
else:
if not current_ts:
current_ts = int(time.time())
if self.token_data['expires_at'] - TOKEN_REFRESH_DELTA < current_ts:
self.token_data = self._request_token(current_ts)
return self.token_data['access_token']
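    # The cached token_data returned by _request_token() has this shape (values illustrative):
    #   {'access_token': '<opaque token>', 'lifetime': 86400, 'expires_at': 1617184000}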
def _construct_object_url(self, api_type, namespace=None, object_kind=None, object_id=None, subop=None):
"""
Create a url string for given object
        :param api_type: which API to use: 'kubeapi' (plain k8s), 'template_oapi' or 'oapi' (OpenShift)
:param namespace: namespace for the object
:param object_kind: type of the object
:param object_id: id of the object
:return: url string, like 'https://oso.example.org:8443/api/v1/my-project/pods/18hfgy1'
"""
if api_type == 'kubeapi':
url_components = [self.kube_base_url]
elif api_type == 'template_oapi':
url_components = [self.template_base_url]
else:
url_components = [self.oapi_base_url]
if namespace:
url_components.append('namespaces')
url_components.append(namespace)
if object_kind:
url_components.append(object_kind)
if object_id:
url_components.append(object_id)
if subop:
url_components.append(subop)
url = '/'.join(url_components)
return url
def make_request(self, method=None, api_type='oapi', verbose=False, namespace=None, object_kind=None, object_id=None,
subop=None, params=None, data=None, raise_on_failure=True):
"""
Makes a request to OpenShift API
:param method: GET, PUT, POST
        :param api_type: which API to use: 'kubeapi' (plain k8s), 'template_oapi' or 'oapi' (OpenShift)
:param verbose: debugging on
:param namespace: namespace for the object
:param object_kind: type of the object
:param object_id: id of the object
:param subop: if it's a suboperation eg. getting logs of an object
:param params: request parameters
:param data: request data
:param raise_on_failure: should we raise a RuntimeError on failure
:return: response object from requests session
"""
url = self._construct_object_url(api_type, namespace, object_kind, object_id, subop)
headers = {'Authorization': 'Bearer %s' % self._get_token()}
if isinstance(data, dict):
data = json.dumps(data)
if data:
if not method or method == 'POST':
resp = self._session.post(url, headers=headers, verify=False, params=params, data=data)
elif method == 'PUT':
resp = self._session.put(url, headers=headers, verify=False, params=params, data=data)
else:
raise RuntimeError('Do not know what to do with data and method %s' % method)
else:
if method and method != 'GET':
raise RuntimeError('Do not know what to do with no data and method %s' % method)
resp = self._session.get(url, headers=headers, verify=False, params=params)
if verbose:
self.print_response(resp)
if raise_on_failure and not resp.ok:
raise RuntimeError(resp.text)
return resp
def make_delete_request(self, api_type='oapi', verbose=False, namespace=None, object_kind=None, object_id=None,
params=None, raise_on_failure=True):
"""
Makes a delete request to OpenShift API
        :param api_type: which API to use: 'kubeapi' (plain k8s) or 'oapi' (OpenShift)
:param verbose: debugging on
:param namespace: namespace for the object
:param object_kind: type of the object
:param object_id: id of the object
:param raise_on_failure: should we raise a RuntimeError on failure
:return: response object from requests session
"""
url = self._construct_object_url(api_type, namespace, object_kind, object_id)
headers = {'Authorization': 'Bearer %s' % self._get_token()}
resp = self._session.delete(url, headers=headers, verify=False, params=params)
if verbose:
self.print_response(resp)
if raise_on_failure and not resp.ok:
raise RuntimeError(resp.text)
return resp
def search_by_label(self, api_type, namespace=None, object_kind=None, params=None):
"""
Performs a search by label(s)
        :param api_type: 'kubeapi' to use the plain k8s API instead of the OpenShift API
:param namespace:
:param object_kind:
:param params: a dict containing search criteria, like {'labelSelector': 'app=my-app'}
:return: search results as json
"""
res = self.make_request(
api_type=api_type,
namespace=namespace,
object_kind=object_kind,
params=params
)
res_json = res.json()
return res_json.get('items', [])
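    # Usage sketch (names illustrative): list the pods labelled run=my-pod in project 'demo'
    # through the plain kubernetes API:
    #   pods = oc.search_by_label('kubeapi', namespace='demo', object_kind='pods',
    #                             params={'labelSelector': 'run=my-pod'})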
class OpenShiftDriverAccessProxy(object):
"""
Abstraction layer for isolating driver from real world to enable mocking in unit tests
"""
def __init__(self, m2m_creds):
self._m2m_creds = m2m_creds
def get_openshift_client(self, cluster_id):
key_base = 'OSD_%s_' % cluster_id
return OpenShiftClient(
base_url=self._m2m_creds.get(key_base + 'BASE_URL'),
subdomain=self._m2m_creds.get(key_base + 'SUBDOMAIN'),
user=self._m2m_creds.get(key_base + 'USER'),
password=self._m2m_creds.get(key_base + 'PASSWORD'),
)
@staticmethod
def get_pb_client(token, api_base_url, ssl_verify):
return PBClient(token, api_base_url, ssl_verify)
class OpenShiftDriver(base_driver.ProvisioningDriverBase):
""" OpenShift Driver allows provisioning instances in an existing OpenShift cluster.
It creates a project per user, identified by user eppn, and optionally a persistent
volume claim (PVC) for user data.
The driver needs credentials for the cluster. The credentials are placed in the same
m2m creds file that OpenStack and Docker driver use. The keys are as follows:
"OSD_[cluster_id]_BASE_URL": "https://oso-cluster-api.example.org:8443",
"OSD_[cluster_id]_SUBDOMAIN": "oso-cluster.example.org",
"OSD_[cluster_id]_USER": "pebbles-m2m-user",
"OSD_[cluster_id]_PASSWORD": "sickritt"
    Replace [cluster_id] with a string that uniquely identifies the cluster. When creating a blueprint template,
    refer to the cluster id in the configuration, key 'openshift_cluster_id'. You can have multiple
credentials configured in the creds file.
"""
def get_configuration(self):
from pebbles.drivers.provisioning.openshift_driver_config import CONFIG
config = CONFIG.copy()
return config
def get_running_instance_logs(self, token, instance_id):
""" Get the logs of the openshift based instance which is in running state """
self.logger.debug("getting container logs for instance id %s" % instance_id)
ap = self._get_access_proxy()
pbclient = ap.get_pb_client(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
running_log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='running')
instance = pbclient.get_instance_description(instance_id)
# create openshift client by getting the cluster id from the blueprint config
blueprint = pbclient.get_blueprint_description(instance['blueprint_id'])
blueprint_config = blueprint['full_config']
oc = ap.get_openshift_client(
cluster_id=blueprint_config['openshift_cluster_id'],
)
instance_name = instance['name']
project = self._get_project_name(instance)
log_res = oc.make_request(
method='GET',
namespace=project,
object_kind='deploymentconfigs',
object_id=instance_name,
subop='log',
)
running_log_uploader.info(log_res.text)
def _get_access_proxy(self):
if not getattr(self, '_ap', None):
m2m_creds = self.get_m2m_credentials()
self._ap = OpenShiftDriverAccessProxy(m2m_creds)
return self._ap
def do_update_connectivity(self, token, instance_id):
self.logger.warning('do_update_connectivity not implemented')
def do_provision(self, token, instance_id):
self.logger.debug('do_provision %s' % instance_id)
log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')
log_uploader.info('Provisioning OpenShift based instance (%s)\n' % instance_id)
return self._do_provision(token, instance_id, int(time.time()))
def _do_provision(self, token, instance_id, cur_ts):
"""
Provisions a new instance on OpenShift.
:param token: token to access the API with
:param instance_id: instance that should be provisioned
:param cur_ts: current time
"""
ap = self._get_access_proxy()
pbclient = ap.get_pb_client(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')
instance = pbclient.get_instance_description(instance_id)
# fetch config
blueprint = pbclient.get_blueprint_description(instance['blueprint_id'])
blueprint_config = blueprint['full_config']
instance_token = None
if 'auto_authentication' in blueprint_config and blueprint_config['auto_authentication']:
instance_seconds = parse_maximum_lifetime(blueprint_config['maximum_lifetime'])
instance_token = pbclient.create_instance_token(instance_id, instance_seconds)
# get/generate a project name
project_name = self._get_project_name(instance)
# create an openshift client based for selected cluster
oc = ap.get_openshift_client(
cluster_id=blueprint_config['openshift_cluster_id'],
)
# create a dict out of space separated list of VAR=VAL entries
env_var_array = blueprint_config.get('environment_vars', '').split()
env_vars = {k: v for k, v in [x.split('=') for x in env_var_array]}
env_vars['INSTANCE_ID'] = instance_id
# merge the autodownload vars into environment
for var_suffix in ('url', 'filename'):
var = 'autodownload_{}'.format(var_suffix)
if blueprint_config.get(var, None):
env_vars[var.upper()] = blueprint_config[var]
# create a project and PVC if necessary and spawn a pod (through DC/RC), service and route
res = self._spawn_project_and_objects(
oc=oc,
project_name=project_name,
blueprint_config=blueprint_config,
instance=instance,
environment_vars=env_vars
)
instance_data = {
'endpoints': [
{
'name': 'https',
'access': res['route']
},
],
'project_name': project_name,
'spawn_ts': cur_ts
}
# if instance token is created, then append it along with instance id in the query string
if instance_token:
endpoints = instance_data['endpoints']
for endpoint_i in endpoints:
endpoint_i['access'] += '?token=' + instance_token + '&instance_id=' + instance_id
if 'show_password' in blueprint_config and blueprint_config['show_password']:
instance_data['password'] = instance_id
pbclient.do_instance_patch(
instance_id,
{
'instance_data': json.dumps(instance_data),
}
)
log_uploader.info("provisioning done for %s\n" % instance_id)
def _create_project(self, oc, project_name):
# https://server:8443/oapi/v1/projectrequests
project_data = oc.make_base_kube_object('ProjectRequest', project_name)
# create the project if it does not exist yet
from time import sleep
for delay_count in range(0, 30): # sometimes the project isn't created and the code starts to create pvc
res = oc.make_request(object_kind='projects', object_id=project_name, raise_on_failure=False)
if not res.ok and res.status_code == 403:
oc.make_request(object_kind='projectrequests', data=project_data)
else:
break # project has been created, exit the loop
sleep(2) # sleep for 2 seconds, the loop goes on for 1 minute
# noinspection PyTypeChecker
def _spawn_project_and_objects(self, oc, project_name, blueprint_config, instance, environment_vars=None):
"""
Creates an OpenShift project (if needed) and launches a pod in it. If a volume mount point is requested,
a volume is allocated (if needed) and mounted to the pod. A secure route is also created.
:param oc: openshift client to use
:param project_name: namespace/project name
        :param blueprint_config: blueprint configuration; the image, port, memory_limit and
            (optional) volume_mount_point are taken from it
        :param instance: instance description; instance['name'] is used as the pod name
        :param environment_vars: environment vars to set in the pod
:return: dict with key 'route' set to the provisioned route to the instance
"""
self._create_project(oc, project_name)
pod_name = instance['name']
pod_image = blueprint_config['image']
port = int(blueprint_config['port'])
pod_memory = blueprint_config['memory_limit']
volume_mount_point = blueprint_config.get('volume_mount_point', None)
# create PVC if it does not exist yet
if volume_mount_point:
pvc_data = {
'apiVersion': 'v1',
'kind': 'PersistentVolumeClaim',
'metadata': {
'name': 'pvc001'
},
'spec': {
'accessModes': ['ReadWriteMany'],
'resources': {
'requests': {
'storage': '1Gi'
}
},
},
}
# calling kubernetes API here
# https://server:8443/api/v1/namespaces/project/persistentvolumeclaims
# first check if we already have a PVC
res = oc.make_request(
namespace=project_name,
object_kind='persistentvolumeclaims',
object_id=pvc_data['metadata']['name'],
api_type='kubeapi',
raise_on_failure=False,
)
# nope, let's create it
if not res.ok and res.status_code == 404:
oc.make_request(
namespace=project_name,
object_kind='persistentvolumeclaims',
data=pvc_data,
api_type='kubeapi'
)
# https://server:8443/oapi/v1/namespaces/project/deploymentconfigs
dc_data = {
'kind': 'DeploymentConfig',
'apiVersion': 'v1',
'metadata': {
'name': pod_name,
'creationTimestamp': None,
'labels': {
'run': pod_name
}
},
'spec': {
'strategy': {
'resources': {}
},
'triggers': None,
'replicas': 1,
'test': False,
'selector': {
'run': pod_name
},
'template': {
'metadata': {
'creationTimestamp': None,
'labels': {
'run': pod_name
}
},
'spec': {
'containers': [
{
'name': pod_name,
'image': pod_image,
'resources': {
'requests': {
'memory': pod_memory
},
'limits': {
'memory': pod_memory
},
},
'ports': [
{
'protocol': 'TCP',
'containerPort': port,
}
],
'readinessProbe': {
'httpGet': {
'path': '/',
'port': port,
'scheme': 'HTTP',
}
}
}
],
}
}
}
}
dc_volume_mounts = []
dc_volumes = []
# workaround for service account secret automount.
# see https://github.com/kubernetes/kubernetes/issues/16779
# TODO revise this workaround
dc_volume_mounts.append(
{
'mountPath': '/var/run/secrets/kubernetes.io/serviceaccount',
'name': 'nosecret',
}
)
dc_volumes.append(
{
'name': 'nosecret',
'emptyDir': {}
}
)
# add a mount point to persistent storage, if configured
if volume_mount_point:
dc_volume_mounts.append(
{
'mountPath': volume_mount_point,
'name': 'work',
}
)
dc_volumes.append(
{
'name': 'work',
'persistentVolumeClaim': {
'claimName': 'pvc001'
}
}
)
dc_data['spec']['template']['spec']['containers'][0]['volumeMounts'] = dc_volume_mounts
dc_data['spec']['template']['spec']['volumes'] = dc_volumes
# create environment variables for the blueprint
env_data = []
if environment_vars:
for key, value in environment_vars.items():
env_data.append({'name': key, 'value': value})
dc_data['spec']['template']['spec']['containers'][0]['env'] = env_data
oc.make_request(namespace=project_name, object_kind='deploymentconfigs', data=dc_data)
pod_selector_params = dict(labelSelector='run=%s' % pod_name)
self._wait_for_pod_creation(oc, project_name, pod_selector_params, pod_name)
# calling kubernetes API here
# https://server:8443/api/v1/namespaces/project/services
svc_data = {
'kind': 'Service',
'apiVersion': 'v1',
'metadata': {
'name': pod_name,
'labels': {
'run': pod_name
}
},
'spec': {
'ports': [
{
'protocol': 'TCP',
'port': port,
'targetPort': port,
}
],
'selector': {
'run': pod_name
}
}, 'status': {'loadBalancer': {}}
}
oc.make_request(api_type='kubeapi', namespace=project_name, object_kind='services', data=svc_data)
route_host = '%s-%s.%s' % (pod_name, uuid.uuid4().hex[:10], oc.subdomain)
# https://server:8443/oapi/v1/namespaces/project/routes
route_data = {
'kind': 'Route',
'apiVersion': 'v1',
'metadata': {
'name': pod_name,
'labels': {
'run': pod_name
}
},
'spec': {
'host': route_host,
'to': {
'name': pod_name,
},
'port': {
'targetPort': port,
},
'tls': {
'termination': 'edge'
}
}
}
route_data = oc.make_request(namespace=project_name, object_kind='routes', data=route_data)
route_json = route_data.json()
return dict(route='https://%s/' % route_json['spec']['host'])
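    # The returned route is built from the pod name, a random suffix and the cluster subdomain,
    # e.g. (illustrative) {'route': 'https://mypod-1a2b3c4d5e.osoapps.example.org/'}.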
def _wait_for_pod_creation(self, oc, project_name, params, pod_name):
# wait for pod to become ready
end_ts = time.time() + MAX_POD_SPAWN_WAIT_TIME_SEC
while time.time() < end_ts:
# pods live in kubernetes API
res = oc.make_request(
api_type='kubeapi',
namespace=project_name,
object_kind='pods',
params=params,
)
if res.ok:
pod_ready = None
for pod in res.json()['items']:
try:
pod_status = pod['status']['phase']
if pod_status != 'Running':
break
pod_ready = True
except:
pass
if pod_ready:
break
self.logger.debug('waiting for pod to be ready %s' % pod_name)
time.sleep(5)
else:
raise RuntimeError('Timeout waiting for pod readiness for %s' % params['labelSelector'])
def do_deprovision(self, token, instance_id):
return self._do_deprovision(token, instance_id)
def _do_deprovision(self, token, instance_id):
"""
Deprovisions an instance. It removes
- DeploymentConfig (DC)
- ReplicationController (RC)
- Service
- Route
DC is removed first, then RC is scaled down to get rid of Pods. When the Pods are gone, RC is removed.
Then Service and Route are removed.
Any volumes attached to the instance are left intact, as well as the project the instance is running in.
:param token: token to access API
:param instance_id: the instance to delete
"""
self.logger.debug('do_deprovision %s' % instance_id)
ap = self._get_access_proxy()
pbclient = ap.get_pb_client(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
instance = pbclient.get_instance_description(instance_id)
log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='deprovisioning')
log_uploader.info('Deprovisioning OpenShift based instance (%s)\n' % instance_id)
# fetch config
blueprint = pbclient.get_blueprint_description(instance['blueprint_id'])
blueprint_config = blueprint['full_config']
if 'auto_authentication' in blueprint_config and blueprint_config['auto_authentication']:
try:
pbclient.instance_token_delete(instance_id)
except:
self.logger.warning("the token seems to be not present or deleted already")
oc = ap.get_openshift_client(
cluster_id=blueprint_config['openshift_cluster_id'],
)
project = self._get_project_name(instance)
self._delete_objects(oc=oc, project=project, blueprint_config=blueprint_config, instance=instance)
def _delete_objects(self, oc, project, instance, blueprint_config=None):
""" Delete openshift objects
"""
name = instance['name']
instance_id = instance['id']
# remove dc
res = oc.make_delete_request(
namespace=project,
object_kind='deploymentconfigs',
object_id=name,
raise_on_failure=False,
)
if not res.ok:
if res.status_code == 404:
self.logger.warning('do_deprovision: DC not found, assuming deleted: %s' % name)
else:
raise RuntimeError(res.reason)
# find rc
params = dict(labelSelector='run=%s' % name)
rc_list = oc.search_by_label(
api_type='kubeapi',
namespace=project,
object_kind='replicationcontrollers',
params=params
)
# then set replicas to 0 and let pods die
for rc in rc_list:
self._scale_rc(oc, project, rc, 0)
# remove rc
for rc in rc_list:
res = oc.make_delete_request(
api_type='kubeapi',
namespace=project,
object_kind='replicationcontrollers',
object_id=rc['metadata']['name'],
raise_on_failure=False,
)
if not res.ok:
if res.status_code == 404:
self.logger.warning('do_deprovision: RC not found, assuming deleted: %s' % name)
else:
raise RuntimeError(res.reason)
# remove route
res = oc.make_delete_request(
namespace=project,
object_kind='routes',
object_id=name,
raise_on_failure=False,
)
if not res.ok:
if res.status_code == 404:
self.logger.warning('do_deprovision: route not found, assuming deleted: %s' % name)
else:
raise RuntimeError(res.reason)
# remove service
res = oc.make_delete_request(
api_type='kubeapi',
namespace=project,
object_kind='services',
object_id=name,
raise_on_failure=False,
)
if not res.ok:
if res.status_code == 404:
self.logger.warning('do_deprovision: service not found, assuming deleted: %s' % name)
else:
raise RuntimeError(res.reason)
self.logger.debug('do_deprovision done for %s' % instance_id)
@staticmethod
def _scale_rc(oc, project, rc, num_replicas):
"""
Scale a ReplicationController and wait for the amount of replicas to catch up. The maximum waiting time
is taken from MAX_POD_SCALE_WAIT_TIME_SEC
:param oc: the openshift client to use
:param project: project name
:param rc: ReplicationController name
:param num_replicas: new number of replicas
"""
# scale the pods down
rc['spec']['replicas'] = num_replicas
res = oc.make_request(
api_type='kubeapi',
method='PUT',
namespace=project,
object_kind='replicationcontrollers',
object_id=rc['metadata']['name'],
data=rc,
)
if not res.ok:
raise RuntimeError(res.reason)
# wait for scaling to be complete
end_ts = time.time() + MAX_POD_SCALE_WAIT_TIME_SEC
while time.time() < end_ts:
res = oc.make_request(
api_type='kubeapi',
namespace=project,
object_kind='replicationcontrollers',
object_id=rc['metadata']['name'],
)
rc = res.json()
if int(rc['status']['replicas']) == num_replicas:
break
time.sleep(2)
else:
raise RuntimeError('Could not scale pods to %d for %s' % (num_replicas, rc['metadata']['name']))
@staticmethod
def _get_project_name(instance):
"""
Generate a project name from instance data. If the instance data already has 'project_name' attribute,
use that.
:param instance: dict containing instance data
:return: project name based on username and first 4 characters of user id
"""
if 'instance_data' in instance and 'project_name' in instance['instance_data']:
return instance['instance_data']['project_name']
else:
# create a project name based on username and userid
name = ('%s-%s' % (instance['username'], instance['user_id'][:4]))
name = name.replace('@', '-at-').replace('.', '-').lower()
return name
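    # For example (illustrative), username 'alice@example.org' with a user id starting with
    # 'dead' yields the project name 'alice-at-example-org-dead'.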
def do_housekeep(self, token):
# TODO: Implement optional cleaning of the old projects.
self.logger.info('do_housekeep not implemented')
| CSC-IT-Center-for-Science/pouta-blueprints | pebbles/drivers/provisioning/openshift_driver.py | Python | mit | 32,797 |
#!/usr/bin/env python
# coding=utf-8
from biplist import *
import os
import subprocess
from util.colorlog import *
#Consignor app PLIST_PATH="/Consignor4ios/Supporting Files/Consignor4ios-Info.plist"
#Driver app PLIST_PATH="/NewDriver4iOS/NewDriver4iOS/Info.plist"
VERSION_KEY = "CFBundleShortVersionString"
BUILD_VERSON_KEY = "CFBundleVersion"
HOOKS_PATH = ".git/hooks"
PROJECT_SUF = ".xcodeproj"
PLIST_SUF = ".plist"
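# Usage sketch from a git hook script (assumes this module is importable from .git/hooks):
#   import AutoVersion
#   AutoVersion.add_version_if_need()     # e.g. in a commit hook, bump CFBundleVersion
#   AutoVersion.reduce_version_if_need()  # roll the bump back if the hook fails
# Enable the feature with: git config githooks.autoversion "YES"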
# public
def add_version_if_need():
if open_fun() == "YES":
print "已开启自动递增build版本号的功能"
change_plist_verson(True)
pass
else:
print_log("未开启自动递增build版本号的功能,如需开启请执行:git config githooks.autoversion \"YES\"")
pass
def reduce_version_if_need():
if open_fun() == "YES":
change_plist_verson(False)
pass
pass
def read_current_project_version():
    # A .plist_path file must be configured in the repository, pointing to the relevant plist file, so the version number can be appended
config_path = get_project_path()+os.path.sep+".plist_path"
if not(os.path.exists(config_path) and os.path.isfile(config_path)):
return ""
pass
with open(config_path, 'r+') as f:
relative_path = f.read()
pass
plist_path = get_project_path()+os.path.sep+relative_path
if not check_file_is_plsit(plist_path):
print plist_path+"不是plist文件"
return ""
pass
current_version = read_plist_for_key(plist_path, VERSION_KEY, False, "")
if len(current_version)>0:
return "["+current_version+"]"
pass
return ""
pass
def write_version(current_version,plist_path,add):
if current_version == None:
return
las_dot_index = current_version.rfind('.')
pre_version_str = current_version[0:las_dot_index+1]
last_version_str = current_version[las_dot_index+1:len(current_version)]
version = str(int(last_version_str)+1) if add else str(int(last_version_str)-1)
write_version_str = pre_version_str+version
plist_dic = readPlist(plist_path)
plist_dic[BUILD_VERSON_KEY] = write_version_str
writePlist(plist_dic,plist_path)
if add:
loggreen("已成功将版本号"+current_version+"改成为"+write_version_str)
pass
else:
logred("已成功将版本号"+current_version+"回滚至"+write_version_str)
pass
def print_log(log):
print log
pass
def read_version(plist_path, can_raise):
if not os.path.exists(plist_path):
log = "路径%s找不到plist文件" % plist_path
raise(IOError(log)) if can_raise else print_log(log)
return None
pass
try:
plist = readPlist(plist_path)
try:
version = plist[BUILD_VERSON_KEY]
return version
except(KeyError, Exception),e:
log = plist_path+"文件里没有这个key:"+BUILD_VERSON_KEY
raise(IOError(log)) if can_raise else print_log(log)
return None
except (InvalidPlistException, NotBinaryPlistException), e:
log = "路径%s不是plist文件" % plist_path
raise(IOError(log)) if can_raise else print_log(log)
return None
pass
def get_project_path():
current_path = os.getcwd()
if current_path.find(HOOKS_PATH):
return current_path.replace(HOOKS_PATH,'')
else:
raise Exception("路径不在.git/hooks,请检查")
pass
def plist_father_path():
preject_path = get_project_path()
for file in os.listdir(preject_path):
if PROJECT_SUF in file:
preject_path = preject_path+'/'+file.replace(PROJECT_SUF,'')
break
pass
return preject_path+"/Supporting Files/"
pass
def plist_paths(plist_father_path):
plists = []
if not os.path.isdir(plist_father_path):
return plists
pass
for file in os.listdir(plist_father_path):
if PLIST_SUF in file:
plists.append(plist_father_path+file)
pass
pass
return plists
pass
def change_plist_verson(add):
plist_exist = True
current_version = ""
plist_path_arr = plist_paths(plist_father_path())
if len(plist_path_arr) == 0:
plist_exist = False
pass
    # first try to read from the default path
for plist_path in plist_path_arr:
current_version = read_version(plist_path,False)
if current_version == None:
plist_exist = False
break
pass
else:
write_version(current_version, plist_path, add)
pass
pass
    # if nothing was found there, read from the configured paths instead
if not plist_exist:
config_plist_paths = ""
try:
config_plist_paths = subprocess.check_output('git config githooks.plistpaths', shell=True).strip()
pass
except subprocess.CalledProcessError as e:
log = "默认路径没有plist文件,请在%s路径下配置plist的相对路径,如果有多个," % get_project_path()
example = "请以逗号隔开,示例:git config githooks.plistpaths \"xxx/info.plist,ooo/info2.plist\""
ex = log+example
raise IOError(ex)
config_plist_path_arr = config_plist_paths.split(',')
        # iterate over the relative paths
for config_relative_plist_path in config_plist_path_arr:
            # absolute path
config_plist_path = get_project_path()+config_relative_plist_path
current_version = read_version(config_plist_path,True)
write_version(current_version, config_plist_path, add)
pass
pass
# public
def open_fun():
return check_out_put("git config githooks.autoversion", False, "NO")
pass
def reset_autoversion_state():
if check_out_put('git config githooks.autoversion', False ,"NO") == "YES":
check_out_put('git config githooks.autoversion \"NO\"', False, "")
pass
pass
pass
def check_out_put(cammand, can_raise, return_value):
try:
return subprocess.check_output(cammand, shell=True).strip()
pass
except subprocess.CalledProcessError as e:
if can_raise:
raise(e)
else:
return return_value
pass
pass
# basic helpers
def check_file_is_plsit(plist_path):
try:
plist = readPlist(plist_path)
return True
except (InvalidPlistException, NotBinaryPlistException), e:
return False
pass
# the caller must first verify that this is a plist file
def read_plist_for_key(plist_path, key, can_raise, return_value):
plist = readPlist(plist_path)
try:
return plist[key]
except(KeyError, Exception),e:
return return_value
pass
| spWang/gitHooks | AutoVersion.py | Python | mit | 6,774 |
# mousepointer.py
# TODO:
# - remove code duplication in RegisterImage/RegisterStream
import wx
import colordb
import os
import sys
import utils
POINTERS = {
"arrow" : wx.CURSOR_ARROW,
"arrowright" : wx.CURSOR_RIGHT_ARROW,
"bullseye" : wx.CURSOR_BULLSEYE,
"char" : wx.CURSOR_CHAR,
"cross" : wx.CURSOR_CROSS,
"hand" : wx.CURSOR_HAND,
"ibeam" : wx.CURSOR_IBEAM,
"buttonleft" : wx.CURSOR_LEFT_BUTTON,
"magnifier" : wx.CURSOR_MAGNIFIER,
"buttonmiddle" : wx.CURSOR_MIDDLE_BUTTON,
"noentry" : wx.CURSOR_NO_ENTRY,
"paintbrush" : wx.CURSOR_PAINT_BRUSH,
"pencil" : wx.CURSOR_PENCIL,
"pointleft" : wx.CURSOR_POINT_LEFT,
"pointright" : wx.CURSOR_POINT_RIGHT,
"arrowquestion" : wx.CURSOR_QUESTION_ARROW,
"buttonright" : wx.CURSOR_RIGHT_BUTTON,
"sizenesw" : wx.CURSOR_SIZENESW,
"sizens" : wx.CURSOR_SIZENS,
"sizenwse" : wx.CURSOR_SIZENWSE,
"sizewe" : wx.CURSOR_SIZEWE,
"sizing" : wx.CURSOR_SIZING,
"spraycan" : wx.CURSOR_SPRAYCAN,
"wait" : wx.CURSOR_WAIT,
"watch" : wx.CURSOR_WATCH,
"blank" : wx.CURSOR_BLANK,
"default" : wx.CURSOR_DEFAULT,
"arrowcopy" : wx.CURSOR_COPY_ARROW,
"arrowwait" : wx.CURSOR_ARROWWAIT,
}
class MousePointerRegistry:
def __init__(self):
self.custom = {}
def Get(self, name):
if POINTERS.has_key(name):
return wx.StockCursor(POINTERS[name]) # return a cursor
elif self.custom.has_key(name):
return self.custom[name] # return an image-as-cursor
else:
raise KeyError, "Unknown pointer name: %s" % (name,)
def Set(self, name, value):
self.custom[name] = value
Register = Set
def GetBuiltinNames(self):
return POINTERS.keys()
def GetCustomNames(self):
return self.custom.keys()
def GetNames(self):
return self.GetBuiltinNames() + self.GetCustomNames()
def _RegisterImage(self, name, image, maskcolor='white', hotx=None, hoty=None):
""" Register a wx.Image as a cursor. """
if sys.platform =='win32':
#cursors fixed size 32,32
xratio = image.GetWidth() / 32
yratio = image.GetHeight() / 32
else:
xratio = yratio = 1
if not image.HasMask():
c = colordb.convert_color(maskcolor)
image.SetMaskColour(c[0], c[1], c[2])
if not hotx:
hotx = image.GetWidth() / 2
hotx = hotx / xratio
image.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_X, hotx)
if not hoty:
hoty = image.GetHeight() / 2
hoty = hoty / yratio
image.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, hoty)
self.custom[name] = wx.CursorFromImage(image)
def RegisterImage(self, name, filename, maskcolor='white', hotx=None, hoty=None):
i = wx.Image(utils.opj(filename), wx.BITMAP_TYPE_ANY)
self._RegisterImage(name, i, maskcolor, hotx, hoty)
def RegisterStream(self, name, stream, maskcolor='white', hotx=None, hoty=None):
i = wx.ImageFromStream(stream)
self._RegisterImage(name, i, maskcolor, hotx, hoty)
# global registry
MousePointers = MousePointerRegistry()
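# Usage sketch (the window `frame` and the file 'mycursor.png' are hypothetical):
#   frame.SetCursor(MousePointers.Get('wait'))               # builtin pointer
#   MousePointers.RegisterImage('mycursor', 'mycursor.png')  # custom image pointer
#   frame.SetCursor(MousePointers.Get('mycursor'))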
| bblais/plasticity | plasticity/dialogs/waxy/mousepointer.py | Python | mit | 3,190 |
# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal,
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
def test_creation_with_list_of_maskedarrays(self):
        # Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
#self.assertTrue( y1._data is x1)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
#self.assertTrue( y1.mask is m)
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
self.assertTrue(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
#self.assertTrue( y2.mask is m)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
#self.assertTrue( y2.mask is not m)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
def test_pickling(self):
# Tests pickling
a = arange(10)
a[::3] = masked
a.fill_value = 999
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_w_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_w_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_w_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_w_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
def test_filled_w_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
self.assertTrue(a.flags['F_CONTIGUOUS'])
self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
# Test printing a mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
def test_mvoid_multidim_print(self):
# regression test for gh-6019
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
dtype = [('a', '<i8', (3,))])
assert str(t_ma[0]) == "([1, --, 3],)"
assert repr(t_ma[0]) == "([1, --, 3],)"
        # additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
dtype = [('a', '<i8', (2,2))])
assert str(t_2d[0]) == "([[1, --], [--, 4]],)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]],)"
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
dtype = [('a', '<i8'), ('b', '<i8')])
assert str(t_0d[0]) == "(--, 2)"
assert repr(t_0d[0]) == "(--, 2)"
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
dtype = [('a', '<i8', (2,2)), ('b', float)])
assert str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
dtype = [('a', '<i8'), ('b', 'i4,i4')])
assert str(t_ne[0]) == "(--, (--, 1))"
assert repr(t_ne[0]) == "(--, (--, 1))"
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert mx[0] is mx1
assert mx[1] is not mx2
assert np.all(mx[1].data == mx2.data)
assert np.all(mx[1].mask)
# check that we return a view.
mx[1].data[0] = 0.
assert mx2[0] == 0.
class TestMaskedArrayArithmetic(TestCase):
    # Tests of arithmetic operations on MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
        # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
        # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
        # Tests (in)equality on the masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
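        # Multiplying or adding an array with the masked singleton should
        # give a fully masked array of the same shape, e.g.
        #     masked_array([1, 2]) * masked  ->  [--, --]
        # rather than collapsing the whole result to the masked scalar.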
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
        assert_equal(0, count(array(1, mask=[1])))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
self.assertTrue(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(IndexError, ott.count, 1)
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # The builtin max doesn't work on shaped (multidimensional) arrays,
        # so ravel first
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum(xmr))
assert_equal(min(xr), minimum(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
        # Test np.maximum.reduce on an array w/ a full False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
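            # An integer output cannot represent masked entries, so the
            # masked min/max may refuse the request with a MaskError; the
            # test ignores that case and retries with a float output below.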
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
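        # The masked column broadcasts its mask across each row; the control
        # arrays below also encode the convention that the data stored under
        # the mask comes from the left-hand operand.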
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
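        # "Domained" operations (division and friends) additionally mask any
        # position where the denominator falls outside the operation's
        # domain (e.g. division by zero), on top of the operands' masks.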
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
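        # With shrink=False, an all-False mask should remain a full boolean
        # array instead of being collapsed to nomask by later operations.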
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_w_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# With partial mask
a = array([1, 2], mask=[0, 1])
assert_equal(a == None, False)
assert_equal(a.data == None, False)
assert_equal(a.mask == None, False)
assert_equal(a != None, True)
# With nomask
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
# With complete mask
a = array([1, 2], mask=True)
assert_equal(a == None, False)
assert_equal(a != None, True)
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_w_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
# Tests the keep mask flag
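        # keep_mask=True combines (ORs) the new mask with the mask already
        # carried by the input; keep_mask=False discards the old mask and
        # uses only the one passed in.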
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
        # keep_mask defaults to True
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
# Test hard_mask
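        # With a hard mask, assignments cannot overwrite or unmask masked
        # entries; with a soft mask, assigning to a masked entry stores the
        # value and clears the mask.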
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
# We need to copy, to avoid updating d in xh !
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask return self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
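        # With _smallmask enabled (the default), re-assigning a valid value
        # to the only masked entry lets the mask collapse back to nomask;
        # with _smallmask=False the full boolean mask array is kept.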
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
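        # .flat returns a MaskedIterator: indexing it should yield masked
        # scalars for masked entries and, on matrix subclasses, preserve the
        # (1, n) matrix shape of sliced results.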
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues(TestCase):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, asbytes("???"))
fval = _check_fill_value(fill_val, object)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#self.assertTrue(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
# We had a tailored comment to make sure special attributes are
# properly dealt with
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
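        # Setting the fill value on a field view such as a['a'] should be
        # reflected in the corresponding entry of the parent's fill_value,
        # which is what the assertions below check.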
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
# Check that fill_value is preserved if type is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute (by
# default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
        # Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
        # Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
class TestUfuncs(TestCase):
# Test class for the application of ufuncs on MaskedArrays.
def setUp(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
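        # For each name, the plain numpy/umath ufunc applied to the test
        # data is compared with its numpy.ma counterpart: the filled values
        # and the masks must both agree.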
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
        # Check that the mask of the result is an ndarray (not a MaskedArray...)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
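        # Returning NotImplemented from __mul__/__truediv__ lets Python fall
        # back to the other operand's reflected method, which is how MyClass
        # and MyClass2 (with higher __array_priority__) end up winning below.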
a = masked_array([1., 2.], mask=[1, 0])
self.assertRaises(TypeError, operator.mul, a, "abc")
self.assertRaises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
class TestMaskedArrayInPlaceArithmetics(TestCase):
    # Test in-place arithmetic on MaskedArrays.
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(xm._data,
# [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
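        # "Data friendly" means that where the result is masked, the
        # underlying .data still holds the left operand's original value
        # rather than an arbitrary one; the assertions check .data directly.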
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
                # Either way, both cases are handled identically below.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
                # Either way, both cases are handled identically below.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(TestCase):
    # Test class for miscellaneous MaskedArray methods.
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
        # Test allclose w/ masked values
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
self.assertTrue(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
self.assertTrue(store._mask, True)
self.assertTrue(store is not masked)
store = empty((), dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
self.assertTrue(store._mask, True)
self.assertTrue(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
#self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
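        # Putting masked values propagates their mask to the target positions.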
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
self.assertTrue(x[0] is not masked)
assert_equal(x[0], 0)
self.assertTrue(x[1] is not masked)
assert_equal(x[1], 3)
self.assertTrue(x[2] is masked)
self.assertTrue(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
        self.assertTrue(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on flexible dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
b = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
test = sort(a, endwith=False)
b = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
# Tests to list
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
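        # Mask the first column, the first row and the last element.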
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float)
cols = np.zeros(m, np.float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=np.object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
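        # A fully masked array has no valid data, so var/std should be masked.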
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=nout)
self.assertTrue(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(TestCase):
# Test class for miscellaneous functions.
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_w_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
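        # The result is masked wherever xm was masked or the condition fails.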
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in a plain ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
# Test make_mask_descr
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
# Standard standard
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
# Named+ shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using True as input. Won't work, but keep it for the kicks
# test = mask_or(mask, True)
# control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)
# assert_equal(test, control)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
        # Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
        # Test that compressed flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
        # with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
class TestMaskedFields(TestCase):
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
        # Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a dtype with shape, i.e. (float, 2)
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedView(TestCase):
def setUp(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert default_fill_value(1 + 1j) == 1.e20 + 0.0j
###############################################################################
if __name__ == "__main__":
run_module_suite()
| pyparallel/numpy | numpy/ma/tests/test_core.py | Python | bsd-3-clause | 161,025 |
from chaco.api import Plot, ArrayPlotData, PlotAxis
from traits.api import HasTraits, Instance
class FrequencyPlot(HasTraits):
plot = Instance(Plot)
plotdata = Instance(ArrayPlotData, ())
def __init__(self, model):
super(FrequencyPlot, self).__init__(model=model)
model.set_plot(self)
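    # Traits calls _plot_default() lazily to supply the initial value of the
    # `plot` trait.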
def _plot_default(self):
plot = Plot(self.plotdata)
return plot
def update_plotdata(self):
self.plotdata.set_data("response", self.model.enclosures[0].dBmag)
def get_plot_component(self):
self.plotdata.set_data("index", self.model.F)
self.plotdata.set_data("response", self.model.enclosures[0].dBmag)
self.plot.plot(("index", "response"), type="line", index_scale="log")
left = PlotAxis(orientation='left',
title='response (dB)',
mapper=self.plot.value_mapper,
component=self.plot)
bottom = PlotAxis(orientation='bottom',
title='Frequency (Hz)',
mapper=self.plot.index_mapper,
component=self.plot)
self.plot.underlays.append(left)
self.plot.underlays.append(bottom)
return self.plot
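# Minimal usage sketch (illustrative only): the model object is hypothetical
# and is only assumed to provide the attributes this class already uses,
# namely set_plot(), F and enclosures[0].dBmag.
#
#     model = SpeakerModel()                 # hypothetical model class
#     view = FrequencyPlot(model)
#     component = view.get_plot_component()  # Chaco component ready to embed
#     view.update_plotdata()                 # refresh after the model changes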
| kjordahl/subwoofer | frequency_plot.py | Python | gpl-3.0 | 1,257 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from six.moves.urllib import parse as urlparse
from txlib.http import exceptions
from txlib.http.auth import AnonymousAuth
from txlib.utils import _logger
class BaseRequest(object):
"""Base class for http request classes."""
errors = {
400: exceptions.RequestError,
401: exceptions.AuthorizationError,
403: exceptions.AuthenticationError,
404: exceptions.NotFoundError,
409: exceptions.ConflictError,
}
success = {
200: "OK",
201: "Created",
204: "Deleted",
}
error_messages = {
400: "Bad request: %s",
401: "Authorization is required: %s",
403: "Authentication error: %s",
404: "Entity was not found: %s",
409: "Error with the request: %s",
}
default_scheme = 'https'
def __init__(self, hostname, auth=AnonymousAuth()):
"""Initializer for the base class.
Save the hostname to use for all requests as well as any
authentication info needed.
Args:
hostname: The host for the requests.
auth: The authentication info needed for any requests.
"""
self._hostname = self._construct_full_hostname(hostname)
_logger.debug("Hostname is %s" % self._hostname)
self._auth_info = auth
def _construct_full_hostname(self, hostname):
"""Create a full (scheme included) hostname from the argument given.
Only HTTP and HTTP+SSL protocols are allowed.
Args:
hostname: The hostname to use.
Returns:
The full hostname.
Raises:
            ValueError: An unsupported protocol is used.
"""
if hostname.startswith(('http://', 'https://', )):
return hostname
if '://' in hostname:
protocol, host = hostname.split('://', 1)
raise ValueError('Protocol %s is not supported.' % protocol)
return '://'.join([self.default_scheme, hostname, ])
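    # For illustration, _construct_full_hostname('www.example.com') returns
    # 'https://www.example.com', an 'http://' or 'https://' hostname is
    # returned unchanged, and 'ftp://host' raises ValueError.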
def _construct_full_url(self, path):
"""Construct the full url from the host and the path parts."""
return urlparse.urljoin(self._hostname, path)
def _error_message(self, code, msg):
"""Return the message that corresponds to the
        given status code, formatted with the supplied message.
Args:
`code`: The http status code.
`msg`: The message to display.
Returns:
The error message for the code given.
"""
return self.error_messages[code] % msg
def _exception_for(self, code):
"""Return the exception class suitable for the specified HTTP
status code.
Raises:
UnknownError: The HTTP status code is not one of the knowns.
"""
if code in self.errors:
return self.errors[code]
        elif 500 <= code < 600:
return exceptions.RemoteServerError
else:
return exceptions.UnknownError
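# Usage sketch (illustrative): concrete request classes are expected to build
# on these helpers. The subclass and transport call below are hypothetical,
# not part of this library.
#
#     class JsonRequest(BaseRequest):
#         def get(self, path):
#             url = self._construct_full_url(path)
#             status, body = perform_http_get(url)   # hypothetical transport
#             if status in self.success:
#                 return body
#             raise self._exception_for(status)(body)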
| transifex/transifex-python-library | txlib/http/base.py | Python | lgpl-3.0 | 3,037 |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from functools import partial
from flask import request
from wtforms.fields import BooleanField, HiddenField, IntegerField, SelectField, StringField
from wtforms.validators import DataRequired, InputRequired, NumberRange, Optional, ValidationError
from indico.modules.categories.models.categories import Category, EventMessageMode
from indico.modules.categories.util import get_image_data, get_visibility_options
from indico.modules.events import Event
from indico.modules.events.fields import IndicoThemeSelectField
from indico.modules.events.models.events import EventType
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import (AccessControlListField, EditableFileField, EmailListField, HiddenFieldList,
IndicoEnumSelectField, IndicoMarkdownField, IndicoProtectionField,
IndicoTimezoneSelectField, MultipleItemsField, PrincipalListField)
from indico.web.forms.widgets import HiddenCheckbox, SwitchWidget
class CategorySettingsForm(IndicoForm):
BASIC_FIELDS = ('title', 'description', 'timezone', 'lecture_theme', 'meeting_theme', 'visibility',
'suggestions_disabled', 'event_creation_notification_emails', 'notify_managers')
EVENT_HEADER_FIELDS = ('event_message_mode', 'event_message')
title = StringField(_("Title"), [DataRequired()])
description = IndicoMarkdownField(_("Description"))
timezone = IndicoTimezoneSelectField(_("Timezone"), [DataRequired()],
description=_("Default timezone event lists will show up in. It will also be "
"used as a default for new events."))
lecture_theme = IndicoThemeSelectField(_("Theme for Lectures"), [DataRequired()], event_type=EventType.lecture,
description=_("Default timetable theme used for lecture events"))
meeting_theme = IndicoThemeSelectField(_("Theme for Meetings"), [DataRequired()], event_type=EventType.meeting,
description=_("Default timetable theme used for meeting events"))
suggestions_disabled = BooleanField(_('Disable Suggestions'), widget=SwitchWidget(),
description=_("Enable this if you don't want Indico to suggest this category as"
" a possible addition to a user's favourites."))
event_message_mode = IndicoEnumSelectField(_("Message Type"), enum=EventMessageMode,
default=EventMessageMode.disabled,
description=_("This message will show up at the top of every event page "
"in this category"))
event_message = IndicoMarkdownField(_("Content"))
notify_managers = BooleanField(_("Notify managers"), widget=SwitchWidget(),
description=_("Whether to send email notifications to all managers of this category "
"when an event is created inside it or in any of its subcategories."))
event_creation_notification_emails = EmailListField(_("Notification E-mails"),
description=_("List of emails that will receive a notification "
"every time a new event is created inside the "
"category or one of its subcategories. "
"One email address per line."))
class CategoryIconForm(IndicoForm):
icon = EditableFileField("Icon", accepted_file_types='image/jpeg,image/jpg,image/png,image/gif',
add_remove_links=False, handle_flashes=True, get_metadata=partial(get_image_data, 'icon'),
description=_("Small icon that will show up next to category names in overview pages. "
"Will be automatically resized to 16x16 pixels. This may involve loss of "
"image quality, so try to upload images as close as possible to those "
"dimensions."))
class CategoryLogoForm(IndicoForm):
logo = EditableFileField("Logo", accepted_file_types='image/jpeg,image/jpg,image/png,image/gif',
add_remove_links=False, handle_flashes=True, get_metadata=partial(get_image_data, 'logo'),
description=_("Logo that will show up next to the category description. Will be "
"automatically resized to at most 200x200 pixels."))
class CategoryProtectionForm(IndicoForm):
_event_creation_fields = ('event_creation_restricted', 'event_creators', 'event_creation_notification_emails')
protection_mode = IndicoProtectionField(_('Protection mode'), protected_object=lambda form: form.protected_object)
acl = AccessControlListField(_('Access control list'), groups=True, allow_external=True, allow_networks=True,
default_text=_('Restrict access to this category'),
description=_('List of users allowed to access the category.'))
managers = PrincipalListField(_('Managers'), groups=True)
own_no_access_contact = StringField(_('No access contact'),
description=_('Contact information shown when someone lacks access to the '
'category'))
visibility = SelectField(_("Event visibility"), [Optional()], coerce=lambda x: None if x == '' else int(x),
description=_("""From which point in the category tree contents will be visible from """
"""(number of categories upwards). Applies to "Today's events" and """
"""Calendar. If the category is moved, this number will be preserved."""))
event_creation_restricted = BooleanField(_('Restricted event creation'), widget=SwitchWidget(),
description=_('Whether the event creation should be restricted '
'to a list of specific persons'))
event_creators = PrincipalListField(_('Event creators'), groups=True, allow_external=True,
description=_('Users allowed to create events in this category'))
def __init__(self, *args, **kwargs):
self.protected_object = category = kwargs.pop('category')
super(CategoryProtectionForm, self).__init__(*args, **kwargs)
self._init_visibility(category)
def _init_visibility(self, category):
self.visibility.choices = get_visibility_options(category, allow_invisible=False)
# Check if category visibility would be affected by any of the parents
real_horizon = category.real_visibility_horizon
own_horizon = category.own_visibility_horizon
if real_horizon and real_horizon.is_descendant_of(own_horizon):
self.visibility.warning = _("This category's visibility is currently limited by that of '{}'.").format(
real_horizon.title)
class CreateCategoryForm(IndicoForm):
"""Form to create a new Category"""
title = StringField(_("Title"), [DataRequired()])
description = IndicoMarkdownField(_("Description"))
class SplitCategoryForm(IndicoForm):
first_category = StringField(_('Category name #1'), [DataRequired()],
description=_('Selected events will be moved into a new sub-category with this '
'title.'))
second_category = StringField(_('Category name #2'), [DataRequired()],
description=_('Events that were not selected will be moved into a new sub-category '
'with this title.'))
event_id = HiddenFieldList()
all_selected = BooleanField(widget=HiddenCheckbox())
submitted = HiddenField()
def __init__(self, *args, **kwargs):
super(SplitCategoryForm, self).__init__(*args, **kwargs)
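        # If every event was selected there is nothing left for a second
        # category, so collapse the form to a single category-name field.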
if self.all_selected.data:
self.event_id.data = []
self.first_category.label.text = _('Category name')
self.first_category.description = _('The events will be moved into a new sub-category with this title.')
del self.second_category
def is_submitted(self):
return super(SplitCategoryForm, self).is_submitted() and 'submitted' in request.form
class UpcomingEventsForm(IndicoForm):
max_entries = IntegerField(_('Max. events'), [InputRequired(), NumberRange(min=0)],
description=_("The maximum number of upcoming events to show. Events are sorted by "
"weight so events with a lower weight are more likely to be omitted if "
"there are too many events to show."))
entries = MultipleItemsField(_('Upcoming events'),
fields=[{'id': 'type', 'caption': _("Type"), 'required': True, 'type': 'select'},
{'id': 'id', 'caption': _("ID"), 'required': True, 'type': 'number',
'step': 1, 'coerce': int},
{'id': 'days', 'caption': _("Days"), 'required': True, 'type': 'number',
'step': 1, 'coerce': int},
{'id': 'weight', 'caption': _("Weight"), 'required': True, 'type': 'number',
'coerce': float}],
choices={'type': {'category': _('Category'),
'category_tree': _('Category & Subcategories'),
'event': _('Event')}},
description=_("Specify categories/events shown in the 'upcoming events' list on the "
"home page."))
def validate_entries(self, field):
if field.errors:
return
for entry in field.data:
if entry['days'] < 0:
raise ValidationError(_("'Days' must be a positive integer"))
if entry['type'] not in {'category', 'category_tree', 'event'}:
raise ValidationError(_('Invalid type'))
if entry['type'] in {'category', 'category_tree'} and not Category.get(entry['id'], is_deleted=False):
raise ValidationError(_('Invalid category: {}').format(entry['id']))
if entry['type'] == 'event' and not Event.get(entry['id'], is_deleted=False):
raise ValidationError(_('Invalid event: {}').format(entry['id']))
| mvidalgarcia/indico | indico/modules/categories/forms.py | Python | mit | 11,501 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import unique
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class UniqueDatasetTest(test_base.DatasetTestBase):
def _testSimpleHelper(self, dtype, test_cases):
"""Test the `unique()` transformation on a list of test cases.
Args:
dtype: The `dtype` of the elements in each test case.
test_cases: A list of pairs of lists. The first component is the test
input that will be passed to the transformation; the second component
is the expected sequence of outputs from the transformation.
"""
# The `current_test_case` will be updated when we loop over `test_cases`
# below; declare it here so that the generator can capture it once.
current_test_case = []
dataset = dataset_ops.Dataset.from_generator(lambda: current_test_case,
dtype).apply(unique.unique())
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.cached_session() as sess:
for test_case, expected in test_cases:
current_test_case = test_case
sess.run(iterator.initializer)
for element in expected:
if dtype == dtypes.string:
element = compat.as_bytes(element)
self.assertAllEqual(element, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testSimpleInt(self):
for dtype in [dtypes.int32, dtypes.int64]:
self._testSimpleHelper(dtype, [
([], []),
([1], [1]),
([1, 1, 1, 1, 1, 1, 1], [1]),
([1, 2, 3, 4], [1, 2, 3, 4]),
([1, 2, 4, 3, 2, 1, 2, 3, 4], [1, 2, 4, 3]),
([[1], [1, 1], [1, 1, 1]], [[1], [1, 1], [1, 1, 1]]),
([[1, 1], [1, 1], [2, 2], [3, 3], [1, 1]], [[1, 1], [2, 2], [3, 3]]),
])
def testSimpleString(self):
self._testSimpleHelper(dtypes.string, [
([], []),
(["hello"], ["hello"]),
(["hello", "hello", "hello"], ["hello"]),
(["hello", "world"], ["hello", "world"]),
(["foo", "bar", "baz", "baz", "bar", "foo"], ["foo", "bar", "baz"]),
])
if __name__ == "__main__":
test.main()
| kobejean/tensorflow | tensorflow/contrib/data/python/kernel_tests/unique_dataset_op_test.py | Python | apache-2.0 | 3,320 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-26 16:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20180226_1601'),
]
operations = [
migrations.AddField(
model_name='achievement',
name='description_en',
field=models.TextField(null=True, verbose_name='description'),
),
migrations.AddField(
model_name='achievement',
name='description_it',
field=models.TextField(null=True, verbose_name='description'),
),
migrations.AddField(
model_name='achievement',
name='title_en',
field=models.CharField(max_length=50, null=True, verbose_name='title'),
),
migrations.AddField(
model_name='achievement',
name='title_it',
field=models.CharField(max_length=50, null=True, verbose_name='title'),
),
]
| flavoi/diventi | diventi/accounts/migrations/0007_auto_20180226_1708.py | Python | apache-2.0 | 1,057 |
# PHD.py
# Aaron Taylor
# Moose Abumeeiz
#
# The PHD ensures you have only positive pills
# when it's in the player's inventory
#
from pygame import *
from const import *
from Item import *
class PHD(Item):
"""The PHD is used to allow all positive affects on pills"""
collideable = False
pickedUp = False
tWidth = 64
tHeight = 64
| ExPHAT/binding-of-isaac | PHD.py | Python | mit | 340 |
# Copyright 2021 Alfredo de la Fuente - Avanzosc S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests import common
from odoo.tests import tagged
@tagged("post_install", "-at_install")
class TestNameCodeYearId(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestNameCodeYearId, cls).setUpClass()
cls.event_obj = cls.env['event.event']
cls.skill_type_lang = cls.env.ref('hr_skills.hr_skill_type_lang')
cls.skill_spanish = cls.env.ref('hr_skills.hr_skill_spanish')
cls.skill_filipino = cls.env.ref('hr_skills.hr_skill_filipino')
cls.skill_type_lang.skill_language = True
cls.skill_spanish.code = 'SP'
cls.skill_filipino.code = 'FI'
def test_event_name_code_year_id(self):
vals = {'name': 'User for event lang level',
'date_begin': '2025-01-06 08:00:00',
'date_end': '2025-01-15 10:00:00',
'lang_id': self.skill_spanish.id}
event = self.event_obj.create(vals)
name = 'SP-{}-2025'.format(event.id)
self.assertEqual(event.name, name)
vals = {'date_begin': '2024-01-06 08:00:00',
'lang_id': self.skill_filipino.id}
event.write(vals)
name = 'FI-{}-2024'.format(event.id)
self.assertEqual(event.name, name)
| avanzosc/odoo-addons | event_name_code_year_id/tests/test_event_name_code_year_id.py | Python | agpl-3.0 | 1,353 |
import logging
import os
import time
from datetime import (datetime,
timedelta)
from hashlib import sha1
from datasource.bases.BaseHub import BaseHub
from datasource.DataHub import DataHub
from django.conf import settings
from treeherder.model import utils
logger = logging.getLogger(__name__)
class RefDataManager(object):
"""Model for reference data"""
def __init__(self):
procs_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'sql', 'reference.json')
master_host_config = {
"host": settings.DATABASES['default']['HOST'],
"user": settings.DATABASES['default']['USER'],
"passwd": settings.DATABASES['default'].get('PASSWORD') or '',
}
if 'OPTIONS' in settings.DATABASES['default']:
master_host_config.update(settings.DATABASES['default']['OPTIONS'])
read_host_config = {
"host": settings.DATABASES['read_only']['HOST'],
"user": settings.DATABASES['read_only']['USER'],
"passwd": settings.DATABASES['read_only'].get('PASSWORD') or '',
}
if 'OPTIONS' in settings.DATABASES['read_only']:
read_host_config.update(settings.DATABASES['read_only']['OPTIONS'])
data_source = {
'reference': {
"hub": "MySQL",
"master_host": master_host_config,
"read_host": read_host_config,
"require_host_type": True,
"default_db": settings.DATABASES['default']['NAME'],
"procs": [procs_path]
}
}
BaseHub.add_data_source(data_source)
self.dhub = DataHub.get("reference")
self.DEBUG = settings.DEBUG
# Support structure for reference data signatures
self.reference_data_signature_lookup = {}
self.build_signature_placeholders = []
# Support structures for building build platform SQL
self.build_platform_lookup = {}
self.build_where_filters = []
self.build_platform_placeholders = []
self.build_unique_platforms = []
# Support structures for building machine platform SQL
self.machine_platform_lookup = {}
self.machine_where_filters = []
self.machine_platform_placeholders = []
self.machine_unique_platforms = []
# Support structures for building job group SQL
self.job_group_lookup = {}
self.job_group_where_filters = []
self.job_group_placeholders = []
self.job_group_names_and_symbols = []
# Support structures for building job types SQL
self.job_type_lookup = {}
self.job_type_where_filters = []
self.job_type_placeholders = []
self.job_type_names_and_symbols = []
# Use this structure to map the job to the group id
self.job_type_to_group_lookup = {}
# Support structures for building product SQL
self.product_lookup = set()
self.product_where_in_list = []
self.product_placeholders = []
self.unique_products = []
# Support structures for building machine SQL
self.machine_name_lookup = set()
self.machine_where_in_list = []
self.machine_name_placeholders = []
self.machine_unique_names = []
self.machine_timestamp_update_placeholders = []
# Support structures for building option collection data structures
self.oc_hash_lookup = dict()
self.oc_where_in_list = []
self.oc_placeholders = []
self.oc_unique_collections = []
# Support structures for building option data structures
self.o_lookup = set()
self.o_placeholders = []
self.o_unique_options = []
self.o_where_in_list = []
# reference id lookup structure
self.id_lookup = {}
def disconnect(self):
self.dhub.disconnect()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def execute(self, **kwargs):
return utils.retry_execute(self.dhub, logger, **kwargs)
def set_all_reference_data(self):
"""This method executes SQL to store data in all loaded reference
data structures. It returns lookup dictionaries where the key is
typically the string provided to the data structure and the value
includes the database id associated with it. Once all of the
reference data is processed, the reference data structures are
initialized to empty structures so the same class instance can be
used to process more reference data if necessary.
        In general, users of this class should first iterate through job
        data, calling the appropriate add* class instance methods to load
        the reference data. Once all of the data is loaded, call this
        method to process it.
"""
# This is not really an id lookup but a list of unique reference
# data signatures that can be used for subsequent queries
self.id_lookup['reference_data_signatures'] = self.process_reference_data_signatures()
# id lookup structure
self.id_lookup['build_platforms'] = self.process_build_platforms()
self.id_lookup['machine_platforms'] = self.process_machine_platforms()
# job groups need to be processed before job types so the associated
# group ids are available when the job types are stored
self.id_lookup['job_groups'] = self.process_job_groups()
self.id_lookup['job_types'] = self.process_job_types()
self.id_lookup['products'] = self.process_products()
self.id_lookup['machines'] = self.process_machines()
self.id_lookup['option_collections'] = self.process_option_collections()
self.reset_reference_data()
return self.id_lookup
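    # Usage sketch (editor's note, not part of the original module; the job
    # payload fields below are hypothetical):
    #
    #   with RefDataManager() as refdata:
    #       for job in job_payloads:
    #           refdata.add_build_platform(job['os'], job['platform'], job['arch'])
    #           refdata.add_machine(job['machine'], job['submit_time'])
    #           refdata.add_product(job['product'])
    #       lookups = refdata.set_all_reference_data()
    #
    #   # lookups['build_platforms'] is keyed by get_platform_key(os, platform, arch)
    #   # and each value carries the database id assigned to that platform.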
def reset_reference_data(self):
"""Reset all reference data structures, this should be called after
processing data.
"""
# reference data signatures
self.reference_data_signature_lookup = {}
self.build_signature_placeholders = []
# reset build platforms
self.build_platform_lookup = {}
self.build_where_filters = []
self.build_platform_placeholders = []
self.build_unique_platforms = []
# reset machine platforms
self.machine_platform_lookup = {}
self.machine_where_filters = []
self.machine_platform_placeholders = []
self.machine_unique_platforms = []
# reset job groups
self.job_group_lookup = {}
self.job_group_where_filters = []
self.job_group_placeholders = []
self.job_group_names_and_symbols = []
self.job_type_to_group_lookup = {}
# reset job types
self.job_type_lookup = {}
self.job_type_where_filters = []
self.job_type_placeholders = []
self.job_type_names_and_symbols = []
# reset products
self.product_lookup = set()
self.product_where_in_list = []
self.product_placeholders = []
self.unique_products = []
# reset machines
self.machine_name_lookup = set()
self.machine_where_in_list = []
self.machine_name_placeholders = []
self.machine_unique_names = []
self.machine_timestamp_update_placeholders = []
# reset option collections
self.oc_hash_lookup = dict()
self.oc_where_in_list = []
self.oc_placeholders = []
self.oc_unique_collections = []
# reset options
self.o_lookup = set()
self.o_placeholders = []
self.o_unique_options = []
self.o_where_in_list = []
"""
Collection of add_* methods that take some kind of reference
data and populate a set of class instance data structures. These
methods allow a caller to iterate through a single list of
job data structures, generating cumulative sets of reference data.
"""
def add_reference_data_signature(self, name, build_system_type,
repository, reference_data):
signature = self.get_reference_data_signature(reference_data)
if signature not in self.reference_data_signature_lookup:
            # No reference_data_name was provided; use the signature
            # in its place. In the case of buildbot this will be the
            # buildername.
if name is None:
name = signature
placeholders = [name, signature]
placeholders.extend(reference_data)
placeholders.extend([int(time.time()), name, signature,
build_system_type, repository])
self.build_signature_placeholders.append(placeholders)
self.reference_data_signature_lookup[signature] = reference_data
return signature
def add_build_platform(self, os_name, platform, arch):
"""
Add build platform reference data. Requires an
operating system name, platform designator, and architecture
type.
os_name - linux | mac | win | Android | Firefox OS | ...
platform - fedora 12 | redhat 12 | 5.1.2600 | 6.1.7600 | OS X 10.7.2 | ...
architecture - x86 | x86_64 etc...
"""
os_name = os_name or 'unknown'
platform = platform or 'unknown'
arch = arch or 'unknown'
max_len = 25
os_name = os_name[0:max_len]
platform = platform[0:max_len]
arch = arch[0:max_len]
key = self._add_platform(
os_name, platform, arch,
self.build_platform_lookup,
self.build_platform_placeholders,
self.build_unique_platforms,
self.build_where_filters
)
return key
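    # For illustration (editor's sketch, values are hypothetical):
    # add_build_platform('linux', 'fedora 12', 'x86_64') returns the lookup
    # key 'linux-fedora 12-x86_64' (see get_platform_key below) and queues the
    # platform for insertion the next time build platforms are processed.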
def add_machine_platform(self, os_name, platform, arch):
"""
Add machine platform reference data. Requires an
operating system name, platform designator, and architecture
type.
os_name - linux | mac | win | Android | Firefox OS | ...
platform - fedora 12 | redhat 12 | 5.1.2600 | 6.1.7600 | OS X 10.7.2 | ...
architecture - x86 | x86_64 etc...
"""
os_name = os_name or 'unknown'
platform = platform or 'unknown'
arch = arch or 'unknown'
max_len = 25
os_name = os_name[0:max_len]
platform = platform[0:max_len]
arch = arch[0:max_len]
key = self._add_platform(
os_name, platform, arch,
self.machine_platform_lookup,
self.machine_platform_placeholders,
self.machine_unique_platforms,
self.machine_where_filters
)
return key
def add_job_type(self, job_type, job_symbol, group_name, group_symbol):
"""Add job type names and symbols and job group names and symbols"""
job_type = job_type or 'unknown'
job_symbol = job_symbol or '?'
group_name = group_name or 'unknown'
group_symbol = group_symbol or '?'
max_name = 100
max_symbol = 25
job_type = job_type[0:max_name]
job_symbol = job_symbol[0:max_symbol]
group_name = group_name[0:max_name]
group_symbol = group_symbol[0:max_symbol]
self._add_name_and_symbol(
group_name, group_symbol, self.job_group_names_and_symbols,
self.job_group_placeholders, self.job_group_lookup,
self.job_group_where_filters
)
self._add_name_and_symbol(
job_type, job_symbol, self.job_type_names_and_symbols,
self.job_type_placeholders, self.job_type_lookup,
self.job_type_where_filters
)
job_key = RefDataManager.get_name_symbol_key(
job_type, job_symbol
)
group_key = RefDataManager.get_name_symbol_key(
group_name, group_symbol
)
# Use this structure to map the job to the group id
self.job_type_to_group_lookup[job_key] = {
'group_key': group_key, 'job_type': job_type,
'job_symbol': job_symbol
}
return job_key
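    # For illustration (editor's sketch, values are hypothetical):
    # add_job_type('mochitest-1', 'M1', 'Mochitest', 'M') returns the job key
    # 'mochitest-1-M1' and also registers the group key 'Mochitest-M', so the
    # job type can be linked to its group id in process_job_types().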
def add_product(self, product):
"""Add product names"""
product = product or 'unknown'
product = product[0:50]
self._add_name(
product, self.product_lookup, self.product_placeholders,
self.unique_products, self.product_where_in_list
)
def _add_platform(
self,
os_name, platform, arch,
platform_lookup,
platform_placeholders,
unique_platforms,
where_filters):
"""
Internal method for adding platform information, the platform
could be a build or machine platform. The caller must provide
the appropriate instance data structures as arguments.
"""
key = RefDataManager.get_platform_key(os_name, platform, arch)
if key not in platform_lookup:
# Placeholders for the INSERT/SELECT SQL query
platform_placeholders.append(
[os_name, platform, arch, os_name, platform, arch]
)
# Placeholders for the id retrieval SELECT
unique_platforms.extend(
[os_name, platform, arch]
)
# Initializing return data structure
platform_lookup[key] = {
'id': 0,
'os_name': os_name,
'platform': platform,
'architecture': arch
}
            # WHERE clause for the retrieval SELECT; the %s placeholders are
            # bound from unique_platforms when the SELECT is executed
            where_filters.append(
                "(`os_name` = %s AND `platform` = %s AND `architecture` = %s)"
            )
return key
def _add_name(
self, name, name_lookup, name_placeholders, unique_names,
where_in_list):
"""
Internal method for adding reference data that consists of a single
name. The caller must provide the appropriate instance data
structures as arguments.
"""
if name not in name_lookup:
name_lookup.add(name)
# Placeholders for the INSERT/SELECT SQL query
name_placeholders.append(
[name, name]
)
# Placeholders for the id retrieval SELECT
unique_names.append(name)
# WHERE clause for the retrieval SELECT
where_in_list.append('%s')
def _add_name_and_symbol(
self, name, symbol, unique_names_and_symbols, name_placeholders,
name_symbol_lookup, where_filters):
"""
Internal method for adding reference data that consists of a single
name and associated character symbol. The caller must provide the
appropriate instance data structures as arguments.
"""
key = RefDataManager.get_name_symbol_key(name, symbol)
if key not in name_symbol_lookup:
# Placeholders for the INSERT/SELECT SQL query
name_placeholders.append(
[name, symbol, name, symbol]
)
# Placeholders for the id retrieval SELECT
unique_names_and_symbols.extend(
[name, symbol]
)
# Initializing return data structure
name_symbol_lookup[key] = {
'id': 0,
'name': name,
'symbol': symbol
}
            # WHERE clause for the retrieval SELECT; the %s placeholders are
            # bound from the accumulated names and symbols when the SELECT
            # is executed
            where_filters.append(
                "(`name` = %s AND `symbol` = %s)"
            )
return key
def add_machine(self, machine_name, timestamp):
"""
Add machine name and timestamp. There are two timestamps stored in
the database for each machine, one associated with the first time
the machine is seen and another that acts as a heartbeat for the
machine.
"""
if machine_name not in self.machine_name_lookup:
machine_name = machine_name or 'unknown'
timestamp = timestamp or time.time()
machine_name = machine_name[0:50]
self.machine_name_lookup.add(machine_name)
# Placeholders for the INSERT/SELECT SQL query
self.machine_name_placeholders.append(
# machine_name, first_timestamp, last_timestamp,
# machine_name
[machine_name, timestamp, timestamp, machine_name]
)
# Placeholders for the id retrieval SELECT
self.machine_unique_names.append(machine_name)
# WHERE clause for the retrieval SELECT
self.machine_where_in_list.append('%s')
# NOTE: It's possible that the same machine occurs
# multiple times in names_and_timestamps with different
# timestamps. We're assuming those timestamps will be
# reasonably close to each other and the primary intent
# of storing the last_timestamp is to keep track of the
# approximate time a particular machine last reported.
self.machine_timestamp_update_placeholders.append(
[timestamp, machine_name]
)
def add_option_collection(self, option_set):
"""
Add an option collection. An option collection is made up of a
set of options. Each unique set of options is hashed, this hash
becomes the identifier for the option set. Options are stored
individually in the database, callers only interact directly with
        sets of options, even when there's only one option in a set.
"""
# New set with elements in option_set but not in o_lookup
new_options = set(option_set) - self.o_lookup
if new_options:
# Extend o_lookup with new options
self.o_lookup = self.o_lookup.union(new_options)
for o in new_options:
# Prepare data structures for option insertion
self.o_placeholders.append([o, o])
self.o_unique_options.append(o)
self.o_where_in_list.append('%s')
option_collection_hash = self.get_option_collection_hash(
option_set
)
if option_collection_hash not in self.oc_hash_lookup:
# Build list of unique option collections
self.oc_hash_lookup[option_collection_hash] = option_set
return option_collection_hash
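    # For illustration (editor's sketch): add_option_collection({'debug', 'asan'})
    # returns the sha1 hash of the sorted, concatenated option names and records
    # the set in oc_hash_lookup; the database rows themselves are only written
    # when process_option_collections() runs.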
"""
The following set of process_* methods carry out the task
of SQL generation and execution using the class instance reference
data structures.
"""
def process_reference_data_signatures(self):
insert_proc = 'reference.inserts.create_reference_data_signature'
self.execute(
proc=insert_proc,
placeholders=self.build_signature_placeholders,
executemany=True,
debug_show=self.DEBUG)
return self.reference_data_signature_lookup.keys()
def process_build_platforms(self):
"""
Process the build platform reference data
"""
insert_proc = 'reference.inserts.create_build_platform'
select_proc = 'reference.selects.get_build_platforms'
return self._process_platforms(
insert_proc, select_proc,
self.build_platform_lookup,
self.build_platform_placeholders,
self.build_unique_platforms,
self.build_where_filters
)
def process_machine_platforms(self):
"""
Process the machine platform reference data
"""
insert_proc = 'reference.inserts.create_machine_platform'
select_proc = 'reference.selects.get_machine_platforms'
return self._process_platforms(
insert_proc, select_proc,
self.machine_platform_lookup,
self.machine_platform_placeholders,
self.machine_unique_platforms,
self.machine_where_filters
)
def process_job_groups(self):
"""
Process the job group reference data
"""
insert_proc = 'reference.inserts.create_job_group'
select_proc = 'reference.selects.get_job_groups'
return self._process_names_and_symbols(
insert_proc, select_proc,
self.job_group_lookup,
self.job_group_placeholders,
self.job_group_names_and_symbols,
self.job_group_where_filters
)
def process_job_types(self):
"""
Process the job type reference data
"""
insert_proc = 'reference.inserts.create_job_type'
select_proc = 'reference.selects.get_job_types'
job_type_lookup = self._process_names_and_symbols(
insert_proc, select_proc,
self.job_type_lookup,
self.job_type_placeholders,
self.job_type_names_and_symbols,
self.job_type_where_filters
)
update_placeholders = []
# Find which job_types do not have group ids
for job_key in job_type_lookup:
if not job_type_lookup[job_key]['job_group_id']:
job_data = self.job_type_to_group_lookup[job_key]
group_id = self.job_group_lookup[job_data['group_key']]['id']
update_placeholders.append(
[group_id, job_data['job_type'], job_data['job_symbol']]
)
if update_placeholders:
# Update the job types with the job group id
self.execute(
proc='reference.updates.update_job_type_group_id',
placeholders=update_placeholders,
executemany=True,
debug_show=self.DEBUG)
return job_type_lookup
def process_products(self):
"""
Process the product reference data
"""
insert_proc = 'reference.inserts.create_product'
select_proc = 'reference.selects.get_products'
return self._process_names(
insert_proc, select_proc,
self.product_where_in_list,
self.product_placeholders,
self.unique_products
)
def process_machines(self):
"""
Process the machine reference data
"""
if not self.machine_name_placeholders:
return {}
# Convert WHERE filters to string
where_in_clause = ",".join(self.machine_where_in_list)
select_proc = 'reference.selects.get_machines'
insert_proc = 'reference.inserts.create_machine'
update_proc = 'reference.updates.update_machine_timestamp'
self.execute(
proc=insert_proc,
placeholders=self.machine_name_placeholders,
executemany=True,
debug_show=self.DEBUG)
name_lookup = self.execute(
proc=select_proc,
placeholders=self.machine_unique_names,
replace=[where_in_clause],
key_column='name',
return_type='dict',
debug_show=self.DEBUG)
"""
There is a bug in the python mysqldb module that is triggered by the
use of an INSERT/SELECT/ON DUPLICATE KEY query with the executemany
option that results in
'TypeError: not all arguments converted during string formatting'
To circumvent this we do an explicit update to set the
last_timestamp. In parallel job execution this could lead to a
race condition where the machine timestamp is set by another
job processor but the intention of last_timestamp is to keep an
approximate time associated with the machine's last report so this
should not be a problem.
NOTE: There was a possibility of a data integrity issue caused by the
ON DUPLICATE KEY UPDATE strategy. When the ON DUPLICATE KEY clause
is executed the auto increment id will be incremented. This has
the potential to mangle previous stored machine_ids. This would
be bad...
"""
self.execute(
proc=update_proc,
placeholders=self.machine_timestamp_update_placeholders,
executemany=True,
debug_show=self.DEBUG)
return name_lookup
def process_option_collections(self):
"""
Process option collection data
"""
# Store options not seen yet
o_where_in_clause = ",".join(self.o_where_in_list)
option_id_lookup = self._get_or_create_options(
self.o_placeholders, self.o_unique_options, o_where_in_clause
)
# Get the list of option collection placeholders
for oc_hash in self.oc_hash_lookup:
for o in self.oc_hash_lookup[oc_hash]:
self.oc_placeholders.append([
oc_hash, option_id_lookup[o]['id'], oc_hash,
option_id_lookup[o]['id']
])
if not self.oc_placeholders:
return {}
self.execute(
proc='reference.inserts.create_option_collection',
placeholders=self.oc_placeholders,
executemany=True,
debug_show=self.DEBUG)
return self.oc_hash_lookup
def _process_platforms(
self, insert_proc, select_proc, platform_lookup,
platform_placeholders, unique_platforms, where_filters):
"""
Internal method for processing either build or machine platforms.
The caller is required to provide the appropriate data structures
depending on what type of platform is being processed.
"""
if where_filters:
self.execute(
proc=insert_proc,
placeholders=platform_placeholders,
executemany=True,
debug_show=self.DEBUG)
# Convert WHERE filters to string
where_in_clause = " OR ".join(where_filters)
            # NOTE: This query is using master_host to ensure we don't have a
# race condition with INSERT into master and SELECT new ids from
# the slave.
data_retrieved = self.execute(
proc=select_proc,
placeholders=unique_platforms,
replace=[where_in_clause],
debug_show=self.DEBUG)
for data in data_retrieved:
key = RefDataManager.get_platform_key(
data['os_name'], data['platform'], data['architecture']
)
platform_lookup[key]['id'] = int(data['id'])
return platform_lookup
def _process_names(
self, insert_proc, select_proc, where_in_list, name_placeholders,
unique_names):
"""
Internal method for processing reference data names. The caller is
required to provide the appropriate data structures for the target
reference data type.
"""
if not name_placeholders:
return {}
# Convert WHERE filters to string
where_in_clause = ",".join(where_in_list)
self.execute(
proc=insert_proc,
placeholders=name_placeholders,
executemany=True,
debug_show=self.DEBUG)
name_lookup = self.execute(
proc=select_proc,
placeholders=unique_names,
replace=[where_in_clause],
key_column='name',
return_type='dict',
debug_show=self.DEBUG)
return name_lookup
def _process_names_and_symbols(
self, insert_proc, select_proc, name_symbol_lookup,
name_symbol_placeholders, names_and_symbols, where_filters):
"""
Internal method for processing reference data names and their associated
symbols. The caller is required to provide the appropriate data
structures for the target reference data type.
"""
if where_filters:
self.execute(
proc=insert_proc,
placeholders=name_symbol_placeholders,
executemany=True,
debug_show=self.DEBUG)
# Convert WHERE filters to string
where_in_clause = " OR ".join(where_filters)
data_retrieved = self.execute(
proc=select_proc,
placeholders=names_and_symbols,
replace=[where_in_clause],
debug_show=self.DEBUG)
for data in data_retrieved:
key = RefDataManager.get_name_symbol_key(
data['name'], data['symbol']
)
name_symbol_lookup[key] = data
name_symbol_lookup[key]['id'] = int(data['id'])
return name_symbol_lookup
def get_or_create_build_platforms(self, platform_data):
"""
Get or create build platforms for a list of platform data.
See _get_or_create_platforms for data structure descriptions.
"""
insert_proc = 'reference.inserts.create_build_platform'
select_proc = 'reference.selects.get_build_platforms'
return self._get_or_create_platforms(
platform_data, insert_proc, select_proc,
self.build_platform_lookup,
self.build_platform_placeholders,
self.build_unique_platforms,
self.build_where_filters
)
def get_or_create_machine_platforms(self, platform_data):
"""
Get or create machine platforms for a list of platform data.
See _get_or_create_platforms for data structure descriptions.
"""
insert_proc = 'reference.inserts.create_machine_platform'
select_proc = 'reference.selects.get_machine_platforms'
return self._get_or_create_platforms(
platform_data, insert_proc, select_proc,
self.machine_platform_lookup,
self.machine_platform_placeholders,
self.machine_unique_platforms,
self.machine_where_filters
)
def _get_or_create_platforms(
self, platform_data, insert_proc, select_proc,
platform_lookup, platform_placeholders, unique_platforms,
where_filters):
"""
Takes a list of lists of os_name, platform, and architecture
columns and returns a dictionary to be used as a lookup for each
combination's associated id. Any platforms not found are created,
duplicate platforms are aggregated to minimize database operations.
platform_data =
[
[os_name, platform, architecture],
[os_name, platform, architecture],
...
]
returns {
"os_name-platform-architecture": {
id:id, os_name:os_name,
platform:platform,
architecture:architecture
},
"os_name-platform-architecture": {
id:id,
os_name:os_name,
platform:platform,
architecture:architecture
},
...
}
"""
for item in platform_data:
self._add_platform(
# os_name, platform, architecture
item[0], item[1], item[2],
platform_lookup, platform_placeholders,
unique_platforms, where_filters
)
return self._process_platforms(
insert_proc, select_proc,
platform_lookup,
platform_placeholders,
unique_platforms,
where_filters
)
@classmethod
def get_platform_key(cls, os_name, platform, architecture):
return "{0}-{1}-{2}".format(os_name, platform, architecture)
@classmethod
def get_name_symbol_key(cls, name, symbol):
return "{0}-{1}".format(name, symbol)
def get_or_create_job_groups(self, names):
"""
Get or create job groups given a list of job group names.
See _get_or_create_names for data structure descriptions.
"""
insert_proc = 'reference.inserts.create_job_group'
select_proc = 'reference.selects.get_job_groups'
return self._get_or_create_names_and_symbols(
names, insert_proc, select_proc,
self.job_group_names_and_symbols,
self.job_group_placeholders,
self.job_group_lookup,
self.job_group_where_filters)
def get_or_create_job_types(self, names):
"""
Get or create job types given a list of job type names.
See _get_or_create_names for data structure descriptions.
"""
insert_proc = 'reference.inserts.create_job_type'
select_proc = 'reference.selects.get_job_types'
return self._get_or_create_names_and_symbols(
names, insert_proc, select_proc,
self.job_type_names_and_symbols,
self.job_type_placeholders,
self.job_type_lookup,
self.job_type_where_filters)
def get_or_create_products(self, names):
"""
Get or create products given a list of product names. See
_get_or_create_names for data structure descriptions.
"""
insert_proc = 'reference.inserts.create_product'
select_proc = 'reference.selects.get_products'
return self._get_or_create_names(
names, insert_proc, select_proc,
self.product_lookup, self.product_placeholders,
self.unique_products, self.product_where_in_list)
def get_or_create_machines(self, names_and_timestamps):
"""
Takes a list of machine names and timestamps returns a dictionary to
be used as a lookup for each machine name's id. Any names not found
are inserted into the appropriate table, duplicate machine names are
aggregated to minimize database operations.
names = [
[ machine1, time1 ],
[ machine2, time2 ],
[ machine3, time3 ],
... ]
returns {
            'machine1':{'id':id, 'name':name },
            'machine2':{'id':id, 'name':name },
            'machine3':{'id':id, 'name':name },
...
}
"""
for item in names_and_timestamps:
# machine name, timestamp
self.add_machine(item[0], item[1])
return self.process_machines()
def _get_or_create_names(self,
names, insert_proc, select_proc,
                             name_lookup, name_placeholders, unique_names, where_in_list):
"""
Takes a list of names and returns a dictionary to be used as a
lookup for each name's id. Any names not found are inserted into
        the appropriate table, duplicate names are aggregated to
minimize database operations.
names = [ name1, name2, name3 ... ]
returns { 'name1':id, 'name2':id, 'name3':id, ... }
"""
for name in names:
self._add_name(
name, name_lookup, name_placeholders,
unique_names, where_in_list
)
return self._process_names(
insert_proc, select_proc, where_in_list, name_placeholders,
unique_names
)
def _get_or_create_names_and_symbols(
self, data, insert_proc, select_proc, names_and_symbols, placeholders,
name_symbol_lookup, where_filters):
"""
Takes a list of names and returns a dictionary to be used as a
lookup for each name's id. Any names not found are inserted into
        the appropriate table, duplicate name/symbol pairs are aggregated to
minimize database operations.
names = [
[name1, symbol1],
[name2, symbol2],
[name3, symbol3],
...
]
returns { 'name1':id, 'name2':id, 'name3':id, ... }
"""
for name_symbol in data:
self._add_name_and_symbol(
name_symbol[0], name_symbol[1], names_and_symbols,
placeholders, name_symbol_lookup, where_filters
)
return self._process_names_and_symbols(
insert_proc, select_proc, name_symbol_lookup, placeholders,
names_and_symbols, where_filters
)
def get_option_collection_hash(self, options):
"""returns an option_collection_hash given a list of options"""
options = sorted(list(options))
sha_hash = sha1()
# equivalent to loop over the options and call sha_hash.update()
sha_hash.update(''.join(options))
return sha_hash.hexdigest()
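    # Worked example (editor's note): get_option_collection_hash(['debug', 'asan'])
    # sorts the options to ['asan', 'debug'] and returns
    # sha1('asandebug').hexdigest(), so the hash is independent of the order in
    # which the options were supplied.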
def get_or_create_option_collection(self, option_collections):
"""
Get or create option collections for each list of options provided.
[
[ option1, option2, option3 ],
...
]
"""
# Build set of unique options
for option_set in option_collections:
self.add_option_collection(option_set)
return self.process_option_collections()
def _get_or_create_options(
self, option_placeholders, unique_options, where_in_clause):
if not option_placeholders:
return {}
insert_proc = 'reference.inserts.create_option'
select_proc = 'reference.selects.get_options'
self.execute(
proc=insert_proc,
placeholders=option_placeholders,
executemany=True,
debug_show=self.DEBUG)
option_lookup = self.execute(
proc=select_proc,
placeholders=unique_options,
replace=[where_in_clause],
key_column='name',
return_type='dict',
debug_show=self.DEBUG)
return option_lookup
def get_db_name(self):
"""The name of the database holding the refdata tables"""
return self.dhub.conf["default_db"]
def get_all_option_collections(self):
"""
Returns all option collections in the following data structure
{
"hash1":{
option_collection_hash : "hash1",
opt:"opt1 opt2"
},
"hash2":{
option_collection_hash : "hash2",
opt:"opt3 opt4 opt5"
}
...
}
"""
return self.execute(
proc='reference.selects.get_all_option_collections',
debug_show=self.DEBUG,
key_column='option_collection_hash',
return_type='dict'
)
def get_repository_id(self, name):
"""get the id for the given repository"""
id_iter = self.execute(
proc='reference.selects.get_repository_id',
placeholders=[name],
debug_show=self.DEBUG,
return_type='iter')
return id_iter.get_column_data('id')
def get_repository_info(self, repository_id):
"""retrieves all the attributes of a repository"""
repo = self.execute(
proc='reference.selects.get_repository_info',
placeholders=[repository_id],
debug_show=self.DEBUG,
return_type='iter')
# retrieve the first elem from DataIterator
for r in repo:
return r
def get_all_repository_info(self):
return self.execute(
proc='reference.selects.get_all_repository_info',
debug_show=self.DEBUG,
return_type='iter')
def get_bug_numbers_list(self):
return self.execute(
proc='reference.selects.get_all_bug_numbers',
debug_show=self.DEBUG,
return_type='iter')
def delete_bugs(self, bug_ids):
"""delete a list of bugs given the ids"""
self.execute(
proc='reference.deletes.delete_bugs',
debug_show=self.DEBUG,
replace=[",".join(["%s"] * len(bug_ids))],
placeholders=list(bug_ids))
def update_bugscache(self, bug_list):
"""
Add content to the bugscache, updating/deleting/inserting
when necessary.
"""
bugs_stored = set(bug["id"] for bug in self.get_bug_numbers_list())
old_bugs = bugs_stored.difference(set(bug['id']
for bug in bug_list))
if old_bugs:
self.delete_bugs(old_bugs)
placeholders = []
for bug in bug_list:
# keywords come as a list of values, we need a string instead
bug['keywords'] = ",".join(bug['keywords'])
placeholders.append([bug.get(field, None) for field in (
'id', 'status', 'resolution', 'summary',
'cf_crash_signature', 'keywords', 'op_sys', 'last_change_time', 'id')])
self.execute(
proc='reference.inserts.create_bugscache',
placeholders=placeholders,
executemany=True,
debug_show=self.DEBUG)
        # removing the first placeholder because it is not used in the update query
del placeholders[0]
self.execute(
proc='reference.updates.update_bugscache',
placeholders=placeholders,
executemany=True,
debug_show=self.DEBUG)
def get_bug_suggestions(self, search_term):
"""
Retrieves two groups of bugs:
1) "Open recent bugs" (ie bug is not resolved & was modified in last 3 months)
2) "All other bugs" (ie all closed bugs + open bugs that were not modified in the last 3 months).
"""
max_size = 50
# 90 days ago
time_limit = datetime.now() - timedelta(days=90)
# Wrap search term so it is used as a phrase in the full-text search.
search_term_fulltext = search_term.join('""')
# Substitute escape and wildcard characters, so the search term is used
# literally in the LIKE statement.
search_term_like = search_term.replace('=', '==').replace('%', '=%').replace('_', '=_')
open_recent = self.execute(
proc='reference.selects.get_open_recent_bugs',
placeholders=[search_term_fulltext, search_term_like, time_limit, max_size + 1],
debug_show=self.DEBUG)
all_others = self.execute(
proc='reference.selects.get_all_others_bugs',
placeholders=[search_term_fulltext, search_term_like, time_limit, max_size + 1],
debug_show=self.DEBUG)
return dict(open_recent=open_recent, all_others=all_others)
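    # Usage sketch (editor's note, the search term is hypothetical):
    #
    #   suggestions = refdata.get_bug_suggestions('test_example.py | single tracking bug')
    #   suggestions['open_recent']  # unresolved bugs modified in the last 90 days
    #   suggestions['all_others']   # every other bug matching the search term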
def get_reference_data_signature(self, signature_properties):
sh = sha1()
sh.update(''.join(map(lambda x: str(x), signature_properties)))
return sh.hexdigest()
def get_reference_data_signature_names(self, signatures):
reference_data = {}
if signatures:
reference_data_signatures_where_in_clause = [
','.join(['%s'] * len(signatures))
]
reference_data = self.execute(
proc="reference.selects.get_reference_data_signature_names",
placeholders=signatures,
replace=reference_data_signatures_where_in_clause,
debug_show=self.DEBUG,
key_column='signature',
return_type='dict')
return reference_data
def get_reference_data(self, signatures):
# use job_id to map to reference data
reference_data = {}
if signatures:
reference_data_signatures_where_in_clause = [','.join(['%s'] * len(signatures))]
reference_data = self.execute(
proc="reference.selects.get_reference_data",
placeholders=signatures,
replace=reference_data_signatures_where_in_clause,
debug_show=self.DEBUG,
key_column='signature',
return_type='dict')
return reference_data
| avih/treeherder | treeherder/model/derived/refdata.py | Python | mpl-2.0 | 44,367 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.account_identity import AccountIdentity
globals()['AccountIdentity'] = AccountIdentity
class ProcessorIdentityGetResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'account': (AccountIdentity,), # noqa: E501
'request_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account': 'account', # noqa: E501
'request_id': 'request_id', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, account, request_id, *args, **kwargs): # noqa: E501
"""ProcessorIdentityGetResponse - a model defined in OpenAPI
Args:
account (AccountIdentity):
request_id (str): A unique identifier for the request, which can be used for troubleshooting. This identifier, like all Plaid identifiers, is case sensitive.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.account = account
self.request_id = request_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
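# Minimal construction sketch (editor's note, not part of the generated model;
# both arguments are required and the values shown here are hypothetical):
#
#   from plaid.model.account_identity import AccountIdentity
#
#   response = ProcessorIdentityGetResponse(
#       account=AccountIdentity(...),      # identity data for the linked account
#       request_id='eYupqX1mZkEuQRx',      # opaque id returned by the Plaid API
#   )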
| plaid/plaid-python | plaid/model/processor_identity_get_response.py | Python | mit | 7,268 |
# Django settings for restaurant project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'local.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'qlc(*ep9zuu-7=bmvzg=f3=ai*9lfwsfjf#fh#2jy&%yf1hly@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'restaurant.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'restaurant.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'clausula',
'brew',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| dekoza/django-clausula | example_project/restaurant/settings.py | Python | bsd-3-clause | 5,265 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from airflow.providers.google.cloud.operators.pubsub import (
PubSubCreateSubscriptionOperator, PubSubCreateTopicOperator, PubSubDeleteSubscriptionOperator,
PubSubDeleteTopicOperator, PubSubPublishMessageOperator,
)
TASK_ID = 'test-task-id'
TEST_PROJECT = 'test-project'
TEST_TOPIC = 'test-topic'
TEST_SUBSCRIPTION = 'test-subscription'
TEST_MESSAGES = [
{
'data': b'Hello, World!',
'attributes': {'type': 'greeting'}
},
{'data': b'Knock, knock'},
{'attributes': {'foo': ''}}]
TEST_POKE_INTERVAL = 0
class TestPubSubTopicCreateOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_failifexists(self, mock_hook):
operator = PubSubCreateTopicOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_exists=True
)
operator.execute(None)
mock_hook.return_value.create_topic.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_exists=True,
labels=None,
message_storage_policy=None,
kms_key_name=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_succeedifexists(self, mock_hook):
operator = PubSubCreateTopicOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_exists=False
)
operator.execute(None)
mock_hook.return_value.create_topic.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_exists=False,
labels=None,
message_storage_policy=None,
kms_key_name=None,
retry=None,
timeout=None,
metadata=None
)
class TestPubSubTopicDeleteOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubDeleteTopicOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC
)
operator.execute(None)
mock_hook.return_value.delete_topic.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
fail_if_not_exists=False,
retry=None,
timeout=None,
metadata=None
)
class TestPubSubSubscriptionCreateOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubCreateSubscriptionOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
response = operator.execute(None)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=None,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
retry=None,
timeout=None,
metadata=None,
)
self.assertEqual(response, TEST_SUBSCRIPTION)
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute_different_project_ids(self, mock_hook):
another_project = 'another-project'
operator = PubSubCreateSubscriptionOperator(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=another_project,
task_id=TASK_ID
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
response = operator.execute(None)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id=another_project,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
retry=None,
timeout=None,
metadata=None
)
self.assertEqual(response, TEST_SUBSCRIPTION)
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute_no_subscription(self, mock_hook):
operator = PubSubCreateSubscriptionOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC
)
mock_hook.return_value.create_subscription.return_value = TEST_SUBSCRIPTION
response = operator.execute(None)
mock_hook.return_value.create_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=None,
subscription_project_id=None,
ack_deadline_secs=10,
fail_if_exists=False,
push_config=None,
retain_acked_messages=None,
message_retention_duration=None,
labels=None,
retry=None,
timeout=None,
metadata=None,
)
self.assertEqual(response, TEST_SUBSCRIPTION)
class TestPubSubSubscriptionDeleteOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_execute(self, mock_hook):
operator = PubSubDeleteSubscriptionOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION
)
operator.execute(None)
mock_hook.return_value.delete_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
fail_if_not_exists=False,
retry=None,
timeout=None,
metadata=None
)
class TestPubSubPublishOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.pubsub.PubSubHook')
def test_publish(self, mock_hook):
operator = PubSubPublishMessageOperator(task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
messages=TEST_MESSAGES)
operator.execute(None)
mock_hook.return_value.publish.assert_called_once_with(
project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES
)
| spektom/incubator-airflow | tests/providers/google/cloud/operators/test_pubsub.py | Python | apache-2.0 | 7,949 |
# Plots are currently included as images, because the example is too big to
# run on readthedocs servers
"""
Cortical depth estimation from MGDM segmentation
=================================================
This example shows how to obtain a cortical laminar depth representation from
an MGDM segmentation result with the following steps:
1. Get a segmentation result from the *tissue_classification* example
2. Extract the cortex of the left hemisphere with
:func:`nighres.brain.extract_brain_region`
3. Cortical reconstruction with CRUISE
:func:`nighres.cortex.cruise_cortex_extraction` [1]_
4. Anatomical depth estimation through
:func:`nighres.laminar.volumetric_layering` [2]_
Important note: this example assumes you have run the tissue classification
example first (example_tissue_classification.py)
"""
############################################################################
# Import and point to previous example
# -------------------------------------
# First we import ``nighres`` and the ``os`` module to set the output directory
# Make sure to run this file in a directory you have write access to, or
# change the ``out_dir`` variable below.
import nighres
import os
in_dir = os.path.join(os.getcwd(), 'nighres_examples/tissue_classification')
out_dir = os.path.join(os.getcwd(),
'nighres_examples/cortical_depth_estimation')
############################################################################
# We also try to import Nilearn plotting functions. If Nilearn is not
# installed, plotting will be skipped.
skip_plots = False
try:
from nilearn import plotting
except ImportError:
skip_plots = True
print('Nilearn could not be imported, plotting will be skipped')
############################################################################
# Now we pull the MGDM results from previous example
segmentation = os.path.join(in_dir, 'sub001_sess1_mgdm-seg.nii.gz')
boundary_dist = os.path.join(in_dir, 'sub001_sess1_mgdm-dist.nii.gz')
max_labels = os.path.join(in_dir, 'sub001_sess1_mgdm-lbls.nii.gz')
max_probas = os.path.join(in_dir, 'sub001_sess1_mgdm-mems.nii.gz')
if not (os.path.isfile(segmentation) and os.path.isfile(boundary_dist)
and os.path.isfile(max_labels) and os.path.isfile(max_probas)) :
    print('This example builds upon the example_tissue_classification.py one')
print('Please run it first')
exit()
###########################################################################
# Region Extraction
# ------------------
# Here we pull from the MGDM output the needed regions for cortical
# reconstruction: the GM cortex ('region'), the underlying WM (with filled
# subcortex and ventricles, 'inside') and the surrounding CSF (with masked
# regions, 'background')
cortex = nighres.brain.extract_brain_region(segmentation=segmentation,
levelset_boundary=boundary_dist,
maximum_membership=max_probas,
maximum_label=max_labels,
extracted_region='left_cerebrum',
save_data=True,
file_name='sub001_sess1_left_cerebrum',
output_dir=out_dir)
############################################################################
# .. tip:: in Nighres, functions that have several outputs return a
#    dictionary storing the different outputs. You can find the keys in the
#    docstring by typing ``nighres.brain.extract_brain_region?`` or
# list them with ``cortex.keys()``
#
# To check if the extraction worked well we plot the GM and WM probabilities.
# You can also open the images stored in ``out_dir`` in
# your favourite interactive viewer and scroll through the volume.
#
# Like Nilearn, we use Nibabel SpatialImage objects to pass data internally.
# Therefore, we can directly plot the outputs using `Nilearn plotting functions
# <http://nilearn.github.io/plotting/index.html#different-plotting-functions>`_
# .
if not skip_plots:
plotting.plot_img(cortex['region_proba'],
vmin=0, vmax=1, cmap='autumn', colorbar=True,
annotate=False, draw_cross=False)
plotting.plot_img(cortex['inside_proba'],
vmin=0, vmax=1, cmap='autumn', colorbar=True,
annotate=False, draw_cross=False)
############################################################################
# .. image:: ../_static/cortical_extraction1.png
############################################################################
############################################################################
# .. image:: ../_static/cortical_extraction2.png
#############################################################################
#############################################################################
# CRUISE cortical reconstruction
# --------------------------------
# Next, we use the extracted data as input for cortex reconstruction with the
# CRUISE algorithm. CRUISE works with the membership functions as a guide and
# the WM inside mask as a (topologically spherical) starting point to grow a
# refined GM/WM boundary and CSF/GM boundary
cruise = nighres.cortex.cruise_cortex_extraction(
init_image=cortex['inside_mask'],
wm_image=cortex['inside_proba'],
gm_image=cortex['region_proba'],
csf_image=cortex['background_proba'],
normalize_probabilities=True,
save_data=True,
file_name="sub001_sess1_left_cerebrum",
output_dir=out_dir)
###########################################################################
# Now we look at the topology-constrained segmentation CRUISE created
if not skip_plots:
plotting.plot_img(cruise['cortex'],
vmin=0, vmax=2, cmap='cubehelix', colorbar=True,
annotate=False, draw_cross=False)
###########################################################################
# .. image:: ../_static/cortical_extraction3.png
###########################################################################
#############################################################################
# Cortical surface inflation
# --------------------------------
# For display purposes, we create a surface mesh from the average cortical
# CRUISE surface, which we then inflate
cortical_surface = nighres.surface.levelset_to_mesh(
levelset_image=cruise['avg'],
save_data=True,
file_name="sub001_sess1_left_cerebrum.vtk",
output_dir=out_dir)
inflated_surface = nighres.surface.surface_inflation(
surface_mesh=cortical_surface['result'],
save_data=True,
file_name="sub001_sess1_left_cerebrum.vtk",
output_dir=out_dir)
#############################################################################
###########################################################################
# Volumetric layering
# ---------------------
# Finally, we use the GM/WM boundary (GWB) and CSF/GM boundary (CGB) from
# CRUISE to compute cortical depth with a volume-preserving technique
depth = nighres.laminar.volumetric_layering(
inner_levelset=cruise['gwb'],
outer_levelset=cruise['cgb'],
n_layers=4,
save_data=True,
file_name="sub001_sess1_left_cerebrum",
output_dir=out_dir)
###########################################################################
# Now we look at the laminar depth estimates
if not skip_plots:
plotting.plot_img(depth['depth'],
vmin=0, vmax=1, cmap='autumn', colorbar=True,
annotate=False, draw_cross=False)
############################################################################
# .. image:: ../_static/cortical_extraction4.png
#############################################################################
#############################################################################
# If the example is not run in a jupyter notebook, render the plots:
if not skip_plots:
plotting.show()
#############################################################################
# References
# -----------
# .. [1] Han et al (2004) CRUISE: Cortical Reconstruction Using Implicit
# Surface Evolution, NeuroImage, vol. 23, pp. 997--1012.
# .. [2] Waehnert et al (2014) Anatomically motivated modeling of cortical
# laminae. DOI: 10.1016/j.neuroimage.2013.03.078
| nighres/nighres | examples/example_02_cortical_depth_estimation.py | Python | apache-2.0 | 8,890 |
import unittest
from ..play.trade import bf_left
from ..play.trade import bf_right
from ..play.trade import dyn_trader
class TestTrade(unittest.TestCase):
def test_play(self):
p = [4, 9, 1, 3, 8, 7, 1]
check_k_1 = [None, 5, 5, 5, 7, 7, 7]
check_k_2 = [None, None, None, 7, 12, 12, 12]
check_k_3 = [None, None, None, None, None, 6, 6]
check_k_4 = [None, None, None, None, None, None, None]
res_k_1, o_n, o_v = dyn_trader(p, 1)
self.assertEqual(check_k_1, res_k_1[0])
res_k_2, o_n, o_v = dyn_trader(p, 2)
self.assertEqual(check_k_2, res_k_2[1])
res_k_3, o_n, o_v = dyn_trader(p, 3)
self.assertEqual(check_k_3, res_k_3[2])
res_k_4, o_n, o_v = dyn_trader(p, 4)
self.assertEqual(check_k_4, res_k_4[3])
print(o_n, o_v)
| alex-am/pyalgo | pyalgo/test/test_play.py | Python | gpl-3.0 | 831 |
#!/usr/bin/python3
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Copyright (C) 2010-2012 Bryce Harrington <bryce@canonical.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function, unicode_literals
import sys
import os
import re
import binascii
from .utils.debug import (ERR, warn, dbg)
from .utils.file_io import (load_binary, load_file)
def _bytes(edid, byte1, byte2=None):
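    # 'edid' is a hex string with two characters per byte; return the substring
    # covering byte index byte1, or the inclusive byte range byte1..byte2.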
if byte2:
return edid[byte1*2:(byte2+1)*2]
else:
return edid[byte1*2:(byte1+1)*2]
def binary_to_bytecode(filename):
data = load_binary(filename)
return [ binascii.b2a_hex(data).decode("utf-8") ]
def load_edid_bytecodes(filename):
"""Loads a list of unique EDIDs from a given file.
The file could be an Xorg.0.log with multiple bytecodes, or a binary
edid retrieved from the monitor itself, or previously saved Edid data.
Returns a list of bytecode strings, suitable for use with the Edid
object constructor.
"""
if (filename is None or filename == '' or not os.path.exists(filename)):
raise Exception("Invalid filename %s" %(filename))
try:
# Try loading as a plain text file first (ala Xorg.0.log)
lines = load_file(filename)
if len(lines) < 1:
raise Exception("Invalid file %s" %(filename))
if lines[0].startswith('00ffffffffffff00'):
# Looks like a regular edid file
            return [ "\n".join(lines) ]
# Next, assume it's an Xorg.0.log
raw_edid = ""
re_head = re.compile("\(II\) .*\(\d+\): EDID \(in hex\):$")
re_edid = re.compile("\(II\) .*\(\d+\):\s\t([0-9a-f]{32})$")
seen_edid_header = False
edid_raw = ""
edids = []
# TODO: Make sure only unique edids are returned
for line in lines:
if re_head.search(line):
seen_edid_header = True
elif seen_edid_header:
m = re_edid.search(line)
if not m:
edids.append(edid_raw)
edid_raw = ""
seen_edid_header = False
continue
edid_raw += m.group(1)
return edids
except UnicodeDecodeError:
return binary_to_bytecode(filename)
return None
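# Typical use (illustrative sketch, assuming an Xorg log at the usual path):
#     for bytecode in load_edid_bytecodes('/var/log/Xorg.0.log'):
#         print(Edid(bytecode).name)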
class EdidFirmware(object):
EDID_FIRMWARE_PATH = '/lib/firmware/edid'
EDID_DRM_CONF_PATH = '/etc/modprobe.d/drm-kms-helper.conf'
def __init__(self):
pass
def list(self):
for filename in os.listdir(self.EDID_FIRMWARE_PATH):
edid_path = os.path.join(self.EDID_FIRMWARE_PATH, filename)
lines = binary_to_bytecode(edid_path)
edid = Edid("\n".join(lines))
edid._origin = "firmware"
yield edid
def install(self, edid_filename):
'''Installs the named edid file into the firmware directory'''
import errno
import shutil
try:
os.makedirs(self.EDID_FIRMWARE_PATH)
except OSError as exc:
if exc.errno != errno.EEXIST:
warn("Could not mkdir %s" %(self.EDID_FIRMWARE_PATH))
return False
# Install the EDID
edid_firmware = os.path.basename(edid_filename)
try:
target = os.path.join(self.EDID_FIRMWARE_PATH, edid_firmware)
shutil.copyfile(edid_filename, target)
print("Installed %s" %(target))
return True
except:
warn("Could not install firmware")
raise
return False
def uninstall(self, edid_filename):
'''Uninstalls the named edid file from the firmware directory'''
edid_firmware = os.path.basename(edid_filename)
try:
target = os.path.join(self.EDID_FIRMWARE_PATH, edid_firmware)
os.remove(target)
print("Uninstalled %s" %(target))
return True
except:
warn("Could not uninstall %s" %(target))
raise
return False
def activate(self, edid_name):
'''Activates given edid by passing it as a kernel command line parameter'''
# TODO: Install it automatically?
#self._install(edid_name)
f = open(self.EDID_DRM_CONF_PATH, 'w')
f.write("options drm_kms_helper edid_firmware=edid/%s\n" %(edid_name))
f.close()
print("Activated %s via %s" %(edid_name, self.EDID_DRM_CONF_PATH))
return True
def deactivate(self, edid_name):
line_to_remove = "options drm_kms_helper edid_firmware=edid/%s" %(edid_name)
        f = open(self.EDID_DRM_CONF_PATH, 'r')
lines = f.readlines()
f.close()
f = open(self.EDID_DRM_CONF_PATH, 'w')
for line in lines:
            if line.strip() != line_to_remove:
f.write(line)
f.close()
class Edid(object):
def __init__(self, bytecode=None):
'''bytecode: multiline hexadecimal text such as from an Xorg.0.log'''
self._items = None
self._origin = None
self.edid_raw = bytecode
if self.edid_raw is not None:
# TODO: Move this to top
assert bytecode.startswith('00ffffffffffff00'), "bytecode is not valid EDID data"
self._origin = "custom"
def save(self, filename):
file = open(filename, 'wb')
file.write(self.to_binary())
file.close()
return True
def _parse(self, edid):
if edid is None:
return None
return [
("Header", _bytes(edid,0,7)),
("Manufacturer", _bytes(edid,8,9)),
("Product ID code", _bytes(edid,10,11)),
("Serial Number", _bytes(edid,12,15)),
("Week of Manufacture", _bytes(edid,16)),
("Year of Manufacture", _bytes(edid,17)),
("EDID Version", _bytes(edid,18)),
("EDID Revision", _bytes(edid,19)),
("Video input def", _bytes(edid,20)),
("Max Horiz Image(cm)", _bytes(edid,21)),
("Max Vert Image(cm)", _bytes(edid,22)),
("Gamma", _bytes(edid,23)),
("Power management", _bytes(edid,24)),
("Chromaticity", _bytes(edid,25,34)),
("Timing I", _bytes(edid,35)),
("Timing II", _bytes(edid,36)),
("Reserved Timing", _bytes(edid,37)),
("Standard Timing", _bytes(edid,38,53)),
("Horiz Active (px)", _bytes(edid,56)),
("Horiz Blanking", _bytes(edid,57)),
("Horiz high", _bytes(edid,58)),
("Vert Active", _bytes(edid,59)),
("Vert Blank", _bytes(edid,60)),
("Vert high", _bytes(edid,61)),
("Horz Sync Offset (px)", _bytes(edid,62)),
("Horiz Sync Pulse Width (px)", _bytes(edid,63)),
("Vert Sync (lines)", _bytes(edid,64)),
("high", _bytes(edid,65)),
("Horiz Image Size (mm)", _bytes(edid,66)),
("Vert Image Size (mm)", _bytes(edid,67)),
("Image Size high", _bytes(edid,68)),
("Horiz Border", _bytes(edid,69)),
("Vert Border", _bytes(edid,70)),
("Interlacing", _bytes(edid,71)),
("Descriptor Block 2", _bytes(edid,72,89)),
("Descriptor Block 3", _bytes(edid,90,107)),
("Descriptor Block 4", _bytes(edid,108,125)),
("Extension Flag", _bytes(edid,126)),
("Checksum", _bytes(edid,127)),
]
def to_hex(self):
return self.edid_raw
def to_binary(self):
return bytes.fromhex(self.edid_raw)
@property
def items(self):
if self._items is None:
self._items = self._parse(self.to_hex())
return self._items
@property
def manufacturer(self):
code = self.items[1][1] # The ascii code
bstr = str(bin(int(code, 16)))[2:] # Converted to binary
bstr = bstr.zfill(15) # Fill left side with 0's
s = [ # Extract and convert letter codes to chars
chr(int(bstr[-15:-10],2) + ord('A') - 1),
chr(int(bstr[-10:-5],2) + ord('A') - 1),
chr(int(bstr[-5:],2) + ord('A') - 1)
]
return ''.join(s)
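    # Worked example (for illustration): HP monitors report manufacturer bytes
    # 0x22 0xf0, i.e. code '22f0' -> '010001011110000' after zero-filling ->
    # 5-bit groups 01000/10111/10000 = 8/23/16 -> 'H','W','P' -> "HWP".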
@property
def product_id(self):
return _bytes(self.to_hex(),10,11)
@property
def serial_number(self):
return _bytes(self.to_hex(),12,15)
@property
def name(self):
return "%s:%s SN#%s %d-%d v%s.%s %s" %(
self.manufacturer,
self.items[2][1],
self.items[3][1],
int(self.items[4][1], 16),
int(self.items[5][1], 16) + 1990,
self.items[6][1],
self.items[7][1],
self._origin
)
def __str__(self):
text = ''
for field, value in self.items:
text += "%-30s: %s\n" %(field, value)
return text
if __name__ == "__main__":
edidfile = sys.argv[1]
regex = re.compile("\(II\) .*\(\d+\):\s+(.*)$")
edid_text = ""
lines = load_file(edidfile)
for line in lines.split("\n"):
m = regex.search(line)
if m:
line = m.group(1)
edid_text += line
edid = Edid(edid_text)
print(edid)
| yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/xdiagnose/edid.py | Python | mit | 10,364 |
"""Utilities for the MySQL backend"""
from django.db import connection
#in-memory cached variable
SUPPORTS_FTS = None
def supports_full_text_search():
"""True if the database engine is MyISAM"""
from askbot.models import Question
global SUPPORTS_FTS
if SUPPORTS_FTS is None:
cursor = connection.cursor()
table_name = Question._meta.db_table
cursor.execute("SHOW CREATE TABLE %s" % table_name);
data = cursor.fetchone()
if 'ENGINE=MyISAM' in data[1]:
SUPPORTS_FTS = True
else:
SUPPORTS_FTS = False
return SUPPORTS_FTS
| aavrug/askbot-devel | askbot/utils/mysql.py | Python | gpl-3.0 | 611 |
# Test cases for VHT operations with hostapd
# Copyright (c) 2014, Qualcomm Atheros, Inc.
# Copyright (c) 2013, Intel Corporation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import os
import subprocess, time
import hwsim_utils
import hostapd
from utils import HwsimSkip
from test_dfs import wait_dfs_event
from test_ap_csa import csa_supported
def vht_supported():
cmd = subprocess.Popen(["iw", "reg", "get"], stdout=subprocess.PIPE)
reg = cmd.stdout.read()
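    # "iw reg get" prints the maximum permitted channel width for each frequency
    # range, e.g. "(5170 - 5250 @ 80)"; an 80 or 160 MHz entry means the current
    # regulatory domain allows VHT-wide channels.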
if "@ 80)" in reg or "@ 160)" in reg:
return True
return False
def test_ap_vht80(dev, apdev):
"""VHT with 80 MHz channel width"""
try:
hapd = None
params = { "ssid": "vht",
"country_code": "FI",
"hw_mode": "a",
"channel": "36",
"ht_capab": "[HT40+]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "1",
"vht_oper_centr_freq_seg0_idx": "42" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("vht", key_mgmt="NONE", scan_freq="5180")
hwsim_utils.test_connectivity(dev[0], hapd)
est = dev[0].get_bss(bssid)['est_throughput']
if est != "390001":
raise Exception("Unexpected BSS est_throughput: " + est)
except Exception, e:
if isinstance(e, Exception) and str(e) == "AP startup failed":
if not vht_supported():
raise HwsimSkip("80 MHz channel not supported in regulatory information")
raise
finally:
dev[0].request("DISCONNECT")
if hapd:
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def test_ap_vht80_params(dev, apdev):
"""VHT with 80 MHz channel width and number of optional features enabled"""
try:
hapd = None
params = { "ssid": "vht",
"country_code": "FI",
"hw_mode": "a",
"channel": "36",
"ht_capab": "[HT40+][SHORT-GI-40][DSS_CCK-40]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "1",
"vht_capab": "[MAX-MPDU-11454][RXLDPC][SHORT-GI-80][TX-STBC-2BY1][RX-STBC-1][MAX-A-MPDU-LEN-EXP0]",
"vht_oper_centr_freq_seg0_idx": "42",
"require_vht": "1" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[1].connect("vht", key_mgmt="NONE", scan_freq="5180",
disable_vht="1", wait_connect=False)
dev[0].connect("vht", key_mgmt="NONE", scan_freq="5180")
ev = dev[1].wait_event(["CTRL-EVENT-ASSOC-REJECT"])
if ev is None:
raise Exception("Association rejection timed out")
if "status_code=104" not in ev:
raise Exception("Unexpected rejection status code")
dev[1].request("DISCONNECT")
hwsim_utils.test_connectivity(dev[0], hapd)
except Exception, e:
if isinstance(e, Exception) and str(e) == "AP startup failed":
if not vht_supported():
raise HwsimSkip("80 MHz channel not supported in regulatory information")
raise
finally:
dev[0].request("DISCONNECT")
dev[1].request("DISCONNECT")
if hapd:
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def test_ap_vht_20(devs, apdevs):
"""VHT and 20 MHz channel"""
dev = devs[0]
ap = apdevs[0]
try:
hapd = None
params = { "ssid": "test-vht20",
"country_code": "DE",
"hw_mode": "a",
"channel": "36",
"ieee80211n": "1",
"ieee80211ac": "1",
"ht_capab": "",
"vht_capab": "",
"vht_oper_chwidth": "0",
"vht_oper_centr_freq_seg0_idx": "0",
"supported_rates": "60 120 240 360 480 540",
"require_vht": "1",
}
hapd = hostapd.add_ap(ap['ifname'], params)
dev.connect("test-vht20", scan_freq="5180", key_mgmt="NONE")
hwsim_utils.test_connectivity(dev, hapd)
finally:
dev.request("DISCONNECT")
if hapd:
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev.flush_scan_cache()
def test_ap_vht_40(devs, apdevs):
"""VHT and 40 MHz channel"""
dev = devs[0]
ap = apdevs[0]
try:
hapd = None
params = { "ssid": "test-vht40",
"country_code": "DE",
"hw_mode": "a",
"channel": "36",
"ieee80211n": "1",
"ieee80211ac": "1",
"ht_capab": "[HT40+]",
"vht_capab": "",
"vht_oper_chwidth": "0",
"vht_oper_centr_freq_seg0_idx": "0",
}
hapd = hostapd.add_ap(ap['ifname'], params)
dev.connect("test-vht40", scan_freq="5180", key_mgmt="NONE")
hwsim_utils.test_connectivity(dev, hapd)
finally:
dev.request("DISCONNECT")
if hapd:
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev.flush_scan_cache()
def test_ap_vht_capab_not_supported(dev, apdev):
"""VHT configuration with driver not supporting all vht_capab entries"""
try:
params = { "ssid": "vht",
"country_code": "FI",
"hw_mode": "a",
"channel": "36",
"ht_capab": "[HT40+][SHORT-GI-40][DSS_CCK-40]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "1",
"vht_capab": "[MAX-MPDU-7991][MAX-MPDU-11454][VHT160][VHT160-80PLUS80][RXLDPC][SHORT-GI-80][SHORT-GI-160][TX-STBC-2BY1][RX-STBC-1][RX-STBC-12][RX-STBC-123][RX-STBC-1234][SU-BEAMFORMER][SU-BEAMFORMEE][BF-ANTENNA-2][SOUNDING-DIMENSION-2][MU-BEAMFORMER][MU-BEAMFORMEE][VHT-TXOP-PS][HTC-VHT][MAX-A-MPDU-LEN-EXP0][MAX-A-MPDU-LEN-EXP7][VHT-LINK-ADAPT2][VHT-LINK-ADAPT3][RX-ANTENNA-PATTERN][TX-ANTENNA-PATTERN]",
"vht_oper_centr_freq_seg0_idx": "42",
"require_vht": "1" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
ev = hapd.wait_event(["AP-DISABLED"], timeout=5)
if ev is None:
raise Exception("Startup failure not reported")
for i in range(1, 7):
if "OK" not in hapd.request("SET vht_capab [MAX-A-MPDU-LEN-EXP%d]" % i):
raise Exception("Unexpected SET failure")
finally:
subprocess.call(['iw', 'reg', 'set', '00'])
def test_ap_vht160(dev, apdev):
"""VHT with 160 MHz channel width"""
try:
hapd = None
hapd2 = None
params = { "ssid": "vht",
"country_code": "FI",
"hw_mode": "a",
"channel": "36",
"ht_capab": "[HT40+]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "2",
"vht_oper_centr_freq_seg0_idx": "50",
'ieee80211d': '1',
'ieee80211h': '1' }
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
ev = wait_dfs_event(hapd, "DFS-CAC-START", 5)
if "DFS-CAC-START" not in ev:
raise Exception("Unexpected DFS event")
state = hapd.get_status_field("state")
if state != "DFS":
if state == "DISABLED" and not os.path.exists("dfs"):
# Not all systems have recent enough CRDA version and
# wireless-regdb changes to support 160 MHz and DFS. For now,
# do not report failures for this test case.
raise HwsimSkip("CRDA or wireless-regdb did not support 160 MHz")
raise Exception("Unexpected interface state: " + state)
params = { "ssid": "vht2",
"country_code": "FI",
"hw_mode": "a",
"channel": "100",
"ht_capab": "[HT40+]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "2",
"vht_oper_centr_freq_seg0_idx": "114",
'ieee80211d': '1',
'ieee80211h': '1' }
hapd2 = hostapd.add_ap(apdev[1]['ifname'], params, wait_enabled=False)
ev = wait_dfs_event(hapd2, "DFS-CAC-START", 5)
if "DFS-CAC-START" not in ev:
raise Exception("Unexpected DFS event(2)")
state = hapd2.get_status_field("state")
if state != "DFS":
raise Exception("Unexpected interface state(2): " + state)
logger.info("Waiting for CAC to complete")
ev = wait_dfs_event(hapd, "DFS-CAC-COMPLETED", 70)
if "success=1" not in ev:
raise Exception("CAC failed")
if "freq=5180" not in ev:
raise Exception("Unexpected DFS freq result")
ev = hapd.wait_event(["AP-ENABLED"], timeout=5)
if not ev:
raise Exception("AP setup timed out")
state = hapd.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state")
ev = wait_dfs_event(hapd2, "DFS-CAC-COMPLETED", 70)
if "success=1" not in ev:
raise Exception("CAC failed(2)")
if "freq=5500" not in ev:
raise Exception("Unexpected DFS freq result(2)")
ev = hapd2.wait_event(["AP-ENABLED"], timeout=5)
if not ev:
raise Exception("AP setup timed out(2)")
state = hapd2.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state(2)")
freq = hapd2.get_status_field("freq")
if freq != "5500":
raise Exception("Unexpected frequency(2)")
dev[0].connect("vht", key_mgmt="NONE", scan_freq="5180")
hwsim_utils.test_connectivity(dev[0], hapd)
sig = dev[0].request("SIGNAL_POLL").splitlines()
if "FREQUENCY=5180" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(1): " + str(sig))
if "WIDTH=160 MHz" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(2): " + str(sig))
dev[1].connect("vht2", key_mgmt="NONE", scan_freq="5500")
hwsim_utils.test_connectivity(dev[1], hapd2)
sig = dev[1].request("SIGNAL_POLL").splitlines()
if "FREQUENCY=5500" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(1): " + str(sig))
if "WIDTH=160 MHz" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(2): " + str(sig))
except Exception, e:
if isinstance(e, Exception) and str(e) == "AP startup failed":
if not vht_supported():
raise HwsimSkip("80/160 MHz channel not supported in regulatory information")
raise
finally:
dev[0].request("DISCONNECT")
dev[1].request("DISCONNECT")
if hapd:
hapd.request("DISABLE")
if hapd2:
hapd2.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def test_ap_vht80plus80(dev, apdev):
"""VHT with 80+80 MHz channel width"""
try:
hapd = None
hapd2 = None
params = { "ssid": "vht",
"country_code": "US",
"hw_mode": "a",
"channel": "52",
"ht_capab": "[HT40+]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "3",
"vht_oper_centr_freq_seg0_idx": "58",
"vht_oper_centr_freq_seg1_idx": "155",
'ieee80211d': '1',
'ieee80211h': '1' }
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
# This will actually fail since DFS on 80+80 is not yet supported
ev = hapd.wait_event(["AP-DISABLED"], timeout=5)
# ignore result to avoid breaking the test once 80+80 DFS gets enabled
params = { "ssid": "vht2",
"country_code": "US",
"hw_mode": "a",
"channel": "36",
"ht_capab": "[HT40+]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "3",
"vht_oper_centr_freq_seg0_idx": "42",
"vht_oper_centr_freq_seg1_idx": "155" }
hapd2 = hostapd.add_ap(apdev[1]['ifname'], params, wait_enabled=False)
ev = hapd2.wait_event(["AP-ENABLED", "AP-DISABLED"], timeout=5)
if not ev:
raise Exception("AP setup timed out(2)")
if "AP-DISABLED" in ev:
# Assume this failed due to missing regulatory update for now
raise HwsimSkip("80+80 MHz channel not supported in regulatory information")
state = hapd2.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state(2)")
dev[1].connect("vht2", key_mgmt="NONE", scan_freq="5180")
hwsim_utils.test_connectivity(dev[1], hapd2)
sig = dev[1].request("SIGNAL_POLL").splitlines()
if "FREQUENCY=5180" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(1): " + str(sig))
if "WIDTH=80+80 MHz" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(2): " + str(sig))
if "CENTER_FRQ1=5210" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(3): " + str(sig))
if "CENTER_FRQ2=5775" not in sig:
raise Exception("Unexpected SIGNAL_POLL value(4): " + str(sig))
except Exception, e:
if isinstance(e, Exception) and str(e) == "AP startup failed":
if not vht_supported():
raise HwsimSkip("80/160 MHz channel not supported in regulatory information")
raise
finally:
dev[0].request("DISCONNECT")
dev[1].request("DISCONNECT")
if hapd:
hapd.request("DISABLE")
if hapd2:
hapd2.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def test_ap_vht80_csa(dev, apdev):
"""VHT with 80 MHz channel width and CSA"""
csa_supported(dev[0])
try:
hapd = None
params = { "ssid": "vht",
"country_code": "US",
"hw_mode": "a",
"channel": "149",
"ht_capab": "[HT40+]",
"ieee80211n": "1",
"ieee80211ac": "1",
"vht_oper_chwidth": "1",
"vht_oper_centr_freq_seg0_idx": "155" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect("vht", key_mgmt="NONE", scan_freq="5745")
hwsim_utils.test_connectivity(dev[0], hapd)
hapd.request("CHAN_SWITCH 5 5180 ht vht blocktx center_freq1=5210 sec_channel_offset=1 bandwidth=80")
ev = hapd.wait_event(["AP-CSA-FINISHED"], timeout=10)
if ev is None:
raise Exception("CSA finished event timed out")
if "freq=5180" not in ev:
raise Exception("Unexpected channel in CSA finished event")
time.sleep(0.5)
hwsim_utils.test_connectivity(dev[0], hapd)
hapd.request("CHAN_SWITCH 5 5745")
ev = hapd.wait_event(["AP-CSA-FINISHED"], timeout=10)
if ev is None:
raise Exception("CSA finished event timed out")
if "freq=5745" not in ev:
raise Exception("Unexpected channel in CSA finished event")
time.sleep(0.5)
hwsim_utils.test_connectivity(dev[0], hapd)
# This CSA to same channel will fail in kernel, so use this only for
# extra code coverage.
hapd.request("CHAN_SWITCH 5 5745")
hapd.wait_event(["AP-CSA-FINISHED"], timeout=1)
except Exception, e:
if isinstance(e, Exception) and str(e) == "AP startup failed":
if not vht_supported():
raise HwsimSkip("80 MHz channel not supported in regulatory information")
raise
finally:
dev[0].request("DISCONNECT")
if hapd:
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def test_ap_vht_on_24ghz(dev, apdev):
"""Subset of VHT features on 2.4 GHz"""
hapd = None
params = { "ssid": "test-vht-2g",
"hw_mode": "g",
"channel": "1",
"ieee80211n": "1",
"vendor_vht": "1",
"vht_capab": "[MAX-MPDU-11454]",
"vht_oper_chwidth": "0",
"vht_oper_centr_freq_seg0_idx": "1"
}
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
try:
if "OK" not in dev[0].request("VENDOR_ELEM_ADD 13 dd1300904c0400bf0c3240820feaff0000eaff0000"):
raise Exception("Failed to add vendor element")
dev[0].connect("test-vht-2g", scan_freq="2412", key_mgmt="NONE")
hwsim_utils.test_connectivity(dev[0], hapd)
sta = hapd.get_sta(dev[0].own_addr())
if '[VENDOR_VHT]' not in sta['flags']:
raise Exception("No VENDOR_VHT STA flag")
dev[1].connect("test-vht-2g", scan_freq="2412", key_mgmt="NONE")
sta = hapd.get_sta(dev[1].own_addr())
if '[VENDOR_VHT]' in sta['flags']:
raise Exception("Unexpected VENDOR_VHT STA flag")
finally:
dev[0].request("VENDOR_ELEM_REMOVE 13 *")
def test_prefer_vht40(dev, apdev):
"""Preference on VHT40 over HT40"""
try:
hapd2 = None
params = { "ssid": "test",
"country_code": "FI",
"hw_mode": "a",
"channel": "36",
"ieee80211n": "1",
"ht_capab": "[HT40+]" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
params = { "ssid": "test",
"country_code": "FI",
"hw_mode": "a",
"channel": "36",
"ieee80211n": "1",
"ieee80211ac": "1",
"ht_capab": "[HT40+]",
"vht_capab": "",
"vht_oper_chwidth": "0",
"vht_oper_centr_freq_seg0_idx": "0",
}
hapd2 = hostapd.add_ap(apdev[1]['ifname'], params)
bssid2 = apdev[1]['bssid']
dev[0].scan_for_bss(bssid, freq=5180)
dev[0].scan_for_bss(bssid2, freq=5180)
dev[0].connect("test", scan_freq="5180", key_mgmt="NONE")
if dev[0].get_status_field('bssid') != bssid2:
raise Exception("Unexpected BSS selected")
est = dev[0].get_bss(bssid)['est_throughput']
if est != "135000":
raise Exception("Unexpected BSS0 est_throughput: " + est)
est = dev[0].get_bss(bssid2)['est_throughput']
if est != "135001":
raise Exception("Unexpected BSS1 est_throughput: " + est)
finally:
dev[0].request("DISCONNECT")
if hapd2:
hapd2.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
| wangybgit/Chameleon | hostapd-OpenWrt/tests/hwsim/test_ap_vht.py | Python | apache-2.0 | 19,862 |
from waab import SOURCE_ID_PATTERN
def description(req, d):
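    # Keep rewriting source-id placeholders (whatever SOURCE_ID_PATTERN matches)
    # into URLs for the 'source' route until none are left in the description.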
while SOURCE_ID_PATTERN.search(d):
d = SOURCE_ID_PATTERN.sub(lambda m: req.route_url('source', id=m.group('id')), d)
return d
| clld/waab | waab/util.py | Python | apache-2.0 | 204 |
#! /usr/bin/env python3
""" A Word class
Originally obtained from the 'pharm' repository, but modified.
"""
class Word(object):
doc_id = None
sent_id = None
in_sent_idx = None
word = None
pos = None
ner = None
lemma = None
dep_path = None
dep_parent = None
sent_id = None
box = None
def __init__(self, _doc_id, _sent_id, _in_sent_idx, _word, _pos, _ner,
_lemma, _dep_path, _dep_parent, _box):
self.doc_id = _doc_id
self.sent_id = _sent_id
self.in_sent_idx = _in_sent_idx
self.word = _word
self.pos = _pos
self.ner = _ner
self.dep_parent = _dep_parent
self.dep_path = _dep_path
self.box = _box
self.lemma = _lemma
        # If we do not do the following, outputting an Array in the language will
# crash
# XXX (Matteo) This was in the pharm code, not sure what it means
# I actually don't think this should go here.
# self.lemma = self.lemma.replace('"', "''")
# self.lemma = self.lemma.replace('\\', "_")
def __repr__(self):
return self.word
# Return the NER tag if different than 'O', otherwise return the lemma
def get_feature(self, use_pos=False):
if use_pos:
return self.pos
elif self.ner == 'O':
return self.lemma
else:
return self.ner
| amwenger/dd-genomics | code/dstruct/Word.py | Python | apache-2.0 | 1,410 |
from SloppyCell.ReactionNetworks import *
net = IO.from_SBML_file('BIOMD0000000049.xml', 'base')
net.compile()
# Add some useful things for plotting
net.add_parameter('total_active_Rap1')
to_sum = [k for k in net.species.keys() if k.count('Rap1_GTP')]
net.add_assignment_rule('total_active_Rap1', ' + '.join(to_sum))
net.add_parameter('total_active_Ras')
to_sum = [k for k in net.species.keys() if k.count('Ras_GTP')]
net.add_assignment_rule('total_active_Ras', ' + '.join(to_sum))
net.add_parameter('total_active_ERK')
to_sum = [k for k in net.species.keys() if k.count('ppERK')]
net.add_assignment_rule('total_active_ERK', ' + '.join(to_sum))
# SBML file is for 10 ng/ml EGF
EGF_ng_ml = net.get_var_ic('EGF')/10.
net.add_parameter('EGF_ng_ml', EGF_ng_ml, is_optimizable=False)
# Convert using molecular weights
NGF_ng_ml = EGF_ng_ml * 6.1/26.
net.add_parameter('NGF_ng_ml', NGF_ng_ml, is_optimizable=False)
# Fixed 10 ng/ml EGF stimulation
EGF10_net = net.copy('EGF10')
# Ramp from 0 to 1.5 ng/ml EGF over 60 min
EGF1pt5_60_ramp = EGF10_net.copy('EGF1pt5_60_ramp')
EGF1pt5_60_ramp.add_assignment_rule('EGF', '1.5 * EGF_ng_ml * time/3600.')
# Fixed 10 ng/ml NGF stimulation
NGF10_net = net.copy('NGF10')
NGF10_net.set_var_ic('EGF', 0)
NGF10_net.set_var_ic('NGF', 10. * NGF_ng_ml)
# Ramp from 0 to 1.5 ng/ml NGF over 60 min
NGF1pt5_60_ramp = NGF10_net.copy('NGF1pt5_60_ramp')
NGF1pt5_60_ramp.add_assignment_rule('NGF', '1.5 * NGF_ng_ml * time/3600.')
networks = [EGF10_net, EGF1pt5_60_ramp, NGF10_net, NGF1pt5_60_ramp]
int_times = [(0, 60)] * 4
| GutenkunstLab/SloppyCell | Example/Gutenkunst2007/Sasagawa_2005/Nets.py | Python | bsd-3-clause | 1,555 |
from pygeons.glossary import GLOSSARY
from pygeons.io.io import pygeons_toh5,pygeons_totext,pygeons_info,pygeons_crop,pygeons_merge
from pygeons.plot.plot import pygeons_vector_view,pygeons_strain_view
from pygeons.clean.clean import pygeons_clean
from pygeons.main.main import pygeons_strain,pygeons_reml,pygeons_autoclean,pygeons_fit
| treverhines/PyGeoNS | pygeons/__init__.py | Python | mit | 336 |
from headers import Headers
from url import Url
class Response(object):
"""
The ``Response`` object encapsulates HTTP style responses.
"""
def __init__(self, status, headers=Headers(), content=None,
message=None, request=None):
"""
Construct a new ``Response`` object.
:param status: HTTP status code for the response
:type status: integer
:param headers: HTTP headers
        :type headers: a list of tuples or a class:`Headers` object
:param content: content
:param message: HTTP message for the response
:param request: origin Request object used
:type request: class:`Request`
.. attribute:: redirects
List of redirections
"""
self._status = status
self.message = message
self.redirects = list()
if (not isinstance(headers, Headers)):
headers = Headers(headers)
self._headers = headers
self._content = content
self._request = request
@property
def status(self):
"""
Returns the HTTP status
:rtype: int
"""
return int(self._status)
@property
def is_info(self):
"""
Returns if the response was informational
:rtype: boolean
"""
if self.status >= 100 and self.status < 200:
return True
return False
@property
def is_success(self):
"""
        Returns if the response was a success
        :rtype: boolean
"""
if self.status >= 200 and self.status < 300:
return True
return False
@property
def is_redirect(self):
"""
        Returns if the response was a redirect
:rtype: boolean
"""
if self.status >= 300 and self.status < 400:
return True
return False
@property
def is_client_error(self):
"""
Returns if the response was a client error
:rtype: boolean
"""
if self.status >= 400 and self.status < 500:
return True
return False
@property
def is_server_error(self):
"""
        Returns if the response was a server error
:rtype: boolean
"""
        if self.status >= 500 and self.status < 600:
            return True
        return False
@property
def is_error(self):
"""
Returns if the response was an error
:rtype: boolean
"""
if self.is_client_error or self.is_server_error:
return True
return False
@property
def base(self):
"""
Returns the base URI for this response
:rtype: class:`Url` or None
"""
url = None
if self.header('Content-Base'):
url = self.header('Content-Base')
if self.header('Content-Location'):
url = self.header('Content-Location')
if url is None and self.request:
url = self.request.url
if not url:
return None
if not isinstance(url, Url):
url = Url(url)
return url
@property
def request(self):
"""
Returns the request object that caused that response
:rtype: class:`Request`
"""
return self._request
@property
def content(self):
"""
Returns the actual content of the response
:rtype: string
"""
return self._content
@content.setter
def content(self, content):
"""
Set the actual content of the response
"""
self._content = content
def header(self, name):
"""
Returns the value for a given header
:rtype: string
"""
return self._headers.get(name)
@property
def headers(self):
"""
Returns the class:`Headers` object
:rtype: class:`Headers`
"""
return self._headers
@property
def status_line(self):
"""
Returns the string '<code> <message>'
:rtype: string
"""
return "{0} {1}".format(self.status, self.message)
@property
def last_modified(self):
"""
        Returns a datetime object representing the *Last-Modified* header
:rtype: class:`datetime`
"""
return self._headers.last_modified
@property
def date(self):
"""
        Returns a datetime object representing the *Date* header
:rtype: class:`datetime`
"""
return self._headers.date
@property
def expires(self):
"""
        Returns a datetime object representing the *Expires* header
:rtype: class:`datetime`
"""
return self._headers.expires
@property
def content_length(self):
"""
Returns the content-length of the actual response
:rtype: int
"""
return self._headers.content_length
@property
def content_is_text(self):
"""
Returns ``True`` if the "Content-Type" header is set to text
:rtype: boolean
"""
return self._headers.content_is_text
@property
def content_is_xml(self):
"""
Returns ``True`` if the "Content-Type" header is set to XML
:rtype: boolean
"""
return self._headers.content_is_xml
@property
def content_is_xhtml(self):
"""
Returns True if the "Content-Type" header is set to XHTML
:rtype: boolean
"""
return self._headers.content_is_xhtml
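
# A minimal usage sketch (illustrative only -- the exact Headers/Url behaviour
# depends on the sibling modules imported above):
#
#     resp = Response(200, Headers([('Content-Type', 'text/html')]),
#                     content='<html/>', message='OK')
#     resp.is_success        # True
#     resp.status_line       # '200 OK'
#     resp.header('Content-Type')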
| XDrake99/IDTP | protocol/http/response.py | Python | mit | 5,596 |
# -*- coding: utf-8 -*-
print '''<!DOCTYPE html><html>'''
incluir(data,"head")
print ''''''
data["detalle"]=False
print ''''''
data["output"]="marco"
print ''''''
data["input"]="btn"
print '''<body class="container-fluid sin-marg pad-r08 pad-l08 ff">'''
incluir(data,"header")
print ''''''
incluir(data,"hero")
print ''''''
incluir(data,"barra-buscador")
print '''<section class="row"><div class="col-xs-12 col-sm-6 col-md-6 col-lg-6"><div class="pad-2"><h1>Inscribete</h1>'''
incluir(data,"previewImg-marco")
print '''<span> Nombre de la votación</span> </div></div><form class="col-xs-12 col-sm-6 col-md-6 col-lg-6" name="" id="inscripcionPartido"><div class="height-25 bg-ubuntu_ash pad-2 text-center alg-middle" ><input type="submit" name="" value="Registrarme"></div></form></section>'''
incluir(data,"footer")
print '''</body></html>'''
| ZerpaTechnology/AsenZor | apps/votSys2/user/vistas/templates/inscripcionPartido.py | Python | lgpl-3.0 | 844 |
# coding: utf-8
import numpy as np
class NeuralNetwork:
def __init__(self, layer_dims: tuple, *, sigma: float):
self.w, self.b, self.layer_dims = [], [], layer_dims
for i in range(1, len(layer_dims)):
self.w.append(np.random.randn(layer_dims[i], layer_dims[i - 1]) * sigma)
self.b.append(np.zeros((layer_dims[i], 1)))
def forward_propagation(self, x: np.ndarray, *, training: bool = False, dropout: dict = None) -> list:
a = [x]
nl = len(self.layer_dims)
for l in range(1, nl - 1):
al = np.dot(self.w[l - 1], a[l - 1]) + self.b[l - 1]
np.maximum(al, 0, out=al)
if not (dropout is None):
if training:
al = al * dropout["mask"][l]
else:
al = al * (1. - dropout["rate"])
a.append(al)
al = np.dot(self.w[nl - 2], a[nl - 2]) + self.b[nl - 2]
# TODO: solve overflow problem in some more reasonable way (https://zhuanlan.zhihu.com/p/22260935)
np.clip(al, -30., 30., al)
al = 1. / (1. + np.exp(-al))
a.append(al)
return a
def back_propagation(self, y: np.ndarray, a: list, dropout_mask: list = None) -> (list, list):
nl = len(self.layer_dims)
dz, dw, db = [None] * nl, [None] * (nl - 1), [None] * (nl - 1)
dz[nl - 1] = (a[nl - 1] - y) / y.shape[1]
for l in reversed(range(nl - 1)):
dw[l] = np.dot(dz[l + 1], a[l].T)
if not (dropout_mask is None):
dw[l] = dw[l] * dropout_mask[l + 1]
db[l] = np.sum(dz[l + 1], axis=1, keepdims=True)
dz[l] = np.dot(self.w[l].T, dz[l + 1]) * (a[l] > 0)
return dw, db
@staticmethod
def cost(y: np.ndarray, al: np.ndarray) -> np.ndarray:
return -np.mean(y * np.log(al) + (1 - y) * np.log(1. - al))
def gradient_check(self, x: np.ndarray, y: np.ndarray, eps: float = 1e-8):
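        # Numerical sanity check: perturb each weight/bias by +/-eps, re-evaluate the
        # cost, and compare the central-difference estimate with back-propagation.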
nl = len(self.layer_dims)
dw, db = [None] * (nl - 1), [None] * (nl - 1)
a = self.forward_propagation(x)
tdw, tdb = self.back_propagation(y, a)
for l in range(nl - 1):
dw[l] = np.zeros(self.w[l].shape)
for i in range(self.w[l].shape[0]):
for j in range(self.w[l].shape[1]):
self.w[l][i, j] = self.w[l][i, j] + eps
a = self.forward_propagation(x)
c1 = self.cost(y, a[len(a) - 1])
self.w[l][i, j] = self.w[l][i, j] - eps * 2.
a = self.forward_propagation(x)
c2 = self.cost(y, a[len(a) - 1])
self.w[l][i, j] = self.w[l][i, j] + eps
dw[l][i, j] = (c1 - c2) / (eps * 2.)
db[l] = np.zeros(self.b[l].shape)
for i in range(self.b[l].shape[0]):
for j in range(self.b[l].shape[1]):
self.b[l][i, j] = self.b[l][i, j] + eps
a = self.forward_propagation(x)
c1 = self.cost(y, a[len(a) - 1])
self.b[l][i, j] = self.b[l][i, j] - eps * 2.
a = self.forward_propagation(x)
c2 = self.cost(y, a[len(a) - 1])
self.b[l][i, j] = self.b[l][i, j] + eps
db[l][i, j] = (c1 - c2) / (eps * 2.)
print(np.linalg.norm(tdw[l] - dw[l]), np.linalg.norm(tdb[l] - db[l]))
print(np.linalg.norm((tdw[l] - dw[l]) / dw[l]), np.linalg.norm((tdb[l] - db[l]) / db[l]))
def gradient_descent_update(self, dw: list, db: list, params=None) -> dict:
if params is None:
params = {"learning_rate": 0.7}
for l in range(len(self.layer_dims) - 1):
self.w[l] = self.w[l] - params["learning_rate"] * dw[l]
self.b[l] = self.b[l] - params["learning_rate"] * db[l]
return {}
def gradient_descent_momentum_update(self, dw: list, db: list, cache: dict, params=None) -> dict:
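        # Momentum update: each velocity decays by the friction factor f and accumulates
        # the current gradient; parameters then step along -learning_rate * velocity.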
if params is None:
params = {"f": 0.1, "learning_rate": 0.02}
if not cache:
cache = {"v_w": [], "v_b": []}
for l in range(len(self.layer_dims) - 1):
cache["v_w"].append(np.zeros(self.w[l].shape))
cache["v_b"].append(np.zeros(self.b[l].shape))
for l in range(len(self.layer_dims) - 1):
cache["v_w"][l] = (1. - params["f"]) * cache["v_w"][l] + dw[l]
cache["v_b"][l] = (1. - params["f"]) * cache["v_b"][l] + db[l]
self.w[l] = self.w[l] - params["learning_rate"] * cache["v_w"][l]
self.b[l] = self.b[l] - params["learning_rate"] * cache["v_b"][l]
return cache
def optimize(self, x: np.ndarray, y: np.ndarray, x_cv: np.ndarray, y_cv: np.ndarray,
optimization_params: dict = None, iter_num: int = 1500, dropout_rate: float = None,
l2_decay: float = 0.) -> (float, float):
best_so_far = {"cost": np.infty, "w": None, "b": None, "iter_num": 0}
cache = {}
no_update_cnt = 0
for i in range(iter_num):
if dropout_rate is None:
a = self.forward_propagation(x, training=True)
dw, db = self.back_propagation(y, a)
else:
dropout_mask = [np.ones((x.shape[0], 1))]
for dim in self.layer_dims[1:-1]:
dropout_mask.append(np.random.rand(dim, 1) >= dropout_rate)
dropout_mask.append(np.ones((self.layer_dims[-1], 1)))
                a = self.forward_propagation(x, training=True,
                                             dropout={"mask": dropout_mask, "rate": dropout_rate})
dw, db = self.back_propagation(y, a, dropout_mask=dropout_mask)
for l in range(len(self.layer_dims) - 1):
dw[l] = dw[l] + self.w[l] * l2_decay
db[l] = db[l] + self.b[l] * l2_decay
cache = self.gradient_descent_momentum_update(dw, db, cache, optimization_params)
            a = self.forward_propagation(
                x_cv, dropout=None if dropout_rate is None else {"rate": dropout_rate})
cost = self.cost(y_cv, a[-1])
if cost < best_so_far["cost"]:
best_so_far["cost"] = cost
best_so_far["w"] = self.w
best_so_far["b"] = self.b
best_so_far["iter_num"] = i + 1
no_update_cnt = 0
else:
no_update_cnt = no_update_cnt + 1
if no_update_cnt % 10 == 0:
optimization_params["learning_rate"] = optimization_params["learning_rate"] * 0.5
if no_update_cnt >= 30:
break
self.w = best_so_far["w"]
self.b = best_so_far["b"]
# print(best_so_far["iter_num"])
        eval_dropout = None if dropout_rate is None else {"rate": dropout_rate}
        return self.cost(y, self.forward_propagation(x, dropout=eval_dropout)[-1]), self.cost(
            y_cv, self.forward_propagation(x_cv, dropout=eval_dropout)[-1])
def predict(self, x: np.ndarray, dropout_rate: float = None):
        a = self.forward_propagation(
            x, dropout=None if dropout_rate is None else {"rate": dropout_rate})
return a[len(self.layer_dims) - 1] >= 0.5
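
# A minimal usage sketch (illustrative; data is laid out column-wise, one
# example per column, matching the conventions used throughout this class):
#
#     net = NeuralNetwork((2, 16, 1), sigma=0.1)
#     x = np.random.randn(2, 200)
#     y = (x[0:1, :] * x[1:2, :] > 0).astype(float)   # toy labels
#     train_cost, cv_cost = net.optimize(x[:, :150], y[:, :150],
#                                        x[:, 150:], y[:, 150:], iter_num=500)
#     predictions = net.predict(x[:, 150:])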
| gonglinyuan/titanic | NeuralNetwork_old.py | Python | gpl-3.0 | 7,112 |
from contrib import *
from integrations import *
from handlers import *
from models import *
from views import *
from .test_decorators import TestDecoratorErrors
from .test_middleware import TestMiddleware
| Rediker-Software/doac | tests/tests/__init__.py | Python | mit | 206 |
#!/usr/bin/python3
import i3Common
new_workspace = i3Common.choose_workspace()
i3Common.switch_workspace_to_active_display(new_workspace.strip())
| nathanlippi/dotfiles | i3/i3.symlink/choose_workspace_active_display.py | Python | mit | 149 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from sentry.utils.db import is_postgres
class Migration(SchemaMigration):
def forwards(self, orm):
if is_postgres():
# Changing field 'Release.project_id'
db.execute("ALTER TABLE sentry_release ALTER COLUMN project_id DROP NOT NULL")
# Changing field 'ReleaseFile.project_id'
db.execute("ALTER TABLE sentry_releasefile ALTER COLUMN project_id DROP NOT NULL")
else:
# Changing field 'Release.project_id'
db.alter_column(
'sentry_release',
'project_id',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True)
)
# Changing field 'ReleaseFile.project_id'
db.alter_column(
'sentry_releasefile',
'project_id',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True)
)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Release.project_id'
raise RuntimeError(
"Cannot reverse this migration. 'Release.project_id' and its values cannot be restored."
)
# The following code is provided here to aid in writing a correct migration
# Changing field 'Release.project_id'
db.alter_column(
'sentry_release', 'project_id',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()
)
# User chose to not deal with backwards NULL issues for 'ReleaseFile.project_id'
raise RuntimeError(
"Cannot reverse this migration. 'ReleaseFile.project_id' and its values cannot be restored."
)
# The following code is provided here to aid in writing a correct migration
# Changing field 'ReleaseFile.project_id'
db.alter_column(
'sentry_releasefile', 'project_id',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()
)
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 1, 19, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project_id', 'version'),)",
'object_name': 'Release'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'ssVzZVyhkDOpmZUemjBvkkkRbhSwhPVT'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
| looker/sentry | src/sentry/south_migrations/0285_auto__chg_field_release_project_id__chg_field_releasefile_project_id.py | Python | bsd-3-clause | 92712
#!/usr/bin/env python
###############################################################
# AniConvert: Batch convert directories of videos using
# HandBrake. Intended to be used on anime and TV series,
# where files downloaded as a batch tend to have the same
# track layout. Can also automatically select a single audio
# and subtitle track based on language preference.
#
# Copyright (c) 2015 Andrew Sun (@crossbowffs)
# Distributed under the MIT license
###############################################################
from __future__ import print_function
import argparse
import collections
import errno
import logging
import os
import re
import subprocess
import sys
###############################################################
# Configuration values, no corresponding command-line args
###############################################################
# Name of the HandBrake CLI binary. Set this to the full path
# of the binary if the script cannot find it automatically.
HANDBRAKE_EXE = "HandBrakeCLI"
# The format string for logging messages
LOGGING_FORMAT = "[%(levelname)s] %(message)s"
# If no output directory is explicitly specified, the output
# files will be placed in a directory with this value appended
# to the name of the input directory.
DEFAULT_OUTPUT_SUFFIX = "-converted"
# Define the arguments to pass to HandBrake.
# Do not define any of the following:
# -i <input>
# -o <output>
# -a <audio track>
# -s <subtitle track>
# -w <width>
# -l <height>
# Obviously, do not define anything that would cause HandBrake
# to not convert the video file either.
HANDBRAKE_ARGS = """
-E ffaac
-B 160
-6 dpl2
-R Auto
-e x264
-q 20.0
--vfr
--audio-copy-mask aac,ac3,dtshd,dts,mp3
--audio-fallback ffaac
--loose-anamorphic
--modulus 2
--x264-preset medium
--h264-profile high
--h264-level 3.1
--subtitle-burned
"""
###############################################################
# Default values and explanations for command-line args
###############################################################
# List of video formats to process. Other file formats in the
# input directory will be ignored. On the command line, specify
# as "-i mkv,mp4"
INPUT_VIDEO_FORMATS = ["mkv", "mp4"]
# The format to convert the videos to. Only "mp4", "mkv", and
# "m4v" are accepted, because those are the only formats that
# HandBrake can write. On the command line, specify as "-j mp4"
OUTPUT_VIDEO_FORMAT = "mp4"
# A list of preferred audio languages, ordered from most
# to least preferable. If there is only one audio track in the
# most preferable language, it will be automatically selected.
# If more than one track is in the most preferable language,
# you will be prompted to select one. If no tracks are
# in the most preferable language, the program will check
# the second most preferable language, and so on. This value
# should use the iso639-2 (3 letter) language code format.
# You may also specify "none" as one of the items in this list.
# If it is reached, the track will be discarded. For example,
# "-a eng,none" will use English audio if it is available, or
# remove the audio track otherwise. On the command line,
# specify as "-a jpn,eng"
AUDIO_LANGUAGES = ["jpn", "eng"]
# This is the same as the preferred audio languages, but
# for subtitles. On the command line, specify as "-s eng"
SUBTITLE_LANGUAGES = ["eng"]
# What to do when the destination file already exists. Can be
# one of:
# "prompt": Ask the user what to do
# "skip": Skip the file and proceed to the next one
# "overwrite": Overwrite the destination file
# On the command line, specify as "-w skip"
DUPLICATE_ACTION = "skip"
# The width and height of the output video, in the format
# "1280x720". "1080p" and "720p" are common values and
# translate to 1920x1080 and 1280x720, respectively.
# A value of "auto" is also accepted, and will preserve
# the input video dimensions. On the command line, specify
# as "-d 1280x720", "-d 720p", or "-d auto"
OUTPUT_DIMENSIONS = "auto"
# The minimum severity for an event to be logged. Levels
# from least severe to most severe are "debug", "info",
# "warning", "error", and "critical". On the command line,
# specify as "-l info"
LOGGING_LEVEL = "info"
# By default, if there is only a single track, and it has
# language code "und" (undefined), it will automatically be
# selected. If you do not want this behavior, set this flag
# to true. On the command line, specify as "-u"
MANUAL_UND = False
# Set this to true to search sub-directories within the input
# directory. Files will be output in the correspondingly named
# folder in the destination directory. On the command line,
# specify as "-r"
RECURSIVE_SEARCH = False
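# Example invocation (a sketch: the script file name, the positional input
# directory and the "-o" output flag are assumptions, since the argparse
# setup appears further down this file; the remaining flags are the ones
# documented above):
#   python aniconvert.py ~/Videos/MyShow -o ~/Videos/MyShow-converted -a jpn,eng -s eng -j mp4 -r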
###############################################################
# End of configuration values, code begins here
###############################################################
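# Python 2/3 compatibility: fall back to raw_input so the interactive
# prompts used below behave the same under both interpreters.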
try:
input = raw_input
except NameError:
pass
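# Small value objects: TrackInfo holds one chosen audio/subtitle track pair,
# and BatchInfo ties a directory to the track selections made for it.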
class TrackInfo(object):
def __init__(self, audio_track, subtitle_track):
self.audio_track = audio_track
self.subtitle_track = subtitle_track
class BatchInfo(object):
def __init__(self, dir_path, track_map):
self.dir_path = dir_path
self.track_map = track_map
class FFmpegStreamInfo(object):
def __init__(self, stream_index, codec_type, codec_name, language_code, metadata):
self.stream_index = stream_index
self.codec_type = codec_type
self.codec_name = codec_name
self.language_code = language_code
self.metadata = metadata
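# Parsers for the audio track lines in HandBrake's scan output. pattern1
# matches the basic "index, description (iso639-2: xxx)" form; pattern2 also
# captures the sample rate and bit rate when they are present.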
class HandBrakeAudioInfo(object):
pattern1 = re.compile(r"(\d+), (.+) \(iso639-2: ([a-z]{3})\)")
pattern2 = re.compile(r"(\d+), (.+) \(iso639-2: ([a-z]{3})\), (\d+)Hz, (\d+)bps")
def __init__(self, info_str):
match = self.pattern1.match(info_str)
if not match:
raise ValueError("Unknown audio track info format: " + repr(info_str))
self.index = int(match.group(1))
self.description = match.group(2)
self.language_code = match.group(3)
match = self.pattern2.match(info_str)
if match:
self.sample_rate = int(match.group(4))
self.bit_rate = int(match.group(5))
else:
self.sample_rate = None
self.bit_rate = None
self.title = None
def __str__(self):
format_str = (
"Description: {description}\n"
"Language code: {language_code}"
)
if self.sample_rate:
format_str += "\nSample rate: {sample_rate}Hz"
if self.bit_rate:
format_str += "\nBit rate: {bit_rate}bps"
return format_str.format(**self.__dict__)
def __hash__(self):
return hash((
self.index,
self.description,
self.language_code,
self.sample_rate,
            self.bit_rate,
self.title
))
def __eq__(self, other):
if not isinstance(other, HandBrakeAudioInfo):
return False
return (
self.index == other.index and
self.description == other.description and
self.language_code == other.language_code and
self.sample_rate == other.sample_rate and
            self.bit_rate == other.bit_rate and
self.title == other.title
)
class HandBrakeSubtitleInfo(object):
pattern = re.compile(r"(\d+), (.+) \(iso639-2: ([a-z]{3})\) \((\S+)\)\((\S+)\)")
def __init__(self, info_str):
match = self.pattern.match(info_str)
if not match:
raise ValueError("Unknown subtitle track info format: " + repr(info_str))
self.index = int(match.group(1))
self.language = match.group(2)
self.language_code = match.group(3)
self.format = match.group(4)
self.source = match.group(5)
self.title = None
def __str__(self):
format_str = (
"Language: {language}\n"
"Language code: {language_code}\n"
"Format: {format}\n"
"Source: {source}"
)
return format_str.format(**self.__dict__)
def __hash__(self):
return hash((
self.index,
self.language,
self.language_code,
self.format,
self.source,
self.title
))
def __eq__(self, other):
if not isinstance(other, HandBrakeSubtitleInfo):
return False
return (
self.index == other.index and
self.language == other.language and
self.language_code == other.language_code and
self.format == other.format and
self.source == other.source and
self.title == other.title
)
def print_err(message="", end="\n", flush=False):
print(message, end=end, file=sys.stderr)
if flush:
sys.stderr.flush()
def indent_text(text, prefix):
if isinstance(prefix, int):
prefix = " " * prefix
lines = text.splitlines()
return "\n".join(prefix + line for line in lines)
def on_walk_error(exception):
logging.error("Cannot read directory: '%s'", exception.filename)
def get_files_in_dir(path, extensions, recursive):
extensions = {e.lower() for e in extensions}
for (dir_path, subdir_names, file_names) in os.walk(path, onerror=on_walk_error):
filtered_files = []
for file_name in file_names:
extension = os.path.splitext(file_name)[1][1:]
if extension.lower() in extensions:
filtered_files.append(file_name)
if len(filtered_files) > 0:
filtered_files.sort()
yield (dir_path, filtered_files)
if recursive:
subdir_names.sort()
else:
del subdir_names[:]
def get_output_dir(base_output_dir, base_input_dir, dir_path):
relative_path = os.path.relpath(dir_path, base_input_dir)
if relative_path == ".":
return base_output_dir
return os.path.join(base_output_dir, relative_path)
def replace_extension(file_name, new_extension):
new_file_name = os.path.splitext(file_name)[0] + "." + new_extension
return new_file_name
def get_simplified_path(base_dir_path, full_path):
base_parent_dir_path = os.path.dirname(base_dir_path)
return os.path.relpath(full_path, base_parent_dir_path)
def get_output_path(base_output_dir, base_input_dir, input_path, output_format):
relative_path = os.path.relpath(input_path, base_input_dir)
temp_path = os.path.join(base_output_dir, relative_path)
out_path = os.path.splitext(temp_path)[0] + "." + output_format
return out_path
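# For example (illustrative paths): get_output_path("/videos-converted", "/videos",
# "/videos/show/ep1.mkv", "mp4") would return "/videos-converted/show/ep1.mp4" --
# the input file's path relative to the input root is mirrored under the output root,
# with the extension replaced by the requested output format.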
def try_create_directory(path):
try:
os.makedirs(path, 0o755)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def try_delete_file(path):
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def run_handbrake_scan(handbrake_path, input_path):
output = subprocess.check_output([
handbrake_path,
"-i", input_path,
"--scan"
], stderr=subprocess.STDOUT)
return output.decode("utf-8")
def parse_handbrake_track_info(output_lines, start_index, info_cls):
prefix = " + "
prefix_len = len(prefix)
tracks = []
i = start_index + 1
while i < len(output_lines) and output_lines[i].startswith(prefix):
info_str = output_lines[i][prefix_len:]
info = info_cls(info_str)
tracks.append(info)
i += 1
return (i, tracks)
def parse_ffmpeg_stream_metadata(output_lines, start_index, metadata_pattern):
metadata = {}
i = start_index + 1
while i < len(output_lines):
match = metadata_pattern.match(output_lines[i])
if not match:
break
metadata[match.group(1)] = match.group(2)
i += 1
return (i, metadata)
def parse_ffmpeg_stream_info(output_lines, start_index):
stream_pattern = re.compile(r"\s{4}Stream #0\.(\d+)(\(([a-z]{3})\))?: (\S+): (\S+?)")
metadata_pattern = re.compile(r"\s{6}(\S+)\s*: (.+)")
audio_streams = []
subtitle_streams = []
i = start_index + 1
while i < len(output_lines) and output_lines[i].startswith(" "):
match = stream_pattern.match(output_lines[i])
if not match:
i += 1
continue
stream_index = match.group(1)
language_code = match.group(3) or "und"
codec_type = match.group(4)
codec_name = match.group(5)
i += 1
if codec_type == "Audio":
current_stream = audio_streams
elif codec_type == "Subtitle":
current_stream = subtitle_streams
else:
continue
if output_lines[i].startswith(" Metadata:"):
i, metadata = parse_ffmpeg_stream_metadata(output_lines, i, metadata_pattern)
else:
metadata = {}
info = FFmpegStreamInfo(stream_index, codec_type, codec_name, language_code, metadata)
current_stream.append(info)
return (i, audio_streams, subtitle_streams)
def merge_track_titles(hb_tracks, ff_streams):
if not ff_streams:
return
assert len(hb_tracks) == len(ff_streams), "Track count mismatch"
for hb_track, ff_stream in zip(hb_tracks, ff_streams):
assert hb_track.language_code == ff_stream.language_code, "Track language code mismatch"
hb_track.title = ff_stream.metadata.get("title")
def parse_handbrake_scan_output(output):
lines = output.splitlines()
hb_audio_tracks = None
hb_subtitle_tracks = None
ff_audio_streams = None
ff_subtitle_streams = None
i = 0
while i < len(lines):
if lines[i].startswith("Input #0, "):
logging.debug("Found FFmpeg stream info")
i, ff_audio_streams, ff_subtitle_streams = parse_ffmpeg_stream_info(lines, i)
message_format = "FFmpeg: %d audio track(s), %d subtitle track(s)"
logging.debug(message_format, len(ff_audio_streams), len(ff_subtitle_streams))
continue
if lines[i] == " + audio tracks:":
logging.debug("Found HandBrake audio track info")
i, hb_audio_tracks = parse_handbrake_track_info(lines, i, HandBrakeAudioInfo)
logging.debug("HandBrake: %d audio track(s)", len(hb_audio_tracks))
continue
if lines[i] == " + subtitle tracks:":
logging.debug("Found HandBrake subtitle track info")
i, hb_subtitle_tracks = parse_handbrake_track_info(lines, i, HandBrakeSubtitleInfo)
logging.debug("HandBrake: %d subtitle track(s)", len(hb_subtitle_tracks))
continue
i += 1
merge_track_titles(hb_audio_tracks, ff_audio_streams)
merge_track_titles(hb_subtitle_tracks, ff_subtitle_streams)
return (hb_audio_tracks, hb_subtitle_tracks)
def get_track_info(handbrake_path, input_path):
scan_output = run_handbrake_scan(handbrake_path, input_path)
return parse_handbrake_scan_output(scan_output)
def get_track_by_index(track_list, track_index):
for track in track_list:
if track.index == track_index:
return track
raise IndexError("Invalid track index: " + str(track_index))
def filter_tracks_by_language(track_list, preferred_languages, manual_und):
for preferred_language_code in preferred_languages:
preferred_language_code = preferred_language_code.lower()
if preferred_language_code == "none":
return None
und_count = 0
filtered_tracks = []
for track in track_list:
if track.language_code == preferred_language_code:
filtered_tracks.append(track)
elif track.language_code == "und":
und_count += 1
filtered_tracks.append(track)
if len(filtered_tracks) - und_count >= 1:
return filtered_tracks
elif len(track_list) == und_count:
if und_count == 1 and not manual_und:
return track_list
return []
return []
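# Illustrative behaviour sketch: with preferred_languages=["eng"] and track language
# codes ["eng", "und", "fre"], the returned list holds the "eng" track plus the "und"
# track ("und" tracks are always carried along as possible matches). If the file has
# exactly one track and its language is "und", that track is returned for automatic
# selection unless manual_und is set. In every other unmatched case an empty list is
# returned and the caller (select_best_track) falls back to prompting the user.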
def print_track_list(track_list, file_name, track_type):
track_type = track_type.capitalize()
print_err("+ Video: '{0}'".format(file_name))
for track in track_list:
message_format = " + [{1}] {0} track: {2}"
print_err(message_format.format(track_type, track.index, track.title or ""))
print_err(indent_text(str(track), " + "))
def prompt_select_track(track_list, filtered_track_list, file_name, track_type):
print_err("Please select {0} track:".format(track_type))
print_track_list(filtered_track_list, file_name, track_type)
prompt_format = "Choose a {0} track # (type 'all' to view all choices): "
alt_prompt_format = "Choose a {0} track # (type 'none' for no track): "
if len(track_list) == len(filtered_track_list):
prompt_format = alt_prompt_format
while True:
print_err(prompt_format.format(track_type), end="")
try:
input_str = input().lower()
except KeyboardInterrupt:
print_err(flush=True)
raise
if input_str == "all":
print_track_list(track_list, file_name, track_type)
prompt_format = alt_prompt_format
continue
if input_str == "none":
return None
try:
track_index = int(input_str)
except ValueError:
print_err("Enter a valid number!")
continue
try:
return get_track_by_index(track_list, track_index)
except IndexError:
print_err("Enter a valid index!")
def prompt_overwrite_file(file_name):
print_err("The destination file already exists: '{0}'".format(file_name))
while True:
print_err("Do you want to overwrite it? (y/n): ", end="")
try:
input_str = input().lower()
except KeyboardInterrupt:
print_err(flush=True)
raise
if input_str == "y":
return True
elif input_str == "n":
return False
else:
print_err("Enter either 'y' or 'n'!")
def select_best_track(track_list, preferred_languages, manual_und,
file_name, track_type):
if len(track_list) == 0:
logging.info("No %s tracks found", track_type)
return None
filtered_tracks = filter_tracks_by_language(track_list,
preferred_languages, manual_und)
if filtered_tracks is None:
logging.info("Matched 'none' language, discarding %s track", track_type)
return None
if len(filtered_tracks) == 1:
track = filtered_tracks[0]
message_format = "Automatically selected %s track #%d with language '%s'"
logging.info(message_format, track_type, track.index, track.language_code)
return track
else:
if len(filtered_tracks) == 0:
filtered_tracks = track_list
message_format = "Failed to find any %s tracks that match language list: %s"
else:
message_format = "More than one %s track matches language list: %s"
logging.info(message_format, track_type, preferred_languages)
track = prompt_select_track(track_list, filtered_tracks, file_name, track_type)
if track:
message_format = "User selected %s track #%d with language '%s'"
logging.info(message_format, track_type, track.index, track.language_code)
else:
logging.info("User discarded %s track", track_type)
return track
def select_best_track_cached(selected_track_map, track_list,
preferred_languages, manual_und, file_name, track_type):
track_set = tuple(track_list)
try:
track = selected_track_map[track_set]
except KeyError:
track = select_best_track(track_list, preferred_languages,
manual_und, file_name, track_type)
selected_track_map[track_set] = track
else:
track_type = track_type.capitalize()
message_format = "%s track layout already seen, "
if track:
message_format += "selected #%d with language '%s'"
logging.debug(message_format, track_type, track.index, track.language_code)
else:
message_format += "no track selected"
logging.debug(message_format, track_type)
return track
def process_handbrake_output(process):
pattern1 = re.compile(r"Encoding: task \d+ of \d+, (\d+\.\d\d) %")
pattern2 = re.compile(
r"Encoding: task \d+ of \d+, (\d+\.\d\d) % "
r"\((\d+\.\d\d) fps, avg (\d+\.\d\d) fps, ETA (\d\dh\d\dm\d\ds)\)")
percent_complete = None
current_fps = None
average_fps = None
estimated_time = None
prev_message = ""
format_str = "Progress: {percent:.2f}% done"
long_format_str = format_str + " (FPS: {fps:.2f}, average FPS: {avg_fps:.2f}, ETA: {eta})"
try:
while True:
output = process.stdout.readline()
if len(output) == 0:
break
output = output.rstrip()
match = pattern1.match(output)
if not match:
continue
percent_complete = float(match.group(1))
match = pattern2.match(output)
if match:
format_str = long_format_str
current_fps = float(match.group(2))
average_fps = float(match.group(3))
estimated_time = match.group(4)
message = format_str.format(
percent=percent_complete,
fps=current_fps,
avg_fps=average_fps,
eta=estimated_time)
print_err(message, end="")
blank_count = max(len(prev_message) - len(message), 0)
print_err(" " * blank_count, end="\r")
prev_message = message
finally:
print_err(flush=True)
def run_handbrake(arg_list):
logging.debug("HandBrake args: '%s'", subprocess.list2cmdline(arg_list))
process = subprocess.Popen(
arg_list,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
try:
process_handbrake_output(process)
except:
process.kill()
process.wait()
raise
retcode = process.wait()
if retcode != 0:
raise subprocess.CalledProcessError(retcode, arg_list)
def get_handbrake_args(handbrake_path, input_path, output_path,
audio_track, subtitle_track, video_dimensions):
args = HANDBRAKE_ARGS.replace("\n", " ").strip().split()
args += ["-i", input_path]
args += ["-o", output_path]
if audio_track:
args += ["-a", str(audio_track.index)]
else:
args += ["-a", "none"]
if subtitle_track:
args += ["-s", str(subtitle_track.index)]
if video_dimensions != "auto":
args += ["-w", str(video_dimensions[0])]
args += ["-l", str(video_dimensions[1])]
return [handbrake_path] + args
def check_handbrake_executable(file_path):
if not os.path.isfile(file_path):
return False
message_format = "Found HandBrakeCLI binary at '%s'"
if not os.access(file_path, os.X_OK):
message_format += ", but it is not executable"
logging.warning(message_format, file_path)
return False
logging.info(message_format, file_path)
return True
def find_handbrake_executable_in_path(name):
if os.name == "nt" and not name.lower().endswith(".exe"):
name += ".exe"
path_env = os.environ.get("PATH", os.defpath)
path_env_split = path_env.split(os.pathsep)
path_env_split.insert(0, os.path.abspath(os.path.dirname(__file__)))
for dir_path in path_env_split:
file_path = os.path.join(dir_path, name)
if check_handbrake_executable(file_path):
return file_path
return None
def find_handbrake_executable():
name = HANDBRAKE_EXE
if os.path.dirname(name):
logging.info("Full path to HandBrakeCLI binary specified, ignoring PATH")
if check_handbrake_executable(name):
return name
else:
handbrake_path = find_handbrake_executable_in_path(name)
if handbrake_path:
return handbrake_path
logging.error("Could not find executable HandBrakeCLI binary")
return None
def check_output_path(args, output_path):
simp_output_path = get_simplified_path(args.output_dir, output_path)
if not os.path.exists(output_path):
return True
if os.path.isdir(output_path):
logging.error("Output path '%s' is a directory, skipping file", simp_output_path)
return False
if args.duplicate_action == "prompt":
return prompt_overwrite_file(simp_output_path)
elif args.duplicate_action == "skip":
logging.info("Destination file '%s' already exists, skipping", simp_output_path)
return False
elif args.duplicate_action == "overwrite":
logging.info("Destination file '%s' already exists, overwriting", simp_output_path)
return True
def filter_convertible_files(args, dir_path, file_names):
output_dir = get_output_dir(args.output_dir, args.input_dir, dir_path)
try:
try_create_directory(output_dir)
except OSError as e:
logging.error("Cannot create output directory: '%s'", output_dir)
return []
convertible_files = []
for file_name in file_names:
output_file_name = replace_extension(file_name, args.output_format)
output_path = os.path.join(output_dir, output_file_name)
if not check_output_path(args, output_path):
continue
convertible_files.append(file_name)
return convertible_files
def get_track_map(args, dir_path, file_names):
selected_audio_track_map = {}
selected_subtitle_track_map = {}
track_map = collections.OrderedDict()
for file_name in file_names:
logging.info("Scanning '%s'", file_name)
file_path = os.path.join(dir_path, file_name)
try:
audio_tracks, subtitle_tracks = get_track_info(
args.handbrake_path, file_path)
except subprocess.CalledProcessError as e:
logging.error("Error occurred while scanning '%s': %s", file_name, e)
continue
selected_audio_track = select_best_track_cached(
selected_audio_track_map, audio_tracks,
args.audio_languages, args.manual_und,
file_name, "audio")
selected_subtitle_track = select_best_track_cached(
selected_subtitle_track_map, subtitle_tracks,
args.subtitle_languages, args.manual_und,
file_name, "subtitle")
track_map[file_name] = TrackInfo(
selected_audio_track, selected_subtitle_track)
return track_map
def generate_batch(args, dir_path, file_names):
simp_dir_path = get_simplified_path(args.input_dir, dir_path)
logging.info("Scanning videos in '%s'", simp_dir_path)
convertible_files = filter_convertible_files(args, dir_path, file_names)
track_map = get_track_map(args, dir_path, convertible_files)
if len(track_map) == 0:
logging.warning("No videos in '%s' can be converted", simp_dir_path)
return None
return BatchInfo(dir_path, track_map)
def generate_batches(args):
dir_list = get_files_in_dir(args.input_dir, args.input_formats, args.recursive_search)
batch_list = []
found = False
for dir_path, file_names in dir_list:
found = True
batch = generate_batch(args, dir_path, file_names)
if batch:
batch_list.append(batch)
if not found:
message = "No videos found in input directory"
if not args.recursive_search:
message += ", for recursive search specify '-r'"
logging.info(message)
return batch_list
def execute_batch(args, batch):
output_dir = get_output_dir(args.output_dir, args.input_dir, batch.dir_path)
try_create_directory(output_dir)
for file_name, track_info in batch.track_map.items():
output_file_name = replace_extension(file_name, args.output_format)
input_path = os.path.join(batch.dir_path, file_name)
output_path = os.path.join(output_dir, output_file_name)
simp_input_path = get_simplified_path(args.input_dir, input_path)
handbrake_args = get_handbrake_args(args.handbrake_path,
input_path, output_path, track_info.audio_track,
track_info.subtitle_track, args.output_dimensions)
logging.info("Converting '%s'", simp_input_path)
try:
run_handbrake(handbrake_args)
except subprocess.CalledProcessError as e:
logging.error("Error occurred while converting '%s': %s", simp_input_path, e)
try_delete_file(output_path)
except:
logging.info("Conversion aborted, cleaning up temporary files")
try_delete_file(output_path)
raise
def sanitize_and_validate_args(args):
args.input_dir = os.path.abspath(args.input_dir)
if not args.output_dir:
args.output_dir = args.input_dir + DEFAULT_OUTPUT_SUFFIX
args.output_dir = os.path.abspath(args.output_dir)
if not os.path.exists(args.input_dir):
logging.error("Input directory does not exist: '%s'", args.input_dir)
return False
if os.path.isfile(args.input_dir):
logging.error("Input directory is a file: '%s'", args.input_dir)
return False
if not os.access(args.input_dir, os.R_OK | os.X_OK):
logging.error("Cannot read from input directory: '%s'", args.input_dir)
return False
if os.path.isfile(args.output_dir):
logging.error("Output directory is a file: '%s'", args.output_dir)
return False
if os.path.isdir(args.output_dir) and not os.access(args.output_dir, os.W_OK | os.X_OK):
logging.error("Cannot write to output directory: '%s'", args.output_dir)
return False
if args.input_dir == args.output_dir:
logging.error("Input and output directories are the same: '%s'", args.input_dir)
return False
if args.handbrake_path:
args.handbrake_path = os.path.abspath(args.handbrake_path)
if not os.path.isfile(args.handbrake_path):
logging.error("HandBrakeCLI binary not found: '%s'", args.handbrake_path)
return False
if not os.access(args.handbrake_path, os.X_OK):
logging.error("HandBrakeCLI binary is not executable: '%s'", args.handbrake_path)
return False
else:
args.handbrake_path = find_handbrake_executable()
if not args.handbrake_path:
return False
return True
def arg_error(message):
raise argparse.ArgumentTypeError(message)
def parse_output_dimensions(value):
value_lower = value.lower()
if value_lower == "auto":
return value_lower
if value_lower == "1080p":
return (1920, 1080)
if value_lower == "720p":
return (1280, 720)
match = re.match(r"^(\d+)x(\d+)$", value_lower)
if not match:
arg_error("Invalid video dimensions: " + repr(value))
width = int(match.group(1))
height = int(match.group(2))
return (width, height)
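# For example: "auto" is passed through unchanged, "720p" becomes (1280, 720),
# "1080p" becomes (1920, 1080), and a custom value such as "640x480" becomes (640, 480).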
def parse_duplicate_action(value):
value_lower = value.lower()
if value_lower not in {"prompt", "skip", "overwrite"}:
arg_error("Invalid duplicate action: " + repr(value))
return value_lower
def parse_language_list(value):
language_list = value.split(",")
for language in language_list:
language = language.lower()
if language == "none":
continue
elif language == "und":
arg_error("Do not specify 'und' language, use '-u' flag instead")
elif not language.isalpha() or len(language) != 3:
arg_error("Invalid iso639-2 code: " + repr(language))
return language_list
def parse_logging_level(value):
level = getattr(logging, value.upper(), None)
if level is None:
arg_error("Invalid logging level: " + repr(value))
return level
def parse_input_formats(value):
format_list = value.split(",")
for input_format in format_list:
if input_format.startswith("."):
arg_error("Do not specify the leading '.' on input formats")
if not input_format.isalnum():
arg_error("Invalid input format: " + repr(input_format))
return format_list
def parse_output_format(value):
if value.startswith("."):
arg_error("Do not specify the leading '.' on output format")
if value.lower() not in {"mp4", "mkv", "m4v"}:
arg_error("Invalid output format (only mp4, mkv, and m4v are supported): " + repr(value))
return value
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_dir")
parser.add_argument("-o", "--output-dir")
parser.add_argument("-x", "--handbrake-path")
parser.add_argument("-r", "--recursive-search",
action="store_true", default=RECURSIVE_SEARCH)
parser.add_argument("-u", "--manual-und",
action="store_true", default=MANUAL_UND)
parser.add_argument("-i", "--input-formats",
type=parse_input_formats, default=INPUT_VIDEO_FORMATS)
parser.add_argument("-j", "--output-format",
type=parse_output_format, default=OUTPUT_VIDEO_FORMAT)
parser.add_argument("-l", "--logging-level",
type=parse_logging_level, default=LOGGING_LEVEL)
parser.add_argument("-w", "--duplicate-action",
type=parse_duplicate_action, default=DUPLICATE_ACTION)
parser.add_argument("-d", "--output-dimensions",
type=parse_output_dimensions, default=OUTPUT_DIMENSIONS)
parser.add_argument("-a", "--audio-languages",
type=parse_language_list, default=AUDIO_LANGUAGES)
parser.add_argument("-s", "--subtitle-languages",
type=parse_language_list, default=SUBTITLE_LANGUAGES)
return parser.parse_args()
def main():
args = parse_args()
logging.basicConfig(format=LOGGING_FORMAT, level=args.logging_level, stream=sys.stdout)
if not sanitize_and_validate_args(args):
return
batches = generate_batches(args)
for batch in batches:
execute_batch(args, batch)
logging.info("Done!")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| apsun/AniConvert | aniconvert.py | Python | mit | 34,380 |
from django import forms
from helios.payment.models import PaymentOption
class PaymentForm(forms.Form):
payment_option = forms.ModelChoiceField(
queryset=PaymentOption.objects.all(),
empty_label=None,
widget=forms.RadioSelect(attrs={
'class': 'order',
}),
)
| panosl/helios | helios/payment/forms.py | Python | bsd-3-clause | 313 |
class TestModule(object):
    # Ansible test plugin exposing three jinja2 tests: 'in' (membership), '==' (equality)
    # and 'inoreq' (membership test if the left operand is a list, plain equality otherwise).
    tests = lambda self: {'in': lambda l, v: v in l, '==': lambda a, b: a == b,
                          'inoreq': lambda l, v: v in l if type(l) is list else v == l}
| thoto/ansible-role-strongswan | test_plugins/main.py | Python | gpl-3.0 | 173 |
from unittest import TestCase
from gpcook.modules.inspec import generate_inspec_tests
class TestInspec(TestCase):
# Compare output to example adml file
def test_inspec_stub(self):
self.assertTrue(generate_inspec_tests())
| rorychatt/GPCook | tests/modules/inspec_test.py | Python | mit | 240 |
# ext/associationproxy.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import itertools
import operator
import weakref
from sqlalchemy import exceptions
from sqlalchemy import orm
from sqlalchemy import util
from sqlalchemy.orm import collections
from sqlalchemy.sql import not_
def association_proxy(target_collection, attr, **kw):
"""Return a Python property implementing a view of a target
attribute which references an attribute on members of the
target.
Implements a read/write view over an instance's *target_collection*,
extracting *attr* from each member of the collection. The property acts
somewhat like this list comprehension::
[getattr(member, *attr*)
for member in getattr(instance, *target_collection*)]
Unlike the list comprehension, the collection returned by the property is
always in sync with *target_collection*, and mutations made to either
collection will be reflected in both.
The association proxy also works with scalar attributes, which in
turn reference scalar attributes or collections.
Implements a Python property representing a relationship as a collection of
simpler values, or a scalar value. The proxied property will mimic the collection type of
the target (list, dict or set), or, in the case of a one to one relationship,
a simple scalar value.
:param target_collection: Name of the relationship attribute we'll proxy to,
usually created with :func:`~sqlalchemy.orm.relationship`.
:param attr: Attribute on the associated instance or instances we'll proxy for.
For example, given a target collection of [obj1, obj2], a list created
by this proxy property would look like [getattr(obj1, *attr*),
getattr(obj2, *attr*)]
If the relationship is one-to-one or otherwise uselist=False, then simply:
getattr(obj, *attr*)
:param creator: optional.
When new items are added to this proxied collection, new instances of
the class collected by the target collection will be created. For list
and set collections, the target class constructor will be called with
the 'value' for the new instance. For dict types, two arguments are
passed: key and value.
If you want to construct instances differently, supply a *creator*
function that takes arguments as above and returns instances.
For scalar relationships, creator() will be called if the target is None.
If the target is present, set operations are proxied to setattr() on the
associated object.
If you have an associated object with multiple attributes, you may set
up multiple association proxies mapping to different attributes. See
the unit tests for examples, and for examples of how creator() functions
can be used to construct the scalar relationship on-demand in this
situation.
:param \*\*kw: Passes along any other keyword arguments to
:class:`.AssociationProxy`.
"""
return AssociationProxy(target_collection, attr, **kw)
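# Illustrative usage sketch (the names User, Keyword and the 'kw' relationship are
# hypothetical, not part of this module):
#
#     class User(Base):
#         # ... columns ...
#         kw = relationship("Keyword")
#         keywords = association_proxy('kw', 'keyword')
#
# user.keywords then behaves like a list of plain keyword strings; appending a string
# creates a new Keyword instance via Keyword(<value>), per the default creator behavior
# described above.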
class AssociationProxy(object):
"""A descriptor that presents a read/write view of an object attribute."""
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None,
proxy_bulk_set=None):
"""Arguments are:
target_collection
Name of the collection we'll proxy to, usually created with
'relationship()' in a mapper setup.
attr
Attribute on the collected instances we'll proxy for. For example,
given a target collection of [obj1, obj2], a list created by this
proxy property would look like [getattr(obj1, attr), getattr(obj2,
attr)]
creator
Optional. When new items are added to this proxied collection, new
instances of the class collected by the target collection will be
created. For list and set collections, the target class constructor
will be called with the 'value' for the new instance. For dict
types, two arguments are passed: key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
getset_factory
Optional. Proxied attribute access is automatically handled by
routines that get and set values based on the `attr` argument for
this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
proxy_factory
Optional. The type of collection to emulate is determined by
sniffing the target collection. If your collection type can't be
determined by duck typing or you'd like to use a different
collection implementation, you may supply a factory function to
produce those collections. Only applicable to non-scalar relationships.
proxy_bulk_set
Optional, use with proxy_factory. See the _set() method for
details.
"""
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.owning_class = None
self.key = '_%s_%s_%s' % (
type(self).__name__, target_collection, id(self))
self.collection_class = None
def _get_property(self):
return (orm.class_mapper(self.owning_class).
get_property(self.target_collection))
@util.memoized_property
def target_class(self):
"""The class the proxy is attached to."""
return self._get_property().mapper.class_
@util.memoized_property
def scalar(self):
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return not self._get_property().\
mapper.get_property(self.value_attr).uselist
def __get__(self, obj, class_):
if self.owning_class is None:
self.owning_class = class_ and class_ or type(obj)
if obj is None:
return self
if self.scalar:
return self._scalar_get(getattr(obj, self.target_collection))
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, proxy = getattr(obj, self.key)
if id(obj) == creator_id:
return proxy
except AttributeError:
pass
proxy = self._new(_lazy_collection(obj, self.target_collection))
setattr(obj, self.key, (id(obj), proxy))
return proxy
def __set__(self, obj, values):
if self.owning_class is None:
self.owning_class = type(obj)
if self.scalar:
creator = self.creator and self.creator or self.target_class
target = getattr(obj, self.target_collection)
if target is None:
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
else:
proxy = self.__get__(obj, None)
if proxy is not values:
proxy.clear()
self._set(proxy, values)
def __delete__(self, obj):
if self.owning_class is None:
self.owning_class = type(obj)
delattr(obj, self.key)
def _initialize_scalar_accessors(self):
if self.getset_factory:
get, set = self.getset_factory(None, self)
else:
get, set = self._default_getset(None)
self._scalar_get, self._scalar_set = get, set
def _default_getset(self, collection_class):
attr = self.value_attr
getter = operator.attrgetter(attr)
if collection_class is dict:
setter = lambda o, k, v: setattr(o, attr, v)
else:
setter = lambda o, v: setattr(o, attr, v)
return getter, setter
def _new(self, lazy_collection):
creator = self.creator and self.creator or self.target_class
self.collection_class = util.duck_type_collection(lazy_collection())
if self.proxy_factory:
return self.proxy_factory(lazy_collection, creator, self.value_attr, self)
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
if self.collection_class is list:
return _AssociationList(lazy_collection, creator, getter, setter, self)
elif self.collection_class is dict:
return _AssociationDict(lazy_collection, creator, getter, setter, self)
elif self.collection_class is set:
return _AssociationSet(lazy_collection, creator, getter, setter, self)
else:
raise exceptions.ArgumentError(
'could not guess which interface to use for '
'collection_class "%s" backing "%s"; specify a '
'proxy_factory and proxy_bulk_set manually' %
(self.collection_class.__name__, self.target_collection))
def _inflate(self, proxy):
creator = self.creator and self.creator or self.target_class
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _set(self, proxy, values):
if self.proxy_bulk_set:
self.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exceptions.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
@property
def _comparator(self):
return self._get_property().comparator
def any(self, criterion=None, **kwargs):
if self._value_is_scalar:
value_expr = getattr(self.target_class, self.value_attr).has(criterion, **kwargs)
else:
value_expr = getattr(self.target_class, self.value_attr).any(criterion, **kwargs)
# check _value_is_scalar here, otherwise
# we're scalar->scalar - call .any() so that
# the "can't call any() on a scalar" msg is raised.
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
value_expr
)
else:
return self._comparator.any(
value_expr
)
def has(self, criterion=None, **kwargs):
return self._comparator.has(
getattr(self.target_class, self.value_attr).has(criterion, **kwargs)
)
def contains(self, obj):
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
getattr(self.target_class, self.value_attr).contains(obj)
)
else:
return self._comparator.any(**{self.value_attr: obj})
def __eq__(self, obj):
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
return not_(self.__eq__(obj))
class _lazy_collection(object):
def __init__(self, obj, target):
self.ref = weakref.ref(obj)
self.target = target
def __call__(self):
obj = self.ref()
if obj is None:
raise exceptions.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
return getattr(obj, self.target)
def __getstate__(self):
return {'obj':self.ref(), 'target':self.target}
def __setstate__(self, state):
self.ref = weakref.ref(state['obj'])
self.target = state['target']
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
lazy_collection
A callable returning a list-based collection of entities (usually an
object attribute managed by a SQLAlchemy relationship())
creator
A function that creates new target entities. Given one parameter:
value. This assertion is assumed::
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store that
value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
col = property(lambda self: self.lazy_collection())
def __len__(self):
return len(self.col)
def __nonzero__(self):
return bool(self.col)
def __getstate__(self):
return {'parent':self.parent, 'lazy_collection':self.lazy_collection}
def __setstate__(self, state):
self.parent = state['parent']
self.lazy_collection = state['lazy_collection']
self.parent._inflate(self)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __getitem__(self, index):
return self._get(self.col[index])
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
rng = range(index.start or 0, stop, step)
if step == 1:
for i in rng:
del self[index.start]
i = index.start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value),
len(rng)))
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
raise StopIteration
def append(self, value):
item = self._create(value)
self.col.append(item)
def count(self, value):
return sum([1 for _ in
itertools.ifilter(lambda v: v == value, iter(self))])
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0:len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol('_NotProvided')
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object):
return self.getter(object)
def _set(self, object, key, value):
return self.setter(object, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return self.col.iterkeys()
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [ self._get(member) for member in self.col.values() ]
def itervalues(self):
for key in self.col:
yield self._get(self.col[key])
raise StopIteration
def items(self):
return [(k, self._get(self.col[k])) for k in self]
def iteritems(self):
for key in self.col:
yield (key, self._get(self.col[key]))
raise StopIteration
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError('update expected at most 1 arguments, got %i' %
len(a))
elif len(a) == 1:
seq_or_map = a[0]
for item in seq_or_map:
if isinstance(item, tuple):
self[item[0]] = item[1]
else:
self[item] = seq_or_map[item]
        for key, value in kw.items():
self[key] = value
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(dict, func_name)):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
class _AssociationSet(_AssociationCollection):
"""Generic, converting, set-to-set proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __len__(self):
return len(self.col)
def __nonzero__(self):
if self.col:
return True
else:
return False
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or just use
the underlying collection directly from its property on the parent.
"""
for member in self.col:
yield self._get(member)
raise StopIteration
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError('pop from an empty set')
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in locals().items():
if (util.callable(func) and func.func_name == func_name and
not func.__doc__ and hasattr(set, func_name)):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func
| eunchong/build | third_party/sqlalchemy_0_7_1/sqlalchemy/ext/associationproxy.py | Python | bsd-3-clause | 28,675 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
)
class HuajiaoIE(InfoExtractor):
IE_DESC = '花椒直播'
_VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.huajiao.com/l/38941232',
'md5': 'd08bf9ac98787d24d1e4c0283f2d372d',
'info_dict': {
'id': '38941232',
'ext': 'mp4',
'title': '#新人求关注#',
'description': 're:.*',
'duration': 2424.0,
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1475866459,
'upload_date': '20161007',
'uploader': 'Penny_余姿昀',
'uploader_id': '75206005',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
feed_json = self._search_regex(
r'var\s+feed\s*=\s*({.+})', webpage, 'feed json')
feed = self._parse_json(feed_json, video_id)
description = self._html_search_meta(
'description', webpage, 'description', fatal=False)
def get(section, field):
return feed.get(section, {}).get(field)
return {
'id': video_id,
'title': feed['feed']['formated_title'],
'description': description,
'duration': parse_duration(get('feed', 'duration')),
'thumbnail': get('feed', 'image'),
'timestamp': parse_iso8601(feed.get('creatime'), ' '),
'uploader': get('author', 'nickname'),
'uploader_id': get('author', 'uid'),
'formats': self._extract_m3u8_formats(
feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'),
}
| TRox1972/youtube-dl | youtube_dl/extractor/huajiao.py | Python | unlicense | 1,849 |
# ptext module: place this in your import directory.
# ptext.draw(text, pos=None, **options)
# Please see README.md for explanation of options.
# https://github.com/cosmologicon/pygame-text
from __future__ import division
from math import ceil, sin, cos, radians, exp
import pygame
DEFAULT_FONT_SIZE = 24
REFERENCE_FONT_SIZE = 100
DEFAULT_LINE_HEIGHT = 1.0
DEFAULT_PARAGRAPH_SPACE = 0.0
DEFAULT_FONT_NAME = None
FONT_NAME_TEMPLATE = "%s"
DEFAULT_COLOR = "white"
DEFAULT_BACKGROUND = None
DEFAULT_SHADE = 0
DEFAULT_OUTLINE_COLOR = "black"
DEFAULT_SHADOW_COLOR = "black"
OUTLINE_UNIT = 1 / 24
SHADOW_UNIT = 1 / 18
DEFAULT_ALIGN = "left" # left, center, or right
DEFAULT_ANCHOR = 0, 0 # 0, 0 = top left ; 1, 1 = bottom right
DEFAULT_STRIP = True
ALPHA_RESOLUTION = 16
ANGLE_RESOLUTION_DEGREES = 3
AUTO_CLEAN = True
MEMORY_LIMIT_MB = 64
MEMORY_REDUCTION_FACTOR = 0.5
pygame.font.init()
_font_cache = {}
def getfont(fontname=None, fontsize=None, sysfontname=None,
bold=None, italic=None, underline=None):
if fontname is not None and sysfontname is not None:
raise ValueError("Can't set both fontname and sysfontname")
if fontname is None and sysfontname is None:
fontname = DEFAULT_FONT_NAME
if fontsize is None:
fontsize = DEFAULT_FONT_SIZE
key = fontname, fontsize, sysfontname, bold, italic, underline
if key in _font_cache:
return _font_cache[key]
if sysfontname is not None:
font = pygame.font.SysFont(sysfontname, fontsize, bold or False, italic or False)
else:
if fontname is not None:
fontname = FONT_NAME_TEMPLATE % fontname
try:
font = pygame.font.Font(fontname, fontsize)
except IOError:
raise IOError("unable to read font filename: %s" % fontname)
if bold is not None:
font.set_bold(bold)
if italic is not None:
font.set_italic(italic)
if underline is not None:
font.set_underline(underline)
_font_cache[key] = font
return font
def wrap(text, fontname=None, fontsize=None, sysfontname=None,
bold=None, italic=None, underline=None, width=None, widthem=None, strip=None):
if widthem is None:
font = getfont(fontname, fontsize, sysfontname, bold, italic, underline)
elif width is not None:
raise ValueError("Can't set both width and widthem")
else:
font = getfont(fontname, REFERENCE_FONT_SIZE, sysfontname, bold, italic, underline)
width = widthem * REFERENCE_FONT_SIZE
if strip is None:
strip = DEFAULT_STRIP
paras = text.replace("\t", " ").split("\n")
lines = []
for jpara, para in enumerate(paras):
if strip:
para = para.rstrip(" ")
if width is None:
lines.append((para, jpara))
continue
if not para:
lines.append(("", jpara))
continue
# Preserve leading spaces in all cases.
a = len(para) - len(para.lstrip(" "))
# At any time, a is the rightmost known index you can legally split a line. I.e. it's legal
# to add para[:a] to lines, and line is what will be added to lines if para is split at a.
        a = para.index(" ", a) if " " in para[a:] else len(para)
line = para[:a]
while a + 1 < len(para):
# b is the next legal place to break the line, with bline the corresponding line to add.
if " " not in para[a + 1:]:
b = len(para)
bline = para
elif strip:
# Lines may be split at any space character that immediately follows a non-space
# character.
b = para.index(" ", a + 1)
while para[b - 1] == " ":
if " " in para[b + 1:]:
b = para.index(" ", b + 1)
else:
b = len(para)
break
bline = para[:b]
else:
# Lines may be split at any space character, or any character immediately following
# a space character.
b = a + 1 if para[a] == " " else para.index(" ", a + 1)
bline = para[:b]
if font.size(bline)[0] <= width:
a, line = b, bline
else:
lines.append((line, jpara))
para = para[a:].lstrip(" ") if strip else para[a:]
a = para.index(" ", 1) if " " in para[1:] else len(para)
line = para[:a]
if para:
lines.append((line, jpara))
return lines
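# Illustrative sketch of the return value: wrap() produces a list of
# (line, paragraph_index) tuples, e.g. wrap("hello world\ngoodbye", fontsize=24, width=60)
# might return [("hello", 0), ("world", 0), ("goodbye", 1)] depending on the font metrics;
# the paragraph index records which "\n"-separated paragraph each wrapped line came from.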
_fit_cache = {}
def _fitsize(text, fontname, sysfontname, bold, italic, underline, width, height, lineheight, pspace, strip):
key = text, fontname, sysfontname, bold, italic, underline, width, height, lineheight, pspace, strip
if key in _fit_cache:
return _fit_cache[key]
def fits(fontsize):
        texts = wrap(text, fontname, fontsize, sysfontname, bold, italic, underline, width, strip=strip)
font = getfont(fontname, fontsize, sysfontname, bold, italic, underline)
w = max(font.size(line)[0] for line, jpara in texts)
linesize = font.get_linesize() * lineheight
paraspace = font.get_linesize() * pspace
h = int(round((len(texts) - 1) * linesize + texts[-1][1] * paraspace)) + font.get_height()
return w <= width and h <= height
a, b = 1, 256
if not fits(a):
fontsize = a
elif fits(b):
fontsize = b
else:
while b - a > 1:
c = (a + b) // 2
if fits(c):
a = c
else:
b = c
fontsize = a
_fit_cache[key] = fontsize
return fontsize
def _resolvecolor(color, default):
if color is None:
color = default
if color is None:
return None
try:
return tuple(pygame.Color(color))
except ValueError:
return tuple(color)
def _applyshade(color, shade):
f = exp(-0.4 * shade)
r, g, b = [
min(max(int(round((c + 50) * f - 50)), 0), 255)
for c in color[:3]
]
return (r, g, b) + tuple(color[3:])
def _resolvealpha(alpha):
if alpha >= 1:
return 1
return max(int(round(alpha * ALPHA_RESOLUTION)) / ALPHA_RESOLUTION, 0)
def _resolveangle(angle):
if not angle:
return 0
angle %= 360
return int(round(angle / ANGLE_RESOLUTION_DEGREES)) * ANGLE_RESOLUTION_DEGREES
# Return the set of points in the circle radius r, using Bresenham's circle algorithm
_circle_cache = {}
def _circlepoints(r):
r = int(round(r))
if r in _circle_cache:
return _circle_cache[r]
x, y, e = r, 0, 1 - r
_circle_cache[r] = points = []
while x >= y:
points.append((x, y))
y += 1
if e < 0:
e += 2 * y - 1
else:
x -= 1
e += 2 * (y - x) - 1
points += [(y, x) for x, y in points if x > y]
points += [(-x, y) for x, y in points if x]
points += [(x, -y) for x, y in points if y]
points.sort()
return points
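# For example, _circlepoints(1) returns [(-1, 0), (0, -1), (0, 1), (1, 0)]; getsurf()
# blits the outline-colored surface once at each of these offsets to draw a rounded
# outline of radius opx around the text.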
_surf_cache = {}
_surf_tick_usage = {}
_surf_size_total = 0
_unrotated_size = {}
_tick = 0
def getsurf(text, fontname=None, fontsize=None, sysfontname=None, bold=None, italic=None,
underline=None, width=None, widthem=None, strip=None, color=None,
background=None, antialias=True, ocolor=None, owidth=None, scolor=None, shadow=None,
gcolor=None, shade=None, alpha=1.0, align=None, lineheight=None, pspace=None, angle=0,
cache=True):
global _tick, _surf_size_total
if fontname is None:
fontname = DEFAULT_FONT_NAME
if fontsize is None:
fontsize = DEFAULT_FONT_SIZE
fontsize = int(round(fontsize))
if align is None:
align = DEFAULT_ALIGN
if align in ["left", "center", "right"]:
align = [0, 0.5, 1][["left", "center", "right"].index(align)]
if lineheight is None:
lineheight = DEFAULT_LINE_HEIGHT
if pspace is None:
pspace = DEFAULT_PARAGRAPH_SPACE
color = _resolvecolor(color, DEFAULT_COLOR)
background = _resolvecolor(background, DEFAULT_BACKGROUND)
gcolor = _resolvecolor(gcolor, None)
if shade is None:
shade = DEFAULT_SHADE
if shade:
gcolor = _applyshade(gcolor or color, shade)
shade = 0
ocolor = None if owidth is None else _resolvecolor(ocolor, DEFAULT_OUTLINE_COLOR)
scolor = None if shadow is None else _resolvecolor(scolor, DEFAULT_SHADOW_COLOR)
opx = None if owidth is None else ceil(owidth * fontsize * OUTLINE_UNIT)
spx = None if shadow is None else tuple(ceil(s * fontsize * SHADOW_UNIT) for s in shadow)
alpha = _resolvealpha(alpha)
angle = _resolveangle(angle)
strip = DEFAULT_STRIP if strip is None else strip
key = (text, fontname, fontsize, sysfontname, bold, italic, underline, width, widthem, strip,
color, background, antialias, ocolor, opx, scolor, spx, gcolor, alpha, align, lineheight,
pspace, angle)
if key in _surf_cache:
_surf_tick_usage[key] = _tick
_tick += 1
return _surf_cache[key]
texts = wrap(text, fontname, fontsize, sysfontname, bold, italic, underline,
width=width, widthem=widthem, strip=strip)
if angle:
surf0 = getsurf(text, fontname, fontsize, sysfontname, bold, italic, underline,
width, widthem, strip, color, background, antialias,
ocolor, owidth, scolor, shadow, gcolor, 0, alpha, align, lineheight, pspace,
cache=cache)
if angle in (90, 180, 270):
surf = pygame.transform.rotate(surf0, angle)
else:
surf = pygame.transform.rotozoom(surf0, angle, 1.0)
_unrotated_size[(surf.get_size(), angle, text)] = surf0.get_size()
elif alpha < 1.0:
surf0 = getsurf(text, fontname, fontsize, sysfontname, bold, italic, underline,
width, widthem, strip, color, background, antialias,
ocolor, owidth, scolor, shadow, gcolor=gcolor, shade=0, align=align,
lineheight=lineheight, pspace=pspace, cache=cache)
surf = surf0.copy()
_surf = surf0.copy()
_surf.fill((255, 255, 255, int(alpha * 255.0)))
surf.blit(_surf, (0, 0), None, pygame.BLEND_RGBA_MULT)
del _surf
# array = pygame.surfarray.pixels_alpha(surf)
# array[:, :] = (array[:, :] * alpha).astype(array.dtype)
# del array
elif spx is not None:
surf0 = getsurf(text, fontname, fontsize, sysfontname, bold, italic, underline,
width, widthem, strip, color=color, background=(0, 0, 0, 0), antialias=antialias,
gcolor=gcolor, shade=0, align=align, lineheight=lineheight, pspace=pspace, cache=cache)
ssurf = getsurf(text, fontname, fontsize, sysfontname, bold, italic, underline,
width, widthem, strip, color=scolor, background=(0, 0, 0, 0), antialias=antialias,
align=align, lineheight=lineheight, pspace=pspace, cache=cache)
w0, h0 = surf0.get_size()
sx, sy = spx
surf = pygame.Surface((w0 + abs(sx), h0 + abs(sy))).convert_alpha()
surf.fill(background or (0, 0, 0, 0))
dx, dy = max(sx, 0), max(sy, 0)
surf.blit(ssurf, (dx, dy))
x0, y0 = abs(sx) - dx, abs(sy) - dy
if len(color) > 3 and color[3] == 0:
raise Exception("spx, color[3]==0")
# array = pygame.surfarray.pixels_alpha(surf)
# array0 = pygame.surfarray.pixels_alpha(surf0)
# array[x0:x0 + w0, y0:y0 + h0] -= array0.clip(max=array[x0:x0 + w0, y0:y0 + h0])
# del array, array0
pass
else:
surf.blit(surf0, (x0, y0))
elif opx is not None:
surf0 = getsurf(text, fontname, fontsize, sysfontname, bold, italic, underline,
width, widthem, strip, color=color, background=(0, 0, 0, 0), antialias=antialias,
gcolor=gcolor, shade=0, align=align, lineheight=lineheight, pspace=pspace, cache=cache)
osurf = getsurf(text, fontname, fontsize, sysfontname, bold, italic, underline,
width, widthem, strip, color=ocolor, background=(0, 0, 0, 0), antialias=antialias,
align=align, lineheight=lineheight, pspace=pspace, cache=cache)
w0, h0 = surf0.get_size()
surf = pygame.Surface((w0 + 2 * opx, h0 + 2 * opx)).convert_alpha()
surf.fill(background or (0, 0, 0, 0))
for dx, dy in _circlepoints(opx):
surf.blit(osurf, (dx + opx, dy + opx))
if len(color) > 3 and color[3] == 0:
# array = pygame.surfarray.pixels_alpha(surf)
# array0 = pygame.surfarray.pixels_alpha(surf0)
# array[opx:-opx, opx:-opx] -= array0.clip(max=array[opx:-opx, opx:-opx])
# del array, array0
# raise Exception("opx, color[3] == 0")
# _surf = surf0.copy()
# _surf.fill((0, 0, 0, 0))
# _surf.blit(surf, (0, 0), None, pygame.BLEND_RGBA_MAX)
# surf0.blit(_surf, (0, 0), None, pygame.BLEND_RGBA_MULT)
_surf = surf0.copy()
_surf.fill((0, 0, 0, 255), None, pygame.BLEND_RGBA_MULT)
surf.blit(_surf, (opx, opx), None, pygame.BLEND_RGBA_SUB)
del _surf
else:
surf.blit(surf0, (opx, opx))
else:
font = getfont(fontname, fontsize, sysfontname, bold, italic, underline)
# pygame.Font.render does not allow passing None as an argument value for background.
if background is None or (len(background) > 3 and background[3] == 0) or gcolor is not None:
lsurfs = [font.render(text, antialias, color).convert_alpha() for text, jpara in texts]
else:
lsurfs = [font.render(text, antialias, color, background).convert_alpha() for text, jpara in texts]
if gcolor is not None:
# import numpy
# m = numpy.clip(numpy.arange(lsurfs[0].get_height()) * 2.0 / font.get_ascent() - 1.0, 0, 1)
# for lsurf in lsurfs:
# array = pygame.surfarray.pixels3d(lsurf)
# for j in (0, 1, 2):
# array[:, :, j] = ((1.0 - m) * array[:, :, j] + m * gcolor[j]).astype(array.dtype)
# del array
_surf_height = lsurfs[0].get_height()
m = (_x * 2.0 / font.get_ascent() - 1.0 for _x in range(_surf_height))
m = [0 if _x < 0 else (1 if _x > 1 else _x) for _x in m]
for lsurf in lsurfs:
grad1 = pygame.Surface((1, _surf_height))
grad2 = pygame.Surface((1, _surf_height))
for idx, _m_val in enumerate(m):
_inv_m_val = 1.0 - _m_val
_color = (int(round(_inv_m_val * 255)),
int(round(_inv_m_val * 255)),
int(round(_inv_m_val * 255)))
grad1.set_at((0, idx), _color)
_color = (int(round(_m_val * gcolor[0])),
int(round(_m_val * gcolor[1])),
int(round(_m_val * gcolor[2])))
grad2.set_at((0, idx), _color)
grad1 = pygame.transform.scale(grad1, lsurf.get_size())
grad2 = pygame.transform.scale(grad2, lsurf.get_size())
lsurf.blit(grad1, (0, 0), None, pygame.BLEND_RGB_MULT)
lsurf.blit(grad2, (0, 0), None, pygame.BLEND_RGB_ADD)
del grad1
del grad2
if len(lsurfs) == 1 and gcolor is None:
surf = lsurfs[0]
else:
w = max(lsurf.get_width() for lsurf in lsurfs)
linesize = font.get_linesize() * lineheight
parasize = font.get_linesize() * pspace
ys = [int(round(k * linesize + jpara * parasize)) for k, (text, jpara) in enumerate(texts)]
h = ys[-1] + font.get_height()
surf = pygame.Surface((w, h)).convert_alpha()
surf.fill(background or (0, 0, 0, 0))
for y, lsurf in zip(ys, lsurfs):
x = int(round(align * (w - lsurf.get_width())))
surf.blit(lsurf, (x, y))
if cache:
w, h = surf.get_size()
_surf_size_total += 4 * w * h
_surf_cache[key] = surf
_surf_tick_usage[key] = _tick
_tick += 1
return surf
_default_surf_sentinel = ()
def draw(text, pos=None,
fontname=None, fontsize=None, sysfontname=None,
antialias=True, bold=None, italic=None, underline=None,
color=None, background=None,
top=None, left=None, bottom=None, right=None,
topleft=None, bottomleft=None, topright=None, bottomright=None,
midtop=None, midleft=None, midbottom=None, midright=None,
center=None, centerx=None, centery=None,
width=None, widthem=None, lineheight=None, pspace=None, strip=None,
align=None,
owidth=None, ocolor=None,
shadow=None, scolor=None,
gcolor=None, shade=None,
alpha=1.0,
anchor=None,
angle=0,
surf=_default_surf_sentinel,
cache=True):
if topleft:
left, top = topleft
if bottomleft:
left, bottom = bottomleft
if topright:
right, top = topright
if bottomright:
right, bottom = bottomright
if midtop:
centerx, top = midtop
if midleft:
left, centery = midleft
if midbottom:
centerx, bottom = midbottom
if midright:
right, centery = midright
if center:
centerx, centery = center
x, y = pos or (None, None)
hanchor, vanchor = anchor or (None, None)
if left is not None:
x, hanchor = left, 0
if centerx is not None:
x, hanchor = centerx, 0.5
if right is not None:
x, hanchor = right, 1
if top is not None:
y, vanchor = top, 0
if centery is not None:
y, vanchor = centery, 0.5
if bottom is not None:
y, vanchor = bottom, 1
if x is None:
raise ValueError("Unable to determine horizontal position")
if y is None:
raise ValueError("Unable to determine vertical position")
if align is None:
align = hanchor
if hanchor is None:
hanchor = DEFAULT_ANCHOR[0]
if vanchor is None:
vanchor = DEFAULT_ANCHOR[1]
tsurf = getsurf(text, fontname, fontsize, sysfontname, bold, italic, underline, width, widthem,
strip, color, background, antialias, ocolor, owidth, scolor, shadow, gcolor, shade, alpha,
align, lineheight, pspace, angle, cache)
angle = _resolveangle(angle)
if angle:
w0, h0 = _unrotated_size[(tsurf.get_size(), angle, text)]
S, C = sin(radians(angle)), cos(radians(angle))
dx, dy = (0.5 - hanchor) * w0, (0.5 - vanchor) * h0
x += dx * C + dy * S - 0.5 * tsurf.get_width()
y += -dx * S + dy * C - 0.5 * tsurf.get_height()
else:
x -= hanchor * tsurf.get_width()
y -= vanchor * tsurf.get_height()
x = int(round(x))
y = int(round(y))
if surf is _default_surf_sentinel:
surf = pygame.display.get_surface()
if surf is not None:
surf.blit(tsurf, (x, y))
if AUTO_CLEAN:
clean()
return tsurf, (x, y)
def drawbox(text, rect, fontname=None, sysfontname=None, lineheight=None, pspace=None, anchor=None,
bold=None, italic=None, underline=None, strip=None, **kwargs):
if fontname is None:
fontname = DEFAULT_FONT_NAME
if lineheight is None:
lineheight = DEFAULT_LINE_HEIGHT
if pspace is None:
pspace = DEFAULT_PARAGRAPH_SPACE
hanchor, vanchor = anchor = anchor or (0.5, 0.5)
rect = pygame.Rect(rect)
x = rect.x + hanchor * rect.width
y = rect.y + vanchor * rect.height
fontsize = _fitsize(text, fontname, sysfontname, bold, italic, underline,
rect.width, rect.height, lineheight, pspace, strip)
return draw(text, (x, y), fontname=fontname, fontsize=fontsize, lineheight=lineheight,
pspace=pspace, width=rect.width, strip=strip, anchor=anchor, **kwargs)
def clean():
    # Evict the least-recently-used cached text surfaces until the cache is back under the memory limit.
global _surf_size_total
memory_limit = MEMORY_LIMIT_MB * (1 << 20)
if _surf_size_total < memory_limit:
return
memory_limit *= MEMORY_REDUCTION_FACTOR
keys = sorted(_surf_cache, key=_surf_tick_usage.get)
for key in keys:
w, h = _surf_cache[key].get_size()
del _surf_cache[key]
del _surf_tick_usage[key]
_surf_size_total -= 4 * w * h
if _surf_size_total < memory_limit:
break
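# Illustrative usage sketch, not part of the original module: it assumes pygame can open a
# display and that the module-level defaults defined earlier in this file are in effect. The
# strings, colors and coordinates below are made-up placeholders.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    screen.fill((0, 0, 40))
    # draw() renders (and caches) a text surface, then blits it onto the current display surface.
    draw("Hello, world", center=(320, 160), fontsize=48, color=(255, 255, 255),
         owidth=1.5, ocolor=(0, 0, 0), shadow=(1.0, 1.0))
    # drawbox() picks the largest font size whose wrapped text still fits inside the rect.
    drawbox("Wrapped text sized to fit this box", pygame.Rect(40, 300, 560, 140), color=(255, 220, 80))
    pygame.display.flip()
    pygame.time.wait(2000)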
| gentooza/Freedom-Fighters-of-Might-Magic | src/gamelib/gummworld2/pygametext.py | Python | gpl-3.0 | 21,402 |
import logging
from typing import Dict, List, Tuple
import pandas as pd
import pytest
from great_expectations.core import (
ExpectationConfiguration,
ExpectationValidationResult,
)
from great_expectations.core.expectation_diagnostics.expectation_test_data_cases import (
ExpectationTestCase,
ExpectationTestDataCases,
TestBackend,
TestData,
)
from great_expectations.core.expectation_diagnostics.supporting_types import (
ExpectationExecutionEngineDiagnostics,
)
from great_expectations.exceptions import GreatExpectationsError
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.expectations.metrics.util import column_reflection_fallback
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.self_check.util import build_sa_validator_with_data
from great_expectations.self_check.util import (
build_test_backends_list as build_test_backends_list_v3,
)
from great_expectations.self_check.util import (
generate_expectation_tests,
generate_test_table_name,
should_we_generate_this_test,
)
from great_expectations.validator.metric_configuration import MetricConfiguration
from great_expectations.validator.validator import Validator
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sqlalchemy
from sqlalchemy import create_engine
# noinspection PyProtectedMember
from sqlalchemy.engine import Engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.sql import Select
except ImportError:
sqlalchemy = None
create_engine = None
Engine = None
Select = None
SQLAlchemyError = None
logger.debug("Unable to load SqlAlchemy or one of its subclasses.")
def get_table_columns_metric(engine: ExecutionEngine) -> Tuple[MetricConfiguration, dict]:
resolved_metrics: dict = {}
results: dict
table_column_types_metric: MetricConfiguration = MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs=dict(),
metric_value_kwargs={
"include_nested": True,
},
metric_dependencies=None,
)
results = engine.resolve_metrics(metrics_to_resolve=(table_column_types_metric,))
resolved_metrics.update(results)
table_columns_metric: MetricConfiguration = MetricConfiguration(
metric_name="table.columns",
metric_domain_kwargs=dict(),
metric_value_kwargs=None,
metric_dependencies={
"table.column_types": table_column_types_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(table_columns_metric,), metrics=resolved_metrics
)
resolved_metrics.update(results)
return table_columns_metric, resolved_metrics
@pytest.fixture(scope="module")
def expectation_and_runtime_configuration_with_evaluation_parameters():
configuration = ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={
"column": "live",
"min_value": {"$PARAMETER": "MIN_VAL_PARAM"},
"max_value": {"$PARAMETER": "MAX_VAL_PARAM"},
"result_format": "SUMMARY",
},
meta={"BasicDatasetProfiler": {"confidence": "very low"}},
)
# runtime configuration with evaluation_parameters loaded
runtime_configuration_with_eval = {
"styling": {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
},
"include_column_name": None,
"evaluation_parameters": {"MIN_VAL_PARAM": 15, "MAX_VAL_PARAM": 20},
}
return configuration, runtime_configuration_with_eval
def test_prescriptive_renderer_no_decorator(
expectation_and_runtime_configuration_with_evaluation_parameters,
):
(
configuration,
runtime_configuration_with_eval,
) = expectation_and_runtime_configuration_with_evaluation_parameters
def bare_bones_prescriptive_renderer(
configuration=None,
runtime_configuration=None,
):
runtime_configuration = runtime_configuration or {}
styling = runtime_configuration.get("styling")
params = configuration.kwargs
template_str = "$column minimum value must be greater than or equal to $min_value and less than or equal to $max_value"
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
res = bare_bones_prescriptive_renderer(
configuration=configuration,
runtime_configuration=runtime_configuration_with_eval,
)
assert len(res) == 1
# string template should remain constant
assert (
res[0].string_template["template"]
== "$column minimum value must be greater than or equal to $min_value and less than or equal to $max_value"
)
# params should contain our evaluation parameters
assert res[0].string_template["params"]["min_value"] == {
"$PARAMETER": "MIN_VAL_PARAM"
}
assert res[0].string_template["params"]["max_value"] == {
"$PARAMETER": "MAX_VAL_PARAM"
}
# full json dict comparison
assert res[0].to_json_dict() == {
"content_block_type": "string_template",
"string_template": {
"template": "$column minimum value must be greater than or equal to $min_value and less than or equal to $max_value",
"params": {
"column": "live",
"min_value": {"$PARAMETER": "MIN_VAL_PARAM"},
"max_value": {"$PARAMETER": "MAX_VAL_PARAM"},
"result_format": "SUMMARY",
},
"styling": {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
},
},
}
def test_prescriptive_renderer_with_decorator(
expectation_and_runtime_configuration_with_evaluation_parameters,
):
(
configuration,
runtime_configuration_with_eval,
) = expectation_and_runtime_configuration_with_evaluation_parameters
@render_evaluation_parameter_string
def bare_bones_prescriptive_renderer(
configuration=None,
runtime_configuration=None,
):
runtime_configuration = runtime_configuration or {}
styling = runtime_configuration.get("styling")
params = configuration.kwargs
template_str = "$column minimum value must be greater than or equal to $min_value and less than or equal to $max_value"
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
res = bare_bones_prescriptive_renderer(
configuration=configuration,
runtime_configuration=runtime_configuration_with_eval,
)
assert len(res) == 3
# string template should remain constant
assert (
res[0].string_template["template"]
== "$column minimum value must be greater than or equal to $min_value and less than or equal to $max_value"
)
# params should contain our evaluation parameters
assert res[0].string_template["params"]["min_value"] == {
"$PARAMETER": "MIN_VAL_PARAM"
}
assert res[0].string_template["params"]["max_value"] == {
"$PARAMETER": "MAX_VAL_PARAM"
}
assert res[0].to_json_dict() == {
"content_block_type": "string_template",
"string_template": {
"template": "$column minimum value must be greater than or equal to $min_value and less than or equal to $max_value",
"params": {
"column": "live",
"min_value": {"$PARAMETER": "MIN_VAL_PARAM"},
"max_value": {"$PARAMETER": "MAX_VAL_PARAM"},
"result_format": "SUMMARY",
},
"styling": {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
},
},
}
assert (
res[1].string_template["template"]
== "\n - $eval_param = $eval_param_value (at time of validation)."
)
# params should contain our evaluation parameters
assert res[1].string_template["params"]["eval_param"] == "MIN_VAL_PARAM"
assert res[1].string_template["params"]["eval_param_value"] == 15
assert res[1].to_json_dict() == {
"content_block_type": "string_template",
"string_template": {
"template": "\n - $eval_param = $eval_param_value (at time of validation).",
"params": {"eval_param": "MIN_VAL_PARAM", "eval_param_value": 15},
"styling": {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
},
},
}
assert (
res[2].string_template["template"]
== "\n - $eval_param = $eval_param_value (at time of validation)."
)
# params should contain our evaluation parameters
assert res[2].string_template["params"]["eval_param"] == "MAX_VAL_PARAM"
assert res[2].string_template["params"]["eval_param_value"] == 20
assert res[2].to_json_dict() == {
"content_block_type": "string_template",
"string_template": {
"template": "\n - $eval_param = $eval_param_value (at time of validation).",
"params": {"eval_param": "MAX_VAL_PARAM", "eval_param_value": 20},
"styling": {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
},
},
}
# with no runtime_configuration, throw an error
with pytest.raises(GreatExpectationsError):
res = bare_bones_prescriptive_renderer(
configuration=configuration, runtime_configuration={}
)
# configuration should always be of ExpectationConfiguration-type
with pytest.raises(AttributeError):
res = bare_bones_prescriptive_renderer(
configuration={}, runtime_configuration={}
)
# extra evaluation parameters will not have an effect
runtime_configuration_with_extra = {
"styling": {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
},
"include_column_name": None,
"evaluation_parameters": {
"MIN_VAL_PARAM": 15,
"MAX_VAL_PARAM": 20,
"IAMEXTRA": "EXTRA",
},
}
res = bare_bones_prescriptive_renderer(
configuration=configuration,
runtime_configuration=runtime_configuration_with_extra,
)
assert len(res) == 3
# missing evaluation_parameters will not render (MAX_VAL_PARAM is missing)
runtime_configuration_with_missing = {
"styling": {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
},
"include_column_name": None,
"evaluation_parameters": {"MIN_VAL_PARAM": 15},
}
res = bare_bones_prescriptive_renderer(
configuration=configuration,
runtime_configuration=runtime_configuration_with_missing,
)
assert len(res) == 2
def test_table_column_reflection_fallback(test_backends, sa):
include_sqlalchemy: bool = "sqlite" in test_backends
include_postgresql: bool = "postgresql" in test_backends
include_mysql: bool = "mysql" in test_backends
include_mssql: bool = "mssql" in test_backends
include_bigquery: bool = "bigquery" in test_backends
if not create_engine:
pytest.skip("Unable to import sqlalchemy.create_engine() -- skipping.")
test_backend_names: List[str] = build_test_backends_list_v3(
include_pandas=False,
include_spark=False,
include_sqlalchemy=include_sqlalchemy,
include_postgresql=include_postgresql,
include_mysql=include_mysql,
include_mssql=include_mssql,
include_bigquery=include_bigquery,
)
df: pd.DataFrame = pd.DataFrame(
{
"name": ["Frank", "Steve", "Jane", "Frank", "Michael"],
"age": [16, 21, 38, 22, 10],
"pet": ["fish", "python", "cat", "python", "frog"],
}
)
validators_config: Dict[str, Validator] = {}
validator: Validator
backend_name: str
table_name: str
for backend_name in test_backend_names:
if backend_name in ["sqlite", "postgresql", "mysql", "mssql"]:
table_name = generate_test_table_name()
validator = build_sa_validator_with_data(
df=df,
sa_engine_name=backend_name,
schemas=None,
caching=True,
table_name=table_name,
sqlite_db_path=None,
)
if validator is not None:
validators_config[table_name] = validator
engine: Engine
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
reflected_columns_list: List[Dict[str, str]]
reflected_column_config: Dict[str, str]
column_name: str
validation_result: ExpectationValidationResult
for table_name, validator in validators_config.items():
table_columns_metric, results = get_table_columns_metric(
engine=validator.execution_engine
)
metrics.update(results)
assert set(metrics[table_columns_metric.id]) == {"name", "age", "pet"}
selectable: Select = sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(),
schema=None,
)
reflected_columns_list = column_reflection_fallback(
selectable=selectable,
dialect=validator.execution_engine.engine.dialect,
sqlalchemy_engine=validator.execution_engine.engine,
)
for column_name in [
reflected_column_config["name"]
for reflected_column_config in reflected_columns_list
]:
validation_result = validator.expect_column_to_exist(column=column_name)
assert validation_result.success
if validators_config:
validator = list(validators_config.values())[0]
validation_result = validator.expect_column_mean_to_be_between(
column="age", min_value=10
)
assert validation_result.success
validation_result = validator.expect_table_row_count_to_equal(value=5)
assert validation_result.success
validation_result = validator.expect_table_row_count_to_equal(value=3)
assert not validation_result.success
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy is not installed",
)
def test__generate_expectation_tests__with_test_backends():
expectation_type = "whatever"
data = TestData(stuff=[1, 2, 3, 4, 5])
test_case = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=[],
)
test_backends = [
TestBackend(
backend="sqlalchemy",
dialects=["sqlite"],
),
]
test_data_cases = [
ExpectationTestDataCases(
data=data,
tests=[test_case],
test_backends=test_backends,
)
]
engines = ExpectationExecutionEngineDiagnostics(
PandasExecutionEngine=True,
SqlAlchemyExecutionEngine=True,
SparkDFExecutionEngine=False,
)
results = generate_expectation_tests(
expectation_type=expectation_type,
test_data_cases=test_data_cases,
execution_engine_diagnostics=engines,
raise_exceptions_for_backends=False,
)
backends_to_use = [r["backend"] for r in results]
assert backends_to_use == ["sqlite"]
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy is not installed",
)
def test__generate_expectation_tests__with_test_backends2():
expectation_type = "whatever"
data = TestData(stuff=[1, 2, 3, 4, 5])
test_case = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=[],
)
test_backends = [
TestBackend(
backend="sqlalchemy",
dialects=["sqlite"],
),
TestBackend(
backend="pandas",
dialects=None,
),
]
test_data_cases = [
ExpectationTestDataCases(
data=data,
tests=[test_case],
test_backends=test_backends,
)
]
engines = ExpectationExecutionEngineDiagnostics(
PandasExecutionEngine=True,
SqlAlchemyExecutionEngine=True,
SparkDFExecutionEngine=False,
)
results = generate_expectation_tests(
expectation_type=expectation_type,
test_data_cases=test_data_cases,
execution_engine_diagnostics=engines,
raise_exceptions_for_backends=False,
)
backends_to_use = [r["backend"] for r in results]
assert sorted(backends_to_use) == ["pandas", "sqlite"]
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy is not installed",
)
def test__generate_expectation_tests__with_no_test_backends():
expectation_type = "whatever"
data = TestData(stuff=[1, 2, 3, 4, 5])
test_case = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=[],
)
test_data_cases = [
ExpectationTestDataCases(
data=data,
tests=[test_case],
)
]
engines = ExpectationExecutionEngineDiagnostics(
PandasExecutionEngine=True,
SqlAlchemyExecutionEngine=True,
SparkDFExecutionEngine=False,
)
results = generate_expectation_tests(
expectation_type=expectation_type,
test_data_cases=test_data_cases,
execution_engine_diagnostics=engines,
raise_exceptions_for_backends=False,
)
backends_to_use = [r["backend"] for r in results]
# If another SQL backend is available wherever this test is being run, it will
# be included (i.e. postgresql)
assert "pandas" in backends_to_use
assert "sqlite" in backends_to_use
assert "spark" not in backends_to_use
def test__TestBackend__bad_backends():
with pytest.raises(AssertionError):
TestBackend(
backend="dogs",
dialects=None,
)
def test__TestBackend__bad_dialects():
with pytest.raises(AssertionError):
TestBackend(
backend="sqlalchemy",
dialects=None,
)
with pytest.raises(AssertionError):
TestBackend(
backend="sqlalchemy",
dialects=[],
)
with pytest.raises(AssertionError):
TestBackend(
backend="sqlalchemy",
dialects=["postgresql", "mysql", "ramen"],
)
with pytest.raises(AssertionError):
TestBackend(
backend="spark",
dialects=["sqlite"],
)
with pytest.raises(AssertionError):
TestBackend(
backend="pandas",
dialects=["sqlite"],
)
with pytest.raises(AssertionError):
TestBackend(
backend="sqlalchemy",
dialects="sqlite",
)
TestBackend(
backend="sqlalchemy",
dialects=["sqlite"],
)
def test__TestBackend__good_backends_and_dialects():
tb1 = TestBackend(
backend="pandas",
dialects=None,
)
tb2 = TestBackend(
backend="spark",
dialects=None,
)
tb3 = TestBackend(
backend="sqlalchemy",
dialects=["sqlite", "postgresql", "mysql"],
)
def test__should_we_generate_this_test__obvious():
test_case = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=[],
)
backend = "spark"
assert should_we_generate_this_test(backend, test_case) == True
test_case2 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=["postgresql"],
)
backend2 = "sqlite"
assert should_we_generate_this_test(backend2, test_case2) == False
test_case3 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=["postgresql", "sqlite"],
)
backend3 = "sqlite"
assert should_we_generate_this_test(backend3, test_case3) == True
test_case4 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=["sqlite"],
only_for=[],
)
backend4 = "sqlite"
assert should_we_generate_this_test(backend4, test_case4) == False
test_case5 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=["postgresql", "mssql"],
only_for=["pandas", "sqlite"],
)
backend5 = "pandas"
assert should_we_generate_this_test(backend5, test_case5) == True
def test__should_we_generate_this_test__sqlalchemy():
test_case = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=["sqlalchemy"],
)
backend = "mysql"
assert should_we_generate_this_test(backend, test_case) == True
test_case2 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=["sqlalchemy"],
)
backend2 = "postgresql"
assert should_we_generate_this_test(backend2, test_case2) == True
test_case3 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=["mysql"],
only_for=["sqlalchemy"],
)
backend3 = "mysql"
assert should_we_generate_this_test(backend3, test_case3) == False
test_case4 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=["sqlalchemy"],
only_for=[],
)
backend4 = "sqlite"
assert should_we_generate_this_test(backend4, test_case4) == False
test_case5 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=["sqlalchemy"],
only_for=[],
)
backend5 = "spark"
assert should_we_generate_this_test(backend5, test_case5) == True
def test__should_we_generate_this_test__pandas():
"""
Our CI/CD runs tests against pandas versions 0.23.4, 0.25.3, and latest (1.x currently)
See: azure-pipelines.yml in project root
"""
major, minor, *_ = pd.__version__.split(".")
test_case = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=[],
)
backend = "pandas"
assert should_we_generate_this_test(backend, test_case) == True
test_case2 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=["pandas", "spark"],
)
backend2 = "pandas"
assert should_we_generate_this_test(backend2, test_case2) == True
test_case3 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=["pandas>=024"],
)
backend3 = "pandas"
expected3 = False
if (major == "0" and int(minor) >= 24) or int(major) >= 1:
expected3 = True
assert should_we_generate_this_test(backend3, test_case3) == expected3
test_case4 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=[],
only_for=["pandas_023"],
)
backend4 = "pandas"
expected4 = False
if major == "0" and minor == "23":
expected4 = True
assert should_we_generate_this_test(backend4, test_case4) == expected4
test_case5 = ExpectationTestCase(
title="",
input={},
output={},
exact_match_out=False,
include_in_gallery=False,
suppress_test_for=["pandas"],
only_for=[],
)
backend5 = "pandas"
assert should_we_generate_this_test(backend5, test_case5) == False
| great-expectations/great_expectations | tests/expectations/test_util.py | Python | apache-2.0 | 26,094 |
from typing import Type, Union
class MyClass:
pass
def expects_myclass_or_str1(x: Type[Union[MyClass, str]]):
pass
expects_myclass_or_str1(MyClass)
expects_myclass_or_str1(str)
expects_myclass_or_str1(<warning descr="Expected type 'Type[MyClass | str]', got 'Type[int]' instead">int</warning>)
expects_myclass_or_str1(<warning descr="Expected type 'Type[MyClass | str]', got 'int' instead">42</warning>)
def expects_myclass_or_str2(x: Union[Type[MyClass], Type[str]]):
pass
expects_myclass_or_str2(MyClass)
expects_myclass_or_str2(str)
expects_myclass_or_str2(<warning descr="Expected type 'Type[MyClass | str]', got 'Type[int]' instead">int</warning>)
expects_myclass_or_str2(<warning descr="Expected type 'Type[MyClass | str]', got 'int' instead">42</warning>)
| smmribeiro/intellij-community | python/testData/inspections/PyTypeCheckerInspection/ClassObjectTypeWithUnion.py | Python | apache-2.0 | 781 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import test_app.storages
class Migration(migrations.Migration):
dependencies = [
('test_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='testmodel',
name='a_file',
field=models.FileField(storage=test_app.storages.StubStorage1(), null=True, upload_to='a_file'),
),
migrations.AddField(
model_name='testmodel',
name='an_image',
field=models.ImageField(storage=test_app.storages.StubStorage2(), null=True, upload_to='an_image'),
),
]
| kezabelle/django-storagecellar | test_app/migrations/0002_auto_20151111_1408.py | Python | bsd-2-clause | 693 |
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
import time
from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class PrioritiseTransactionTest(FujicoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[
"-printpriority=1",
"-acceptnonstdtxn=1",
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)
# Test `prioritisetransaction` invalid extra parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)
# Test `prioritisetransaction` invalid `txid`
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0)
# Test `prioritisetransaction` invalid `dummy`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)
# Test `prioritisetransaction` invalid `fee_delta`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')
self.txouts = gen_return_txouts()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert j in mempool
sizes[i] += mempool[j]['vsize']
assert sizes[i] > MAX_BLOCK_BASE_SIZE # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that prioritised transaction was mined")
assert txids[0][0] not in mempool
assert txids[0][1] in mempool
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert high_fee_tx is not None
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert high_fee_tx in mempool
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that de-prioritised transaction is still in mempool")
assert high_fee_tx in mempool
for x in txids[2]:
if (x != high_fee_tx):
assert x not in mempool
# Create a free transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert len(utxo_list) > 0
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
assert tx_id not in self.nodes[0].getrawmempool()
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000-byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
self.log.info("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
assert tx_id in self.nodes[0].getrawmempool()
# Test that calling prioritisetransaction is sufficient to trigger
# getblocktemplate to (eventually) return a new block.
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
self.nodes[0].setmocktime(mock_time+10)
new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template != new_template
if __name__ == '__main__':
PrioritiseTransactionTest().main()
| fujicoin/fujicoin | test/functional/mining_prioritisetransaction.py | Python | mit | 7,674 |
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
'''
NFVO engine, implementing all the methods for the creation, deletion and management of vnfs, scenarios and instances
'''
__author__="Alfonso Tierno, Gerardo Garcia"
__date__ ="$16-sep-2014 22:05:01$"
import imp
#import json
import yaml
import os
from utils import auxiliary_functions as af
from nfvo_db import HTTP_Unauthorized, HTTP_Bad_Request, HTTP_Internal_Server_Error, HTTP_Not_Found,\
HTTP_Conflict
import console_proxy_thread as cli
global global_config
global vimconn_imported
vimconn_imported={} #dictionary with VIM type as key, loaded module as value
def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
'''Obtain flavorList
return result, content:
<0, error_text upon error
nb_records, flavor_list on success
'''
WHERE_dict={}
WHERE_dict['vnf_id'] = vnf_id
if nfvo_tenant is not None:
WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
#result, content = mydb.get_table(FROM='vms join vnfs on vms.vnf_id = vnfs.uuid',SELECT=('uuid'),WHERE=WHERE_dict )
#result, content = mydb.get_table(FROM='vms',SELECT=('vim_flavor_id',),WHERE=WHERE_dict )
result, content = mydb.get_table(FROM='vms join flavors on vms.flavor_id=flavors.uuid',SELECT=('flavor_id',),WHERE=WHERE_dict )
if result < 0:
print "nfvo.get_flavorlist error %d %s" % (result, content)
return -result, content
print "get_flavor_list result:", result
print "get_flavor_list content:", content
flavorList=[]
for flavor in content:
flavorList.append(flavor['flavor_id'])
return result, flavorList
def get_imagelist(mydb, vnf_id, nfvo_tenant=None):
'''Obtain imageList
return result, content:
<0, error_text upon error
nb_records, flavor_list on success
'''
WHERE_dict={}
WHERE_dict['vnf_id'] = vnf_id
if nfvo_tenant is not None:
WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
#result, content = mydb.get_table(FROM='vms join vnfs on vms-vnf_id = vnfs.uuid',SELECT=('uuid'),WHERE=WHERE_dict )
result, content = mydb.get_table(FROM='vms join images on vms.image_id=images.uuid',SELECT=('image_id',),WHERE=WHERE_dict )
if result < 0:
print "nfvo.get_imagelist error %d %s" % (result, content)
return -result, content
print "get_image_list result:", result
print "get_image_list content:", content
imageList=[]
for image in content:
imageList.append(image['image_id'])
return result, imageList
def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, vim_tenant=None):
'''Obtain a dictionary of VIM (datacenter) classes with some of the input parameters
return result, content:
<0, error_text upon error
NUMBER, dictionary with datacenter_id: vim_class with these keys:
'nfvo_tenant_id','datacenter_id','vim_tenant_id','vim_url','vim_url_admin','datacenter_name','type','user','passwd'
'''
WHERE_dict={}
if nfvo_tenant is not None: WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
if datacenter_id is not None: WHERE_dict['d.uuid'] = datacenter_id
if datacenter_name is not None: WHERE_dict['d.name'] = datacenter_name
if vim_tenant is not None: WHERE_dict['vt.vim_tenant_id'] = vim_tenant
if nfvo_tenant or vim_tenant:
from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join vim_tenants as vt on td.vim_tenant_id=vt.uuid'
select_ = ('type','config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name',
'vt.uuid as vim_tenants_uuid','vt.vim_tenant_name as vim_tenant_name','vt.vim_tenant_id as vim_tenant_id',
'user','passwd')
else:
from_ = 'datacenters as d'
select_ = ('type','config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name')
result, content = mydb.get_table(FROM=from_, SELECT=select_, WHERE=WHERE_dict )
if result < 0:
print "nfvo.get_vim error %d %s" % (result, content)
return result, content
elif result==0:
print "nfvo.get_vim not found a valid VIM with the input params " + str(WHERE_dict)
return -HTTP_Not_Found, "datacenter not found for " + str(WHERE_dict)
#print content
vim_dict={}
for vim in content:
extra={'vim_tenants_uuid': vim.get('vim_tenants_uuid')}
if vim["config"] != None:
extra.update(yaml.load(vim["config"]))
if vim["type"] not in vimconn_imported:
module_info=None
try:
module = "vimconn_" + vim["type"]
module_info = imp.find_module(module)
vim_conn = imp.load_module(vim["type"], *module_info)
vimconn_imported[vim["type"]] = vim_conn
except (IOError, ImportError) as e:
if module_info and module_info[0]:
file.close(module_info[0])
print "Cannot open VIM module '%s.py'; %s: %s" % ( module, type(e).__name__, str(e))
return -HTTP_Bad_Request, "Unknown vim type %s" % vim["type"]
try:
tenant=vim.get('vim_tenant_id')
if not tenant:
tenant=vim.get('vim_tenant_name')
#if not tenant:
# return -HTTP_Bad_Request, "You must provide a valid tenant name or uuid for VIM %s" % ( vim["type"])
vim_dict[ vim['datacenter_id'] ] = vimconn_imported[ vim["type"] ].vimconnector(
uuid=vim['datacenter_id'], name=vim['datacenter_name'],
tenant=tenant,
url=vim['vim_url'], url_admin=vim['vim_url_admin'],
user=vim.get('user'), passwd=vim.get('passwd'),
config=extra
)
except Exception as e:
return -HTTP_Internal_Server_Error, "Error at VIM %s; %s: %s" % ( vim["type"], type(e).__name__, str(e))
return len(vim_dict), vim_dict
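#Illustrative sketch, not part of the original module: how the (result, vim_dict) pair returned
#by get_vim() is typically consumed. "mydb" is assumed to be a connected nfvo_db instance and
#the tenant/datacenter uuids are placeholders supplied by the caller.
def _example_get_vim_usage(mydb, nfvo_tenant=None, datacenter_id=None):
    result, vims = get_vim(mydb, nfvo_tenant=nfvo_tenant, datacenter_id=datacenter_id)
    if result < 0:
        #on error, result carries a negative HTTP code and vims the error text
        return result, vims
    for datacenter_id_, vim_conn in vims.iteritems():
        #each value is a vimconnector instance, ready to create images, flavors, networks and VMs
        print "datacenter %s handled by connector %s" % (datacenter_id_, type(vim_conn).__name__)
    return result, vims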
def rollback(mydb, vims, rollback_list):
undeleted_items=[]
#delete things by reverse order
for i in range(len(rollback_list)-1, -1, -1):
item = rollback_list[i]
if item["where"]=="vim":
if item["vim_id"] not in vims:
continue
vim=vims[ item["vim_id"] ]
if item["what"]=="image":
result, message = vim.delete_tenant_image(item["uuid"])
if result < 0:
print "Error in rollback. Not possible to delete VIM image '%s'. Message: %s" % (item["uuid"],message)
undeleted_items.append("image %s from VIM %s" % (item["uuid"],vim["name"]))
else:
result, message = mydb.delete_row_by_dict(FROM="datacenters_images", WEHRE={"datacenter_id": vim["id"], "vim_id":item["uuid"]})
if result < 0:
print "Error in rollback. Not possible to delete image '%s' from DB.dacenters_images. Message: %s" % (item["uuid"],message)
elif item["what"]=="flavor":
result, message = vim.delete_tenant_flavor(item["uuid"])
if result < 0:
print "Error in rollback. Not possible to delete VIM flavor '%s'. Message: %s" % (item["uuid"],message)
undeleted_items.append("flavor %s from VIM %s" % (item["uuid"],vim["name"]))
else:
result, message = mydb.delete_row_by_dict(FROM="datacenters_flavos", WEHRE={"datacenter_id": vim["id"], "vim_id":item["uuid"]})
if result < 0:
print "Error in rollback. Not possible to delete flavor '%s' from DB.dacenters_flavors. Message: %s" % (item["uuid"],message)
elif item["what"]=="network":
result, message = vim.delete_tenant_network(item["uuid"])
if result < 0:
print "Error in rollback. Not possible to delete VIM network '%s'. Message: %s" % (item["uuid"],message)
undeleted_items.append("network %s from VIM %s" % (item["uuid"],vim["name"]))
elif item["what"]=="vm":
result, message = vim.delete_tenant_vminstance(item["uuid"])
if result < 0:
print "Error in rollback. Not possible to delete VIM VM '%s'. Message: %s" % (item["uuid"],message)
undeleted_items.append("VM %s from VIM %s" % (item["uuid"],vim["name"]))
else: # where==mano
if item["what"]=="image":
result, message = mydb.delete_row_by_dict(FROM="images", WEHRE={"uuid": item["uuid"]})
if result < 0:
print "Error in rollback. Not possible to delete image '%s' from DB.images. Message: %s" % (item["uuid"],message)
undeleted_items.append("image %s" % (item["uuid"]))
elif item["what"]=="flavor":
result, message = mydb.delete_row_by_dict(FROM="flavors", WEHRE={"uuid": item["uuid"]})
if result < 0:
print "Error in rollback. Not possible to delete flavor '%s' from DB.flavors. Message: %s" % (item["uuid"],message)
undeleted_items.append("flavor %s" % (item["uuid"]))
if len(undeleted_items)==0:
return True," Rollback successful."
else:
return False," Rollback fails to delete: " + str(undeleted_items)
def check_vnf_descriptor(vnf_descriptor):
global global_config
#create a dictionary with vnfc-name: vnfc:interface-list key:values pairs
vnfc_interfaces={}
for vnfc in vnf_descriptor["vnf"]["VNFC"]:
name_list = []
#dataplane interfaces
for numa in vnfc.get("numas",() ):
for interface in numa.get("interfaces",()):
if interface["name"] in name_list:
return -HTTP_Bad_Request, "Error at vnf:VNFC[name:'%s']:numas:interfaces:name, interface name '%s' already used in this VNFC" %(vnfc["name"], interface["name"])
name_list.append( interface["name"] )
#bridge interfaces
for interface in vnfc.get("bridge-ifaces",() ):
if interface["name"] in name_list:
return -HTTP_Bad_Request, "Error at vnf:VNFC[name:'%s']:bridge-ifaces:name, interface name '%s' already used in this VNFC" %(vnfc["name"], interface["name"])
name_list.append( interface["name"] )
vnfc_interfaces[ vnfc["name"] ] = name_list
#check if the info in external_connections matches with the one in the vnfcs
name_list=[]
for external_connection in vnf_descriptor["vnf"].get("external-connections",() ):
if external_connection["name"] in name_list:
return -HTTP_Bad_Request, "Error at vnf:external-connections:name, value '%s' already used as an external-connection" %(external_connection["name"])
name_list.append(external_connection["name"])
if external_connection["VNFC"] not in vnfc_interfaces:
return -HTTP_Bad_Request, "Error at vnf:external-connections[name:'%s']:VNFC, value '%s' does not match any VNFC" %(external_connection["name"], external_connection["VNFC"])
if external_connection["local_iface_name"] not in vnfc_interfaces[ external_connection["VNFC"] ]:
return -HTTP_Bad_Request, "Error at vnf:external-connections[name:'%s']:local_iface_name, value '%s' does not match any interface of this VNFC" %(external_connection["name"], external_connection["local_iface_name"])
#check if the info in internal_connections matches with the one in the vnfcs
name_list=[]
for internal_connection in vnf_descriptor["vnf"].get("internal-connections",() ):
if internal_connection["name"] in name_list:
return -HTTP_Bad_Request, "Error at vnf:internal-connections:name, value '%s' already used as an internal-connection" %(internal_connection["name"])
name_list.append(internal_connection["name"])
#We should check that internal-connections of type "ptp" have only 2 elements
if len(internal_connection["elements"])>2 and internal_connection["type"] == "ptp":
return -HTTP_Bad_Request, "Error at vnf:internal-connections[name:'%s']:elements, size must be 2 for a type:'ptp'" %(internal_connection["name"])
for port in internal_connection["elements"]:
if port["VNFC"] not in vnfc_interfaces:
return -HTTP_Bad_Request, "Error at vnf:internal-connections[name:'%s']:elements[]:VNFC, value '%s' does not match any VNFC" %(internal_connection["name"], port["VNFC"])
if port["local_iface_name"] not in vnfc_interfaces[ port["VNFC"] ]:
return -HTTP_Bad_Request, "Error at vnf:internal-connections[name:'%s']:elements[]:local_iface_name, value '%s' does not match any interface of this VNFC" %(internal_connection["name"], port["local_iface_name"])
#check if the path where we should store the YAML file already exists. In that case, we should return error.
vnf_filename=global_config['vnf_repository'] + "/" +vnf_descriptor['vnf']['name'] + ".vnfd"
if os.path.exists(vnf_filename):
print "WARNING: The VNF descriptor already exists in the VNF repository"
return 200, None
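#Illustrative sketch, not part of the original module: minimal descriptor shape accepted by
#check_vnf_descriptor() above. All names are made-up placeholders; real descriptors also carry
#the per-VNFC flavor and image fields used later by new_vnf().
_example_vnf_descriptor = {"vnf": {
    "name": "example-vnf",
    "VNFC": [
        {"name": "vm1", "bridge-ifaces": [{"name": "mgmt0"}]}
    ],
    "external-connections": [
        {"name": "mgmt", "VNFC": "vm1", "local_iface_name": "mgmt0"}
    ],
    "internal-connections": []
}}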
def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False):
#look if image exist
if only_create_at_vim:
image_mano_id = image_dict['uuid']
else:
res,content = mydb.get_table(FROM="images", WHERE={'location':image_dict['location'], 'metadata':image_dict['metadata']})
if res>=1:
image_mano_id = content[0]['uuid']
elif res<0:
return res, content
else:
#create image
temp_image_dict={'name':image_dict['name'], 'description':image_dict.get('description',None),
'location':image_dict['location'], 'metadata':image_dict.get('metadata',None)
}
res,content = mydb.new_row('images', temp_image_dict, tenant_id=None, add_uuid=True)
if res>0:
image_mano_id= content
rollback_list.append({"where":"mano", "what":"image","uuid":image_mano_id})
else:
return res if res<0 else -1, content
#create image at every vim
for vim_id,vim in vims.iteritems():
image_created="false"
#look at database
res_db,image_db = mydb.get_table(FROM="datacenters_images", WHERE={'datacenter_id':vim_id, 'image_id':image_mano_id})
if res_db<0:
return res_db, image_db
#look at VIM if this image exist
res_vim, image_vim_id = vim.get_image_id_from_path(image_dict['location'])
if res_vim < 0:
print "Error contacting VIM to know if the image %s existed previously." %image_vim_id
continue
elif res_vim==0:
#Create the image in VIM
result, image_vim_id = vim.new_tenant_image(image_dict)
if result < 0:
print "Error creating image at VIM: %s." %image_vim_id
continue
else:
rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"image","uuid":image_vim_id})
image_created="true"
#if reach here the image has been create or exist
if res_db==0:
#add new vim_id at datacenters_images
mydb.new_row('datacenters_images', {'datacenter_id':vim_id, 'image_id':image_mano_id, 'vim_id': image_vim_id, 'created':image_created})
elif image_db[0]["vim_id"]!=image_vim_id:
#modify existing vim_id at datacenters_images
mydb.update_rows('datacenters_images', UPDATE={'vim_id':image_vim_id}, WHERE={'datacenter_id':vim_id, 'image_id':image_mano_id})
return 1, image_vim_id if only_create_at_vim else image_mano_id
def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_vim=False):
temp_flavor_dict= {'disk':flavor_dict.get('disk',1),
'ram':flavor_dict.get('ram'),
'vcpus':flavor_dict.get('vcpus'),
}
if 'extended' in flavor_dict and flavor_dict['extended']==None:
del flavor_dict['extended']
if 'extended' in flavor_dict:
temp_flavor_dict['extended']=yaml.safe_dump(flavor_dict['extended'],default_flow_style=True,width=256)
#look if flavor exist
if only_create_at_vim:
flavor_mano_id = flavor_dict['uuid']
else:
res,content = mydb.get_table(FROM="flavors", WHERE=temp_flavor_dict)
if res>=1:
flavor_mano_id = content[0]['uuid']
elif res<0:
return res, content
else:
#create flavor
            #create one by one the images of additional disks
dev_image_list=[] #list of images
if 'extended' in flavor_dict and flavor_dict['extended']!=None:
dev_nb=0
for device in flavor_dict['extended'].get('devices',[]):
if "image" not in device:
continue
image_dict={'location':device['image'], 'name':flavor_dict['name']+str(dev_nb)+"-img", 'description':flavor_dict.get('description')}
image_metadata_dict = device.get('image metadata', None)
image_metadata_str = None
if image_metadata_dict != None:
image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
image_dict['metadata']=image_metadata_str
res, image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
if res < 0:
return res, image_id + rollback(mydb, vims, rollback_list)[1]
print "Additional disk image id for VNFC %s: %s" % (flavor_dict['name']+str(dev_nb)+"-img", image_id)
dev_image_list.append(image_id)
dev_nb += 1
temp_flavor_dict['name'] = flavor_dict['name']
temp_flavor_dict['description'] = flavor_dict.get('description',None)
res,content = mydb.new_row('flavors', temp_flavor_dict, tenant_id=None, add_uuid=True)
if res>0:
flavor_mano_id= content
rollback_list.append({"where":"mano", "what":"flavor","uuid":flavor_mano_id})
else:
return res if res<0 else -1, content
#create flavor at every vim
if 'uuid' in flavor_dict:
del flavor_dict['uuid']
flavor_vim_id=None
for vim_id,vim in vims.items():
flavor_created="false"
#look at database
res_db,flavor_db = mydb.get_table(FROM="datacenters_flavors", WHERE={'datacenter_id':vim_id, 'flavor_id':flavor_mano_id})
if res_db<0:
return res_db, flavor_db
#look at VIM if this flavor exist SKIPPED
#res_vim, flavor_vim_id = vim.get_flavor_id_from_path(flavor_dict['location'])
#if res_vim < 0:
# print "Error contacting VIM to know if the flavor %s existed previously." %flavor_vim_id
# continue
#elif res_vim==0:
#Create the flavor in VIM
#Translate images at devices from MANO id to VIM id
error=False
if 'extended' in flavor_dict and flavor_dict['extended']!=None and "devices" in flavor_dict['extended']:
#make a copy of original devices
devices_original=[]
for device in flavor_dict["extended"].get("devices",[]):
dev={}
dev.update(device)
devices_original.append(dev)
if 'image' in device:
del device['image']
if 'image metadata' in device:
del device['image metadata']
dev_nb=0
for index in range(0,len(devices_original)) :
device=devices_original[index]
if "image" not in device:
continue
image_dict={'location':device['image'], 'name':flavor_dict['name']+str(dev_nb)+"-img", 'description':flavor_dict.get('description')}
image_metadata_dict = device.get('image metadata', None)
image_metadata_str = None
if image_metadata_dict != None:
image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
image_dict['metadata']=image_metadata_str
r,image_mano_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False)
if r<0:
print "Error creating device image for flavor: %s." %image_mano_id
error=True
break
image_dict["uuid"]=image_mano_id
r,image_vim_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=True)
if r<0:
print "Error creating device image for flavor at VIM: %s." %image_vim_id
error=True
break
flavor_dict["extended"]["devices"][index]['imageRef']=image_vim_id
dev_nb += 1
if error:
continue
if res_db>0:
#check that this vim_id exist in VIM, if not create
flavor_vim_id=flavor_db[0]["vim_id"]
result, _ = vim.get_tenant_flavor(flavor_vim_id)
if result>=0: #flavor exist
continue
#create flavor at vim
print "nfvo.create_or_use_flavor() adding flavor to VIM %s" % vim["name"]
result, flavor_vim_id = vim.new_tenant_flavor(flavor_dict)
if result < 0:
print "Error creating flavor at VIM %s: %s." %(vim["name"], flavor_vim_id)
continue
else:
rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"flavor","uuid":flavor_vim_id})
flavor_created="true"
#if reach here the flavor has been create or exist
if res_db==0:
#add new vim_id at datacenters_flavors
mydb.new_row('datacenters_flavors', {'datacenter_id':vim_id, 'flavor_id':flavor_mano_id, 'vim_id': flavor_vim_id, 'created':flavor_created})
elif flavor_db[0]["vim_id"]!=flavor_vim_id:
#modify existing vim_id at datacenters_flavors
mydb.update_rows('datacenters_flavors', UPDATE={'vim_id':flavor_vim_id}, WHERE={'datacenter_id':vim_id, 'flavor_id':flavor_mano_id})
return 1, flavor_vim_id if only_create_at_vim else flavor_mano_id
def new_vnf(mydb,nfvo_tenant,vnf_descriptor,public=True,physical=False,datacenter=None,vim_tenant=None):
global global_config
# With future versions of the VNFD, different code might be applied for each version.
# Depending on the new structure of the VNFD (identified by version in vnf_descriptor), we should have separate code for each version, or integrated code with small changes.
# Step 1. Check the VNF descriptor
result, message = check_vnf_descriptor(vnf_descriptor)
if result < 0:
print "new_vnf error: %s" %message
return result, message
print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
# Step 2. Get the URL of the VIM from the nfvo_tenant and the datacenter
result, vims = get_vim(mydb, nfvo_tenant, datacenter, None, vim_tenant)
if result < 0:
print "nfvo.new_vnf() error. Datacenter not found"
return result, vims
# Step 4. Review the descriptor and add missing fields
#print vnf_descriptor
print "Refactoring VNF descriptor with fields: description, physical (default: false), public (default: true)"
vnf_name = vnf_descriptor['vnf']['name']
vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
vnf_descriptor['vnf']['physical'] = vnf_descriptor['vnf'].get("physical", False)
vnf_descriptor['vnf']['public'] = vnf_descriptor['vnf'].get("public", True)
print vnf_descriptor
#TODO:
#If VNF is public, we should take it into account when creating images and flavors
# Step 5. Check internal connections
# TODO: to be moved to step 1????
    internal_connections=vnf_descriptor['vnf'].get('internal-connections',[])
for ic in internal_connections:
if len(ic['elements'])>2 and ic['type']=='ptp':
return -HTTP_Bad_Request, "Mismatch 'type':'ptp' with %d elements at 'vnf':'internal-conections'['name':'%s']. Change 'type' to 'data'" %(len(ic), ic['name'])
elif len(ic['elements'])==2 and ic['type']=='data':
return -HTTP_Bad_Request, "Mismatch 'type':'data' with 2 elements at 'vnf':'internal-conections'['name':'%s']. Change 'type' to 'ptp'" %(ic['name'])
# Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
print 'BEGIN creation of VNF "%s"' % vnf_name
print "VNF %s: consisting of %d VNFC(s)" % (vnf_name,len(vnf_descriptor['vnf']['VNFC']))
#For each VNFC, we add it to the VNFCDict and we create a flavor.
VNFCDict = {} # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
rollback_list = [] # It will contain the new images created in mano. It is used for rollback
try:
print "Creating additional disk images and new flavors in the VIM for each VNFC"
for vnfc in vnf_descriptor['vnf']['VNFC']:
VNFCitem={}
VNFCitem["name"] = vnfc['name']
VNFCitem["description"] = vnfc.get("description", 'VM %s of the VNF %s' %(vnfc['name'],vnf_name))
print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])
myflavorDict = {}
myflavorDict["name"] = vnfc['name']+"-flv"
myflavorDict["description"] = VNFCitem["description"]
myflavorDict["ram"] = vnfc.get("ram", 0)
myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
myflavorDict["disk"] = vnfc.get("disk", 1)
myflavorDict["extended"] = {}
devices = vnfc.get("devices")
if devices != None:
myflavorDict["extended"]["devices"] = devices
# TODO:
# Mapping from processor models to rankings should be available somehow in the NFVO. They could be taken from VIM or directly from a new database table
# Another option is that the processor in the VNF descriptor specifies directly the ranking of the host
# Previous code has been commented
#if vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" :
# myflavorDict["flavor"]['extended']['processor_ranking'] = 200
#elif vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz" :
# myflavorDict["flavor"]['extended']['processor_ranking'] = 300
#else:
# result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
# if result2:
# print "Error creating flavor: unknown processor model. Rollback successful."
# return -HTTP_Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
# else:
# return -HTTP_Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
myflavorDict['extended']['processor_ranking'] = 100 #Hardcoded value, while we decide when the mapping is done
if 'numas' in vnfc and len(vnfc['numas'])>0:
myflavorDict['extended']['numas'] = vnfc['numas']
#print myflavorDict
# Step 6.2 New flavors are created in the VIM
res, flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
if res < 0:
return res, flavor_id + rollback(mydb, vims, rollback_list)[1]
print "Flavor id for VNFC %s: %s" % (vnfc['name'],flavor_id)
VNFCitem["flavor_id"] = flavor_id
VNFCDict[vnfc['name']] = VNFCitem
print "Creating new images in the VIM for each VNFC"
# Step 6.3 New images are created in the VIM
#For each VNFC, we must create the appropriate image.
#This "for" loop might be integrated with the previous one
#In case this integration is made, the VNFCDict might become a VNFClist.
for vnfc in vnf_descriptor['vnf']['VNFC']:
print "Image name: %s. Description: %s" % (vnfc['name']+"-img", VNFCDict[vnfc['name']]['description'])
image_dict={'location':vnfc['VNFC image'], 'name':vnfc['name']+"-img", 'description':VNFCDict[vnfc['name']]['description']}
image_metadata_dict = vnfc.get('image metadata', None)
image_metadata_str = None
if image_metadata_dict is not None:
image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
image_dict['metadata']=image_metadata_str
#print "create_or_use_image", mydb, vims, image_dict, rollback_list
res, image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
if res < 0:
return res, image_id + rollback(mydb, vims, rollback_list)[1]
print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
VNFCDict[vnfc['name']]["image_id"] = image_id
VNFCDict[vnfc['name']]["image_path"] = vnfc['VNFC image']
except KeyError as e:
print "Error while creating a VNF. KeyError: " + str(e)
_, message = rollback(mydb, vims, rollback_list)
return -HTTP_Internal_Server_Error, "Error while creating a VNF. KeyError " + str(e) + "." + message
# Step 7. Storing the VNF in the repository
print "Storing YAML file of the VNF"
vnf_descriptor_filename = global_config['vnf_repository'] + "/" + vnf_name + ".vnfd"
if not os.path.exists(vnf_descriptor_filename):
        f = open(vnf_descriptor_filename, "w")
yaml.safe_dump(vnf_descriptor, stream=f, indent=4, explicit_start=True, default_flow_style=False)
f.close()
# Step 8. Adding the VNF to the NFVO DB
try:
result, vnf_id = mydb.new_vnf_as_a_whole(nfvo_tenant,vnf_name,vnf_descriptor_filename,vnf_descriptor,VNFCDict)
except KeyError as e:
print "Error while creating a VNF. KeyError: " + str(e)
_, message = rollback(mydb, vims, rollback_list)
return -HTTP_Internal_Server_Error, "Error while creating a VNF. KeyError " + str(e) + "." + message
if result < 0:
_, message = rollback(mydb, vims, rollback_list)
return result, vnf_id + "." + message
return 200,vnf_id
def delete_vnf(mydb,nfvo_tenant,vnf_id,datacenter=None,vim_tenant=None):
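    '''Deletes a VNF given its uuid or name.
    Removes the VNF from the database and deletes its flavors and images, both from the database
    and from the VIMs (only those created by openmano), when no other VNF is using them.
    Returns 200 and the VNF uuid (or a warning listing undeleted items), or a negative HTTP code and an error text.
    '''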
print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
result, vims = get_vim(mydb, nfvo_tenant, datacenter, None, vim_tenant)
if result < 0:
return -HTTP_Unauthorized, "delete_vnf error. No VIM found for tenant '%s'" % nfvo_tenant
print "Checking if it is a valid uuid and, if not, getting the uuid assuming that the name was provided"
if not af.check_valid_uuid(vnf_id):
result,vnf_id = mydb.get_uuid_from_name('vnfs',vnf_id)
if result==0:
return -HTTP_Not_Found, "No VNF found for tenant. '%s'" % vnf_id
elif result<0:
return -HTTP_Internal_Server_Error, "delete_vnf error. Internal server error. %s" % vnf_id
elif result>1:
return -HTTP_Not_Found, "Found more than one VNF. %s" % vnf_id
print "Getting the list of flavors and tenants of the VNF"
result,flavorList = get_flavorlist(mydb, vnf_id)
if result < 0:
print flavorList
elif result==0:
print "delete_vnf error. No flavors found for the VNF id '%s'" % vnf_id
result,imageList = get_imagelist(mydb, vnf_id)
print "imageList", imageList
if result < 0:
print imageList
elif result==0:
print "delete_vnf error. No images found for the VNF id '%s'" % vnf_id
result, content = mydb.delete_row('vnfs', vnf_id, nfvo_tenant)
if result == 0:
return -HTTP_Not_Found, content
elif result >0:
print content
else:
print "delete_vnf error",result, content
return result, content
undeletedItems = []
for flavor in flavorList:
#check if flavor is used by other vnf
r,c = mydb.get_table(FROM='vms', WHERE={'flavor_id':flavor} )
if r < 0:
print 'delete_vnf_error. Not possible to delete VIM flavor "%s". %s' % (flavor,c)
undeletedItems.append("flavor "+ flavor["flavor_id"])
elif r > 0:
print 'Flavor %s not deleted because it is being used by another VNF %s' %(flavor,str(c))
continue
#flavor not used, must be deleted
        #delete at VIM
r,c = mydb.get_table(FROM='datacenters_flavors', WHERE={'flavor_id':flavor})
if r>0:
for flavor_vim in c:
if flavor_vim["datacenter_id"] not in vims:
continue
if flavor_vim['created']=='false': #skip this flavor because not created by openmano
continue
myvim=vims[ flavor_vim["datacenter_id"] ]
result, message = myvim.delete_tenant_flavor(flavor_vim["vim_id"])
if result < 0:
print 'delete_vnf_error. Not possible to delete VIM flavor "%s". Message: %s' % (flavor,message)
if result != -HTTP_Not_Found:
undeletedItems.append("flavor %s from VIM %s" % (flavor_vim["vim_id"], flavor_vim["datacenter_id"] ))
#delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors
result, content = mydb.delete_row('flavors', flavor)
if result <0:
undeletedItems.append("flavor %s" % flavor)
for image in imageList:
#check if image is used by other vnf
r,c = mydb.get_table(FROM='vms', WHERE={'image_id':image} )
if r < 0:
print 'delete_vnf_error. Not possible to delete VIM image "%s". %s' % (image,c)
undeletedItems.append("image "+ image["image_id"])
elif r > 0:
print 'Image %s not deleted because it is being used by another VNF %s' %(image,str(c))
continue
#image not used, must be deleted
        #delete at VIM
r,c = mydb.get_table(FROM='datacenters_images', WHERE={'image_id':image})
if r>0:
for image_vim in c:
if image_vim["datacenter_id"] not in vims:
continue
if image_vim['created']=='false': #skip this image because not created by openmano
continue
myvim=vims[ image_vim["datacenter_id"] ]
result, message = myvim.delete_tenant_image(image_vim["vim_id"])
if result < 0:
print 'delete_vnf_error. Not possible to delete VIM image "%s". Message: %s' % (image,message)
if result != -HTTP_Not_Found:
undeletedItems.append("image %s from VIM %s" % (image_vim["vim_id"], image_vim["datacenter_id"] ))
#delete image from Database, using table images and with cascade foreign key also at datacenters_images
result, content = mydb.delete_row('images', image)
if result <0:
undeletedItems.append("image %s" % image)
if undeletedItems:
return 200, "delete_vnf error. Undeleted: %s" %(undeletedItems)
return 200,vnf_id
def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
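    '''Gets the compute host information from the VIM of the tenant (optionally filtered by datacenter name).
    Returns the VIM result and a topology dictionary with the VIM name and its servers, or a negative code and an error text.
    '''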
result, vims = get_vim(mydb, nfvo_tenant_id, None, datacenter_name)
if result < 0:
return result, vims
myvim = vims.values()[0]
result,servers = myvim.get_hosts_info()
if result < 0:
return result, servers
topology = {'name':myvim['name'] , 'servers': servers}
return result, topology
def get_hosts(mydb, nfvo_tenant_id):
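    '''Gets the hosts and their VM instances from the VIM of the tenant, mapping each VIM VM to its openmano VM model.
    Returns the VIM result and a 'Datacenters' dictionary, or a negative code and an error text.
    '''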
result, vims = get_vim(mydb, nfvo_tenant_id)
if result < 0:
return result, vims
myvim = vims.values()[0]
result,hosts = myvim.get_hosts()
if result < 0:
return result, hosts
print '==================='
print 'hosts '+ yaml.safe_dump(hosts, indent=4, default_flow_style=False)
datacenter = {'Datacenters': [ {'name':myvim['name'],'servers':[]} ] }
for host in hosts:
server={'name':host['name'], 'vms':[]}
for vm in host['instances']:
#get internal name and model
r,c = mydb.get_table(SELECT=('name',), FROM='instance_vms as iv join vms on iv.vm_id=vms.uuid',\
WHERE={'vim_vm_id':vm['id']} )
if r==0:
print "nfvo.get_hosts virtual machine at VIM (%s) not found at tidnfvo" % vm['id']
continue
if r<0:
print "nfvo.get_hosts virtual machine at VIM (%s) error %d %s" % (vm['id'], r, c)
continue
server['vms'].append( {'name':vm['name'] , 'model':c[0]['name']} )
datacenter['Datacenters'][0]['servers'].append(server)
    #return -400, "under construction"
#print 'datacenters '+ json.dumps(datacenter, indent=4)
return result, datacenter
def new_scenario(mydb, nfvo_tenant_id, topo):
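    '''Creates a new scenario from a topology descriptor.
    Validates the referenced VNFs and external networks against the database, consolidates the
    connections into networks, attaches unconnected 'mgmt' interfaces to the management network
    and stores the scenario in the database.
    Returns the database result and the scenario uuid, or a negative HTTP code and an error text.
    '''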
# TODO: With future versions of the NSD, different code might be applied for each version.
# Depending on the new structure of the NSD (identified by version in topo), we should have separate code for each version, or integrated code with small changes.
# result, vims = get_vim(mydb, nfvo_tenant_id)
# if result < 0:
# return result, vims
#1: parse input
#1.1: get VNFs and external_networks (other_nets).
vnfs={}
    other_nets={}  #external_networks, bridge_networks and data_networks
nodes = topo['topology']['nodes']
for k in nodes.keys():
if nodes[k]['type'] == 'VNF':
vnfs[k] = nodes[k]
vnfs[k]['ifaces'] = {}
elif nodes[k]['type'] == 'other_network' or nodes[k]['type'] == 'external_network':
other_nets[k] = nodes[k]
other_nets[k]['external']=True
elif nodes[k]['type'] == 'network':
other_nets[k] = nodes[k]
other_nets[k]['external']=False
    #1.2: Check that the VNFs are present at database table vnfs. Insert uuid, description and external interfaces
for name,vnf in vnfs.items():
WHERE_={}
error_text = ""
error_pos = "'topology':'nodes':'" + name + "'"
if 'vnf_id' in vnf:
error_text += " 'vnf_id' " + vnf['vnf_id']
WHERE_['uuid'] = vnf['vnf_id']
if 'VNF model' in vnf:
error_text += " 'VNF model' " + vnf['VNF model']
WHERE_['name'] = vnf['VNF model']
if len(WHERE_) == 0:
return -HTTP_Bad_Request, "needed a 'vnf_id' or 'VNF model' at " + error_pos
r,vnf_db = mydb.get_table(SELECT=('uuid','name','description'), FROM='vnfs', WHERE=WHERE_)
if r<0:
print "nfvo.new_scenario Error getting vnfs",r,vnf_db
elif r==0:
print "nfvo.new_scenario Error" + error_text + " is not present at database"
return -HTTP_Bad_Request, "unknown" + error_text + " at " + error_pos
elif r>1:
print "nfvo.new_scenario Error more than one" + error_text + " are present at database"
return -HTTP_Bad_Request, "more than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'"
vnf['uuid']=vnf_db[0]['uuid']
vnf['description']=vnf_db[0]['description']
#get external interfaces
r,ext_ifaces = mydb.get_table(SELECT=('external_name as name','i.uuid as iface_uuid', 'i.type as type'),
FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
WHERE={'vnfs.uuid':vnf['uuid']}, WHERE_NOTNULL=('external_name',) )
if r<0:
print "nfvo.new_scenario Error getting external interfaces of vnfs",r,ext_ifaces
return -HTTP_Internal_Server_Error, "Error getting external interfaces of vnfs: " + ext_ifaces
for ext_iface in ext_ifaces:
vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type':ext_iface['type']}
#1.4 get list of connections
conections = topo['topology']['connections']
conections_list = []
for k in conections.keys():
if type(conections[k]['nodes'])==dict: #dict with node:iface pairs
ifaces_list = conections[k]['nodes'].items()
elif type(conections[k]['nodes'])==list: #list with dictionary
ifaces_list=[]
conection_pair_list = map(lambda x: x.items(), conections[k]['nodes'] )
for k2 in conection_pair_list:
ifaces_list += k2
conections_list.append(set(ifaces_list)) #from list to set to operate as a set (this conversion removes elements that are repeated in a list)
#print set(ifaces_list)
#check valid VNF and iface names
for iface in ifaces_list:
if iface[0] not in vnfs and iface[0] not in other_nets :
return -HTTP_Bad_Request, "format error. Invalid VNF name at 'topology':'connections':'%s':'nodes':'%s'" % (str(k), iface[0])
if iface[0] in vnfs and iface[1] not in vnfs[ iface[0] ]['ifaces']:
return -HTTP_Bad_Request, "format error. Invalid interface name at 'topology':'connections':'%s':'nodes':'%s':'%s'" % (str(k), iface[0], iface[1])
#1.5 unify connections from the pair list to a consolidated list
index=0
while index < len(conections_list):
index2 = index+1
while index2 < len(conections_list):
if len(conections_list[index] & conections_list[index2])>0: #common interface, join nets
conections_list[index] |= conections_list[index2]
del conections_list[index2]
else:
index2 += 1
conections_list[index] = list(conections_list[index]) # from set to list again
index += 1
#for k in conections_list:
# print k
#1.6 Delete non external nets
# for k in other_nets.keys():
# if other_nets[k]['model']=='bridge' or other_nets[k]['model']=='dataplane_net' or other_nets[k]['model']=='bridge_net':
# for con in conections_list:
# delete_indexes=[]
# for index in range(0,len(con)):
# if con[index][0] == k: delete_indexes.insert(0,index) #order from higher to lower
# for index in delete_indexes:
# del con[index]
# del other_nets[k]
#1.7: Check external_ports are present at database table datacenter_nets
for k,net in other_nets.items():
error_pos = "'topology':'nodes':'" + k + "'"
if net['external']==False:
if 'name' not in net:
net['name']=k
if 'model' not in net:
return -HTTP_Bad_Request, "needed a 'model' at " + error_pos
if net['model']=='bridge_net':
                net['type']='bridge'
            elif net['model']=='dataplane_net':
                net['type']='data'
else:
return -HTTP_Bad_Request, "unknown 'model' '"+ net['model'] +"' at " + error_pos
else: #external
error_text = ""
WHERE_={}
if 'net_id' in net:
error_text += " 'net_id' " + net['net_id']
WHERE_['uuid'] = net['net_id']
if 'model' in net:
error_text += " 'model' " + net['model']
WHERE_['name'] = net['model']
if len(WHERE_) == 0:
return -HTTP_Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
r,net_db = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
FROM='datacenter_nets', WHERE=WHERE_ )
if r<0:
print "nfvo.new_scenario Error getting datacenter_nets",r,net_db
elif r==0:
print "nfvo.new_scenario Error" +error_text+ " is not present at database"
return -HTTP_Bad_Request, "unknown " +error_text+ " at " + error_pos
#elif r>1:
# print "nfvo.new_scenario Error more than one external_network for " +error_text+ " is present at database"
# return -HTTP_Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
other_nets[k].update(net_db[0])
net_list={}
net_nb=0 #Number of nets
for con in conections_list:
        #check if this is connected to an external net
other_net_index=-1
#print
#print "con", con
for index in range(0,len(con)):
            #check if this is connected to an external net
for net_key in other_nets.keys():
if con[index][0]==net_key:
if other_net_index>=0:
error_text="There is some interface connected both to net '%s' and net '%s'" % (con[other_net_index][0], net_key)
print "nfvo.new_scenario " + error_text
return -HTTP_Bad_Request, error_text
else:
other_net_index = index
net_target = net_key
break
#print "other_net_index", other_net_index
try:
if other_net_index>=0:
del con[other_net_index]
if other_nets[net_target]['external'] :
                    type_='data' if len(con)>1 else 'ptp'    #an external net is connected to an external port, so it is ptp if only one connection is done to this net
if type_=='data' and other_nets[net_target]['type']=="ptp":
error_text = "Error connecting %d nodes on a not multipoint net %s" % (len(con), net_target)
print "nfvo.new_scenario " + error_text
return -HTTP_Bad_Request, error_text
for iface in con:
vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
else:
#create a net
net_type_bridge=False
net_type_data=False
net_target = "__-__net"+str(net_nb)
net_list[net_target] = {'name': "net-"+str(net_nb), 'description':"net-%s in scenario %s" %(net_nb,topo['name']),
'external':False}
for iface in con:
vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
iface_type = vnfs[ iface[0] ]['ifaces'][ iface[1] ]['type']
if iface_type=='mgmt' or iface_type=='bridge':
net_type_bridge = True
else:
net_type_data = True
if net_type_bridge and net_type_data:
error_text = "Error connection interfaces of bridge type with data type. Firs node %s, iface %s" % (iface[0], iface[1])
print "nfvo.new_scenario " + error_text
return -HTTP_Bad_Request, error_text
elif net_type_bridge:
type_='bridge'
else:
type_='data' if len(con)>2 else 'ptp'
net_list[net_target]['type'] = type_
net_nb+=1
except Exception:
error_text = "Error connection node %s : %s does not match any VNF or interface" % (iface[0], iface[1])
print "nfvo.new_scenario " + error_text
#raise e
return -HTTP_Bad_Request, error_text
    #1.8: Connect to the management net all the interfaces of type 'mgmt' that are not already connected
#1.8.1 obtain management net
r,mgmt_net = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
FROM='datacenter_nets', WHERE={'name':'mgmt'} )
#1.8.2 check all interfaces from all vnfs
if r>0:
add_mgmt_net = False
for vnf in vnfs.values():
for iface in vnf['ifaces'].values():
if iface['type']=='mgmt' and 'net_key' not in iface:
#iface not connected
iface['net_key'] = 'mgmt'
add_mgmt_net = True
if add_mgmt_net and 'mgmt' not in net_list:
net_list['mgmt']=mgmt_net[0]
net_list['mgmt']['external']=True
net_list['mgmt']['graph']={'visible':False}
net_list.update(other_nets)
print
print 'net_list', net_list
print
print 'vnfs', vnfs
print
#2: insert scenario. filling tables scenarios,sce_vnfs,sce_interfaces,sce_nets
r,c = mydb.new_scenario( { 'vnfs':vnfs, 'nets':net_list,
'nfvo_tenant_id':nfvo_tenant_id, 'name':topo['name'], 'description':topo.get('description',topo['name']) } )
return r,c
def edit_scenario(mydb, nfvo_tenant_id, scenario_id, data):
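    '''Edits an existing scenario, updating it in the database with the provided data.'''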
data["uuid"] = scenario_id
data["nfvo_tenant_id"] = nfvo_tenant_id
r,c = mydb.edit_scenario( data )
return r,c
def start_scenario(mydb, nfvo_tenant, scenario_id, instance_scenario_name, instance_scenario_description, datacenter=None,vim_tenant=None, startvms=True):
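    '''Deploys an instance of a scenario over a datacenter.
    Creates the scenario and VNF-internal networks at the VIM, creates or reuses the needed
    images and flavors, launches the VM instances and stores the instance in the database.
    Created VIM elements are rolled back on failure.
    Returns the new instance scenario, or a negative code and an error text.
    '''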
print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
datacenter_id = None
datacenter_name=None
if datacenter != None:
if af.check_valid_uuid(datacenter):
datacenter_id = datacenter
else:
datacenter_name = datacenter
result, vims = get_vim(mydb, nfvo_tenant, datacenter_id, datacenter_name, vim_tenant)
if result < 0:
print "start_scenario error. Datacenter not found"
return result, vims
elif result > 1:
print "start_scenario error. Several datacenters available, must be identify"
return -HTTP_Bad_Request, "Several datacenters available, must be identify"
myvim = vims.values()[0]
myvim_tenant = myvim['tenant']
datacenter_id = myvim['id']
datacenter_name = myvim['name']
vim_tenants_uuid = myvim['config']['vim_tenants_uuid']
rollbackList=[]
print "Checking that the scenario_id exists and getting the scenario dictionary"
result, scenarioDict = mydb.get_scenario(scenario_id, nfvo_tenant, datacenter_id)
if result < 0:
print "start_scenario error. Error interacting with NFVO DB"
return result, scenarioDict
elif result == 0:
print "start_scenario error. Scenario not found"
return result, scenarioDict
scenarioDict['vim_tenant_id'] = myvim_tenant
scenarioDict['datacenter_id'] = datacenter_id
print '================scenarioDict======================='
#print json.dumps(scenarioDict, indent=4)
print 'BEGIN launching instance scenario "%s" based on "%s"' % (instance_scenario_name,scenarioDict['name'])
print "Scenario %s: consisting of %d VNF(s)" % (scenarioDict['name'],len(scenarioDict['vnfs']))
print yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False)
    auxNetDict = {}   #Auxiliary dictionary. First key: 'scenario' or sce_vnf uuid. Second key: uuid of the net/sce_net. Value: vim_net_id
auxNetDict['scenario'] = {}
print "1. Creating new nets (sce_nets) in the VIM"
for sce_net in scenarioDict['nets']:
print "Net name: %s. Description: %s" % (sce_net["name"], sce_net["description"])
myNetName = "%s-%s" % (scenarioDict['name'],sce_net['name'])
myNetName = myNetName[0:36] #limit length
myNetType = sce_net['type']
myNetDict = {}
myNetDict["name"] = myNetName
myNetDict["type"] = myNetType
myNetDict["tenant_id"] = myvim_tenant
#TODO:
#We should use the dictionary as input parameter for new_tenant_network
print myNetDict
if not sce_net["external"]:
result, network_id = myvim.new_tenant_network(myNetName, myNetType)
if result < 0:
print "Error creating network: %s." %network_id
_, message = rollback(mydb, vims, rollbackList)
return result, "Error creating network: "+ network_id + "."+message
print "New VIM network created for scenario %s. Network id: %s" % (scenarioDict['name'],network_id)
sce_net['vim_id'] = network_id
auxNetDict['scenario'][sce_net['uuid']] = network_id
rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
else:
if sce_net['vim_id'] == None:
error_text = "Error, datacenter '%s' does not have external network '%s'." % (datacenter_name, sce_net['name'])
_, message = rollback(mydb, vims, rollbackList)
print "nfvo.start_scenario: " + error_text
return -HTTP_Bad_Request, error_text
print "Using existent VIM network for scenario %s. Network id %s" % (scenarioDict['name'],sce_net['vim_id'])
auxNetDict['scenario'][sce_net['uuid']] = sce_net['vim_id']
print "2. Creating new nets (vnf internal nets) in the VIM"
#For each vnf net, we create it and we add it to instanceNetlist.
for sce_vnf in scenarioDict['vnfs']:
for net in sce_vnf['nets']:
print "Net name: %s. Description: %s" % (net["name"], net["description"])
myNetName = "%s-%s" % (scenarioDict['name'],net['name'])
myNetName = myNetName[0:36] #limit length
myNetType = net['type']
myNetDict = {}
myNetDict["name"] = myNetName
myNetDict["type"] = myNetType
myNetDict["tenant_id"] = myvim_tenant
print myNetDict
#TODO:
#We should use the dictionary as input parameter for new_tenant_network
result, network_id = myvim.new_tenant_network(myNetName, myNetType)
if result < 0:
error_text="Error creating network: %s." % network_id
_, message = rollback(mydb, vims, rollbackList)
error_text += message
print "start_scenario: " + error_text
return result, error_text
print "VIM network id for scenario %s: %s" % (scenarioDict['name'],network_id)
net['vim_id'] = network_id
if sce_vnf['uuid'] not in auxNetDict:
auxNetDict[sce_vnf['uuid']] = {}
auxNetDict[sce_vnf['uuid']][net['uuid']] = network_id
rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
print "auxNetDict:"
print yaml.safe_dump(auxNetDict, indent=4, default_flow_style=False)
print "3. Creating new vm instances in the VIM"
#myvim.new_tenant_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
i = 0
for sce_vnf in scenarioDict['vnfs']:
for vm in sce_vnf['vms']:
i += 1
myVMDict = {}
#myVMDict['name'] = "%s-%s-%s" % (scenarioDict['name'],sce_vnf['name'], vm['name'])
myVMDict['name'] = "%s-%s-VM%d" % (scenarioDict['name'],sce_vnf['name'],i)
#myVMDict['description'] = vm['description']
myVMDict['description'] = myVMDict['name'][0:99]
if not startvms:
myVMDict['start'] = "no"
myVMDict['name'] = myVMDict['name'][0:36] #limit name length
print "VM name: %s. Description: %s" % (myVMDict['name'], myVMDict['name'])
            #create image at vim in case it does not exist
res, image_dict = mydb.get_table_by_uuid_name("images", vm['image_id'])
if res<0:
print "start_scenario error getting image", image_dict
return res, image_dict
res, image_id = create_or_use_image(mydb, vims, image_dict, [], True)
if res < 0:
print "start_scenario error adding image to VIM", image_dict
return res, image_id
vm['vim_image_id'] = image_id
            #create flavor at vim in case it does not exist
res, flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
if res<0:
print "start_scenario error getting flavor", flavor_dict
return res, flavor_dict
if flavor_dict['extended']!=None:
flavor_dict['extended']= yaml.load(flavor_dict['extended'])
res, flavor_id = create_or_use_flavor(mydb, vims, flavor_dict, [], True)
if res < 0:
print "start_scenario error adding flavor to VIM", flavor_dict
return res, flavor_id
vm['vim_flavor_id'] = flavor_id
myVMDict['imageRef'] = vm['vim_image_id']
myVMDict['flavorRef'] = vm['vim_flavor_id']
myVMDict['networks'] = []
for iface in vm['interfaces']:
netDict = {}
if iface['type']=="data":
netDict['type'] = iface['model']
elif "model" in iface and iface["model"]!=None:
netDict['model']=iface['model']
                #TODO in future, remove this because mac_address will not be set, and the type of PF,VF is obtained from the interface table model
#discover type of interface looking at flavor
for numa in flavor_dict.get('extended',{}).get('numas',[]):
for flavor_iface in numa.get('interfaces',[]):
if flavor_iface.get('name') == iface['internal_name']:
if flavor_iface['dedicated'] == 'yes':
netDict['type']="PF" #passthrough
elif flavor_iface['dedicated'] == 'no':
netDict['type']="VF" #siov
elif flavor_iface['dedicated'] == 'yes:sriov':
netDict['type']="VFnotShared" #sriov but only one sriov on the PF
netDict["mac_address"] = flavor_iface.get("mac_address")
                            break
netDict["use"]=iface['type']
if netDict["use"]=="data" and not netDict.get("type"):
#print "netDict", netDict
#print "iface", iface
e_text = "Cannot determine the interface type PF or VF of VNF '%s' VM '%s' iface '%s'" %(sce_vnf['name'], vm['name'], iface['internal_name'])
if flavor_dict.get('extended')==None:
                        return -HTTP_Conflict, e_text + ". After database migration some information is not available. Try to delete and create the scenarios and VNFs again"
else:
return -HTTP_Internal_Server_Error, e_text
if netDict["use"]=="mgmt" or netDict["use"]=="bridge":
netDict["type"]="virtual"
if "vpci" in iface and iface["vpci"] is not None:
netDict['vpci'] = iface['vpci']
netDict['name'] = iface['internal_name']
if iface['net_id'] is None:
for vnf_iface in sce_vnf["interfaces"]:
print iface
print vnf_iface
if vnf_iface['interface_id']==iface['uuid']:
netDict['net_id'] = auxNetDict['scenario'][ vnf_iface['sce_net_id'] ]
break
else:
netDict['net_id'] = auxNetDict[ sce_vnf['uuid'] ][ iface['net_id'] ]
#skip bridge ifaces not connected to any net
#if 'net_id' not in netDict or netDict['net_id']==None:
# continue
myVMDict['networks'].append(netDict)
print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
print myVMDict['name']
print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
result, vm_id = myvim.new_tenant_vminstance(myVMDict['name'],myVMDict['description'],myVMDict.get('start', None),
myVMDict['imageRef'],myVMDict['flavorRef'],myVMDict['networks'])
if result < 0:
error_text = "Error creating vm instance: %s." % vm_id
_, message = rollback(mydb, vims, rollbackList)
error_text += message
print "start_scenario: " + error_text
return result, error_text
print "VIM vm instance id (server id) for scenario %s: %s" % (scenarioDict['name'],vm_id)
vm['vim_id'] = vm_id
rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
            #put interface uuid back to scenario[vnfs][vms][interfaces]
for net in myVMDict['networks']:
if "vim_id" in net:
for iface in vm['interfaces']:
if net["name"]==iface["internal_name"]:
iface["vim_id"]=net["vim_id"]
break
print "==================Deployment done=========="
scenarioDict['vim_tenants_uuid'] = vim_tenants_uuid
print yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False)
#r,c = mydb.new_instance_scenario_as_a_whole(nfvo_tenant,scenarioDict['name'],scenarioDict)
result,c = mydb.new_instance_scenario_as_a_whole(nfvo_tenant,instance_scenario_name, instance_scenario_description, scenarioDict)
if result <0:
error_text = c + "."
_, message = rollback(mydb, vims, rollbackList)
error_text += message
print "start_scenario: " + error_text
return result, error_text
return mydb.get_instance_scenario(c)
def delete_instance(mydb,nfvo_tenant,instance_id):
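    '''Deletes a scenario instance: removes it from the database and deletes its VMs and
    non-external networks from the VIM.
    Returns a negative code and an error text on database errors, or 1 and a result message
    that includes any VIM elements that could not be deleted.
    '''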
print "Checking that the instance_id exists and getting the instance dictionary"
result, instanceDict = mydb.get_instance_scenario(instance_id, nfvo_tenant)
if result < 0:
print "nfvo.delete_instance() error. Error getting info from database"
return result, instanceDict
elif result == 0:
print "delete_instance error. Instance not found"
return result, instanceDict
print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
result, vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
if result < 0:
print "nfvo.delete_instance() error. Datacenter not found"
return result, vims
myvim = vims.values()[0]
#1. Delete from Database
#result,c = mydb.delete_row('instance_scenarios', instance_id, nfvo_tenant)
result,c = mydb.delete_instance_scenario(instance_id, nfvo_tenant)
if result<0:
return result, c
#2. delete from VIM
error_msg = ""
#2.1 deleting VMs
#vm_fail_list=[]
for sce_vnf in instanceDict['vnfs']:
for vm in sce_vnf['vms']:
result, vm_id = myvim.delete_tenant_vminstance(vm['vim_vm_id'])
if result < 0:
error_msg+="\n Error: " + str(-result) + " VM id=" + vm['vim_vm_id']
#if result != -HTTP_Not_Found: vm_fail_list.append(vm)
print "Error " + str(-result) + " deleting VM instance '" + vm['name'] + "', uuid '" + vm['uuid'] + "', VIM id '" + vm['vim_vm_id'] + "', from VNF_id '" + sce_vnf['vnf_id'] + "':" + vm_id
#2.2 deleting NETS
#net_fail_list=[]
for net in instanceDict['nets']:
if net['external']:
continue #skip not created nets
result, net_id = myvim.delete_tenant_network(net['vim_net_id'])
if result < 0:
error_msg += "\n Error: " + str(-result) + " NET id=" + net['vim_net_id']
#if result == -HTTP_Not_Found: net_fail_list.append(net)
print "Error " + str(-result) + " deleting NET uuid '" + net['uuid'] + "', VIM id '" + net['vim_net_id'] + "':" + net_id
if len(error_msg)>0:
        return 1, 'instance ' + instance_id + ' deleted, but some elements could not be deleted from the VIM (or were already deleted, error 404): ' + error_msg
else:
return 1, 'instance ' + instance_id + ' deleted'
def refresh_instance(mydb, nfvo_tenant, instanceDict, datacenter=None, vim_tenant=None):
'''Refreshes a scenario instance. It modifies instanceDict'''
'''Returns:
- result: <0 if there is any unexpected error, n>=0 if no errors where n is the number of vms and nets that couldn't be updated in the database
- error_msg
'''
# Assumption: nfvo_tenant and instance_id were checked before entering into this function
print "nfvo.refresh_instance begins"
#print json.dumps(instanceDict, indent=4)
print "Getting the VIM URL and the VIM tenant_id"
result, vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
if result < 0:
print "nfvo.refresh_instance() error. Datacenter not found"
return result, vims
myvim = vims.values()[0]
# 1. Getting the status of all VMs
vmDict = {}
netDict = {}
for sce_vnf in instanceDict['vnfs']:
for vm in sce_vnf['vms']:
vmDict[vm['vim_vm_id']]=vm['status']
# 2. Getting the status of all nets
# TODO: update nets inside a vnf
for net in instanceDict['nets']:
#if net['external']:
# continue #skip not created nets
netDict[net['vim_net_id']]=net['status']
    # 3. Refresh the status of VMs and nets from VIM. It updates vmDict and netDict
result, refresh_message = myvim.refresh_tenant_vms_and_nets(vmDict, netDict)
if result < 0:
return result, refresh_message
    # 4. Update the status of VMs in the instanceDict, while collecting the VMs whose status changed
vms_updated = {} #Dictionary of VM instance uuids in openmano that were updated
for sce_vnf in instanceDict['vnfs']:
for vm in sce_vnf['vms']:
status = vmDict[vm['vim_vm_id']]
if vm['status']!=status:
vm['status']=status
vms_updated[vm['uuid']]=status
    # 5. Update the status of nets in the instanceDict, while collecting the nets whose status changed
nets_updated = {} #Dictionary of net instance uuids in openmano that were updated
# TODO: update nets inside a vnf
for net in instanceDict['nets']:
#if net['external']:
# continue #skip not created nets
status = netDict[net['vim_net_id']]
if net['status']!=status:
net['status']=status
nets_updated[net['uuid']]=status
# 6. Update in openmano DB the VMs whose status changed
vms_notupdated=[]
for vm in vms_updated:
result2, _ = mydb.update_rows('instance_vms', UPDATE={'status':vms_updated[vm]},
WHERE={'uuid':vm})
if result2<0:
vms_updated.pop(vm)
vms_notupdated.append(vm)
elif result2==0:
print "WARNING: status of vm instance %s should have been updated to %s" %(vm,vms_updated[vm])
# 7. Update in openmano DB the nets whose status changed
nets_notupdated=[]
for net in nets_updated:
result2, _ = mydb.update_rows('instance_nets', UPDATE={'status':nets_updated[net]},
WHERE={'uuid':net})
if result2<0:
nets_updated.pop(net)
nets_notupdated.append(net)
elif result2==0:
print "WARNING: status of net instance %s should have been updated to %s" %(net,nets_updated[net])
# Returns appropriate output
print "nfvo.refresh_instance finishes"
print "VMs updated in the database: %s; nets updated in the database %s; VMs not updated: %s; nets not updated: %s" \
% (str(vms_updated), str(nets_updated), str(vms_notupdated), str(nets_notupdated))
instance_id = instanceDict['uuid']
error_msg=refresh_message
if len(vms_notupdated)+len(nets_notupdated)>0:
if len(refresh_message)>0:
error_msg += "; "
error_msg += "VMs not updated: " + str(vms_notupdated) + "; nets not updated: " + str(nets_notupdated)
return len(vms_notupdated)+len(nets_notupdated), 'Scenario instance ' + instance_id + ' refreshed but some elements could not be updated in the database: ' + error_msg
return 0, 'Scenario instance ' + instance_id + ' refreshed. ' + error_msg
def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
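    '''Performs an action (the content of action_dict, e.g. a console request) over the VMs of a scenario instance.
    The action can be restricted to the VNFs or VMs listed at the 'vnfs'/'vms' keys of action_dict.
    Returns a negative code and an error text, or 1 and a per-VM result dictionary.
    '''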
print "Checking that the instance_id exists and getting the instance dictionary"
result, instanceDict = mydb.get_instance_scenario(instance_id, nfvo_tenant)
if result < 0:
print "nfvo.instance_action() error. Error getting info from database"
return result, instanceDict
elif result == 0:
print "instance_action error. Instance not found"
return -HTTP_Not_Found, "instance %s not found" % instance_id
#print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
result, vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
if result < 0:
print "nfvo.instance_action() error. Datacenter not found"
return result, vims
myvim = vims.values()[0]
input_vnfs = action_dict.pop("vnfs", [])
input_vms = action_dict.pop("vms", [])
    action_over_all = len(input_vnfs)==0 and len(input_vms)==0
vm_result = {}
vm_error = 0
vm_ok = 0
for sce_vnf in instanceDict['vnfs']:
for vm in sce_vnf['vms']:
if not action_over_all:
if sce_vnf['uuid'] not in input_vnfs and sce_vnf['vnf_name'] not in input_vnfs and \
vm['uuid'] not in input_vms and vm['name'] not in input_vms:
continue
result, data = myvim.action_tenant_vminstance(vm['vim_vm_id'], action_dict)
if result < 0:
vm_result[ vm['uuid'] ] = {"vim_result": -result, "name":vm['name'], "description": data}
vm_error+=1
else:
if "console" in action_dict:
if data["server"]=="127.0.0.1" or data["server"]=="localhost":
vm_result[ vm['uuid'] ] = {"vim_result": -HTTP_Unauthorized,
"description": "this console is only reachable by local interface",
"name":vm['name']
}
vm_error+=1
continue
#print "console data", data
r2, console_thread = create_or_use_console_proxy_thread(data["server"], data["port"])
if r2<0:
vm_result[ vm['uuid'] ] = {"vim_result": -r2, "name":vm['name'], "description": console_thread}
else:
vm_result[ vm['uuid'] ] = {"vim_result": result,
"description": "%s//%s:%d/%s" %(data["protocol"], console_thread.host, console_thread.port, data["suffix"]),
"name":vm['name']
}
vm_ok +=1
else:
vm_result[ vm['uuid'] ] = {"vim_result": result, "description": "ok", "name":vm['name']}
vm_ok +=1
    return 1, vm_result
def create_or_use_console_proxy_thread(console_server, console_port):
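    '''Returns an existing console proxy thread for console_server:console_port, or creates a new one
    listening on a free port taken from the configured console port range.
    Returns 1 and the thread, or -1 and an error text.
    '''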
#look for a non-used port
console_thread_key = console_server + ":" + str(console_port)
if console_thread_key in global_config["console_thread"]:
#global_config["console_thread"][console_thread_key].start_timeout()
return 1, global_config["console_thread"][console_thread_key]
for port in global_config["console_port_iterator"]():
print "create_or_use_console_proxy_thread() port:", port
if port in global_config["console_ports"]:
continue
try:
clithread = cli.ConsoleProxyThread(global_config['http_host'], port, console_server, console_port)
clithread.start()
global_config["console_thread"][console_thread_key] = clithread
global_config["console_ports"][port] = console_thread_key
return 1, clithread
except cli.ConsoleProxyExceptionPortUsed as e:
            #port used, try with another
continue
except cli.ConsoleProxyException as e:
return -1, str(e)
return -1, "Not found any free 'http_console_ports'"
def check_tenant(mydb, tenant_id):
'''check that tenant exists at database'''
result, _ = mydb.get_table(FROM='nfvo_tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
if result<=0: return False
return True
def new_tenant(mydb, tenant_dict):
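    '''Adds a new NFVO tenant to the database. Returns 200 and the tenant uuid, or a negative code and an error text.'''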
result, tenant_id = mydb.new_row("nfvo_tenants", tenant_dict, None, add_uuid=True, log=True)
if result < 0:
return result, tenant_id
return 200,tenant_id
def delete_tenant(mydb, tenant):
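    '''Deletes an NFVO tenant given its uuid or name. Returns 200 and the tenant uuid, or a negative code and an error text.'''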
#get nfvo_tenant info
result,tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant, 'tenant')
if result < 0:
return result, tenant_dict
result, tenant_id = mydb.delete_row("nfvo_tenants", tenant_dict['uuid'], None)
if result < 0:
return result, tenant_id
return 200, tenant_dict['uuid']
def new_datacenter(mydb, datacenter_descriptor):
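    '''Adds a new datacenter to the database, serializing the optional 'config' field as YAML.
    Returns 200 and the datacenter uuid, or a negative code and an error text.
    '''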
if "config" in datacenter_descriptor:
datacenter_descriptor["config"]=yaml.safe_dump(datacenter_descriptor["config"],default_flow_style=True,width=256)
result, datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, None, add_uuid=True, log=True)
if result < 0:
return result, datacenter_id
return 200,datacenter_id
def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
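    '''Edits a datacenter. The provided 'config' is merged with the stored one, removing null fields.
    Returns 200 and the datacenter uuid, or a negative code and an error text.
    '''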
    #obtain data, check that only one exists
result, content = mydb.get_table_by_uuid_name('datacenters', datacenter_id_name)
if result < 0:
return result, content
#edit data
datacenter_id = content['uuid']
where={'uuid': content['uuid']}
if "config" in datacenter_descriptor:
if datacenter_descriptor['config']!=None:
try:
new_config_dict = datacenter_descriptor["config"]
config_dict = yaml.load(content["config"])
config_dict.update(new_config_dict)
#delete null fields
                for k in config_dict.keys():
if config_dict[k]==None:
del config_dict[k]
            except Exception as e:
return -HTTP_Bad_Request, "Bad format at datacenter:config " + str(e)
datacenter_descriptor["config"]= yaml.safe_dump(config_dict,default_flow_style=True,width=256) if len(config_dict)>0 else None
result, content = mydb.update_rows('datacenters', datacenter_descriptor, where)
if result < 0:
return result, datacenter_id
return 200, datacenter_id
def delete_datacenter(mydb, datacenter):
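    '''Deletes a datacenter given its uuid or name. Returns 200 and the datacenter uuid, or a negative code and an error text.'''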
#get nfvo_tenant info
result,datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter, 'datacenter')
if result < 0:
return result, datacenter_dict
result, datacenter_id = mydb.delete_row("datacenters", datacenter_dict['uuid'], None)
if result < 0:
return result, datacenter_id
return 200, datacenter_dict['uuid']
def associate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter, vim_tenant_id=None, vim_tenant_name=None, vim_username=None, vim_password=None):
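    '''Attaches a datacenter to an NFVO tenant.
    Checks or creates the corresponding entry at the vim_tenants table and registers the
    association at the tenants_datacenters table.
    Returns 200 and the datacenter uuid, or a negative code and an error text.
    '''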
#get datacenter info
if af.check_valid_uuid(datacenter):
result, vims = get_vim(mydb, datacenter_id=datacenter)
else:
result, vims = get_vim(mydb, datacenter_name=datacenter)
if result < 0:
print "nfvo.associate_datacenter_to_tenant() error. Datacenter not found"
return result, vims
elif result>1:
print "nfvo.associate_datacenter_to_tenant() error. Several datacenters found"
#print result, vims
return -HTTP_Conflict, "More than one datacenters found, try to identify with uuid"
datacenter_id=vims.keys()[0]
myvim=vims[datacenter_id]
datacenter_name=myvim["name"]
#get nfvo_tenant info
result,tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', nfvo_tenant)
if result < 0:
return result, tenant_dict
if vim_tenant_name==None:
vim_tenant_name=tenant_dict['name']
    #check that this association does not already exist
tenants_datacenter_dict={"nfvo_tenant_id":tenant_dict['uuid'], "datacenter_id":datacenter_id }
result,content = mydb.get_table(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
if result>0:
return -HTTP_Conflict, "datacenter %s and tenant %s are already attached" %(datacenter_id, tenant_dict['uuid'])
elif result<0:
return result, content
vim_tenant_id_exist_atdb=False
if vim_tenant_id!=None or vim_tenant_name!=None:
where_={"datacenter_id": datacenter_id}
if vim_tenant_id!=None:
where_["vim_tenant_id"] = vim_tenant_id
if vim_tenant_name!=None:
where_["vim_tenant_name"] = vim_tenant_name
#check if vim_tenant_id is already at database
result,vim_tenants_dict = mydb.get_table(FROM='vim_tenants', WHERE=where_)
if result < 0:
return result, vim_tenants_dict
elif result>=1:
vim_tenants_dict = vim_tenants_dict[0]
vim_tenant_id_exist_atdb=True
#TODO check if a field has changed and edit entry at vim_tenants at DB
else: #result=0
vim_tenants_dict = {}
#insert at table vim_tenants
else: #if vim_tenant_id==None:
#create tenant at VIM if not provided
res, vim_tenant_id = myvim.new_tenant(vim_tenant_name, "created by openmano for datacenter "+datacenter_name)
if res < 0:
return res, "Not possible to create vim_tenant in VIM " + vim_tenant_id
vim_tenants_dict = {}
vim_tenants_dict["created"]="true"
#fill vim_tenants table
if not vim_tenant_id_exist_atdb:
vim_tenants_dict["vim_tenant_id"] = vim_tenant_id
vim_tenants_dict["vim_tenant_name"] = vim_tenant_name
vim_tenants_dict["user"] = vim_username
vim_tenants_dict["passwd"] = vim_password
vim_tenants_dict["datacenter_id"] = datacenter_id
res,id_ = mydb.new_row('vim_tenants', vim_tenants_dict, tenant_dict['uuid'], True, True)
if res<1:
return -HTTP_Bad_Request, "Not possible to add %s to database vim_tenants table %s " %(vim_tenant_id, id_)
vim_tenants_dict["uuid"] = id_
#fill tenants_datacenters table
tenants_datacenter_dict["vim_tenant_id"]=vim_tenants_dict["uuid"]
res,id_ = mydb.new_row('tenants_datacenters', tenants_datacenter_dict, tenant_dict['uuid'], False, True)
if res<1:
return -HTTP_Bad_Request, "Not possible to create vim_tenant at database " + id_
return 200, datacenter_id
def deassociate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter, vim_tenant_id=None):
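    '''Detaches a datacenter from an NFVO tenant.
    Removes the association from the database and, if the VIM tenant was created by openmano,
    tries to delete it at the VIM as well.
    Returns 200 and a result message, or a negative code and an error text.
    '''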
#get datacenter info
if af.check_valid_uuid(datacenter):
result, vims = get_vim(mydb, datacenter_id=datacenter)
else:
result, vims = get_vim(mydb, datacenter_name=datacenter)
if result < 0:
print "nfvo.associate_datacenter_to_tenant() error. Datacenter not found"
return result, vims
elif result>1:
print "nfvo.associate_datacenter_to_tenant() error. Several datacenters found"
return -HTTP_Conflict, "More than one datacenters found, try to identify with uuid"
datacenter_id=vims.keys()[0]
myvim=vims[datacenter_id]
#get nfvo_tenant info
result,tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', nfvo_tenant)
if result < 0:
return result, tenant_dict
    #check that this association exists before deleting it
tenants_datacenter_dict={"nfvo_tenant_id":tenant_dict['uuid'], "datacenter_id":datacenter_id }
result,tenant_datacenter_list = mydb.get_table(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
if result==0:
return -HTTP_Not_Found, "datacenter %s and tenant %s are not attached" %(datacenter_id, tenant_dict['uuid'])
elif result<0:
return result, tenant_datacenter_list
#delete this association
result,data = mydb.delete_row_by_dict(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
if result<0:
return result,data
    #get vim_tenant info and delete it
warning=''
result,vim_tenant_dict = mydb.get_table_by_uuid_name('vim_tenants', tenant_datacenter_list[0]['vim_tenant_id'])
if result > 0:
        #try to delete the vim_tenant entry
result,data = mydb.delete_row('vim_tenants', tenant_datacenter_list[0]['vim_tenant_id'], tenant_dict['uuid'])
if result<0:
            pass #the error is probably caused by dependencies; in that case the vim_tenant cannot be deleted
elif vim_tenant_dict['created']=='true':
#delete tenant at VIM if created by NFVO
res, vim_tenant_id = myvim.delete_tenant(vim_tenant_dict['vim_tenant_id'])
if res < 0:
warning = " Not possible to delete vim_tenant %s from VIM: %s " % (vim_tenant_dict['vim_tenant_id'], vim_tenant_id)
print res, warning
return 200, "datacenter %s detached.%s" %(datacenter_id, warning)
def datacenter_action(mydb, tenant_id, datacenter, action_dict):
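    '''Performs an action over the datacenter networks: 'net-update' reloads the shared, active
    networks from the VIM into datacenter_nets; 'net-edit' and 'net-delete' modify or remove an entry.
    Returns a result code and the operation result, or a negative code and an error text.
    '''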
#get datacenter info
if af.check_valid_uuid(datacenter):
result, vims = get_vim(mydb, nfvo_tenant=tenant_id, datacenter_id=datacenter)
else:
result, vims = get_vim(mydb, nfvo_tenant=tenant_id, datacenter_name=datacenter)
if result < 0:
print "nfvo.associate_datacenter_to_tenant() error. Datacenter not found"
return result, vims
elif result>1:
print "nfvo.associate_datacenter_to_tenant() error. Several datacenters found"
return -HTTP_Conflict, "More than one datacenters found, try to identify with uuid"
datacenter_id=vims.keys()[0]
myvim=vims[datacenter_id]
if 'net-update' in action_dict:
result, content = myvim.get_network_list(filter_dict={'shared': True, 'admin_state_up': True, 'status': 'ACTIVE'})
print content
if result < 0:
print " Not possible to get_network_list from VIM: %s " % (content)
return -HTTP_Internal_Server_Error, content
        #update nets: change from VIM format to NFVO format
net_list=[]
for net in content:
net_nfvo={'datacenter_id': datacenter_id}
net_nfvo['name'] = net['name']
#net_nfvo['description']= net['name']
net_nfvo['vim_net_id'] = net['id']
net_nfvo['type'] = net['type'][0:6] #change from ('ptp','data','bridge_data','bridge_man') to ('bridge','data','ptp')
net_nfvo['shared'] = net['shared']
net_nfvo['multipoint'] = False if net['type']=='ptp' else True
net_list.append(net_nfvo)
result, content = mydb.update_datacenter_nets(datacenter_id, net_list)
if result < 0:
return -HTTP_Internal_Server_Error, content
print "Inserted %d nets, deleted %d old nets" % (result, content)
return 200, result
elif 'net-edit' in action_dict:
net = action_dict['net-edit'].pop('net')
what = 'vim_net_id' if af.check_valid_uuid(net) else 'name'
result, content = mydb.update_rows('datacenter_nets', action_dict['net-edit'],
WHERE={'datacenter_id':datacenter_id, what: net})
return result, content
elif 'net-delete' in action_dict:
net = action_dict['net-delete'].get('net')
what = 'vim_net_id' if af.check_valid_uuid(net) else 'name'
result, content = mydb.delete_row_by_dict(FROM='datacenter_nets',
WHERE={'datacenter_id':datacenter_id, what: net})
return result, content
else:
return -HTTP_Bad_Request, "Unknown action " + str(action_dict)
| 312v/openmano | openmano/nfvo.py | Python | apache-2.0 | 86,385 |