| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int64 3 to 1.05M |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron.openstack.common.cache import backends
from neutron.openstack.common import lockutils
from neutron.openstack.common import timeutils
class MemoryBackend(backends.BaseCache):
def __init__(self, parsed_url, options=None):
super(MemoryBackend, self).__init__(parsed_url, options)
self._clear()
def _set_unlocked(self, key, value, ttl=0):
expires_at = 0
if ttl != 0:
expires_at = timeutils.utcnow_ts() + ttl
self._cache[key] = (expires_at, value)
if expires_at:
self._keys_expires[expires_at].add(key)
def _set(self, key, value, ttl=0, not_exists=False):
with lockutils.lock(key):
# NOTE(flaper87): This is needed just in `set`
# calls, hence it's not in `_set_unlocked`
if not_exists and self._exists_unlocked(key):
return False
self._set_unlocked(key, value, ttl)
return True
def _get_unlocked(self, key, default=None):
now = timeutils.utcnow_ts()
try:
timeout, value = self._cache[key]
except KeyError:
return (0, default)
if timeout and now >= timeout:
# NOTE(flaper87): Record expired,
# remove it from the cache but catch
# KeyError and ValueError in case
# _purge_expired removed this key already.
try:
del self._cache[key]
except KeyError:
pass
try:
# NOTE(flaper87): Keys with ttl == 0
# don't exist in the _keys_expires dict
self._keys_expires[timeout].remove(key)
except (KeyError, ValueError):
pass
return (0, default)
return (timeout, value)
def _get(self, key, default=None):
with lockutils.lock(key):
return self._get_unlocked(key, default)[1]
def _exists_unlocked(self, key):
now = timeutils.utcnow_ts()
try:
timeout = self._cache[key][0]
return not timeout or now <= timeout
except KeyError:
return False
def __contains__(self, key):
with lockutils.lock(key):
return self._exists_unlocked(key)
def _incr_append(self, key, other):
with lockutils.lock(key):
timeout, value = self._get_unlocked(key)
if value is None:
return None
ttl = timeutils.utcnow_ts() - timeout
new_value = value + other
self._set_unlocked(key, new_value, ttl)
return new_value
def _incr(self, key, delta):
if not isinstance(delta, int):
raise TypeError('delta must be an int instance')
return self._incr_append(key, delta)
def _append_tail(self, key, tail):
return self._incr_append(key, tail)
def _purge_expired(self):
"""Removes expired keys from the cache."""
now = timeutils.utcnow_ts()
for timeout in sorted(self._keys_expires.keys()):
# NOTE(flaper87): If timeout is greater
# than `now`, stop the iteration, remaining
# keys have not expired.
if now < timeout:
break
# NOTE(flaper87): Unset every key in
# this set from the cache if its timeout
# is equal to `timeout`. (The key might
# have been updated)
for subkey in self._keys_expires.pop(timeout):
try:
if self._cache[subkey][0] == timeout:
del self._cache[subkey]
except KeyError:
continue
def __delitem__(self, key):
self._purge_expired()
# NOTE(flaper87): Delete the key. Using pop
# since it could have been deleted already
value = self._cache.pop(key, None)
if value:
try:
# NOTE(flaper87): Keys with ttl == 0
# don't exist in the _keys_expires dict
self._keys_expires[value[0]].remove(value[1])
except (KeyError, ValueError):
pass
def _clear(self):
self._cache = {}
self._keys_expires = collections.defaultdict(set)
def _get_many(self, keys, default):
return super(MemoryBackend, self)._get_many(keys, default)
def _set_many(self, data, ttl=0):
return super(MemoryBackend, self)._set_many(data, ttl)
def _unset_many(self, keys):
return super(MemoryBackend, self)._unset_many(keys)
| samsu/neutron | openstack/common/cache/_backends/memory.py | Python | apache-2.0 | 5,240 |
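A minimal usage sketch for the memory backend above. The `get_cache()` factory and the `memory://` URL form are assumptions about the surrounding oslo-incubator `cache` module, not something shown in this file.

```python
# Hedged usage sketch -- assumes the sibling oslo-incubator cache module
# exposes a get_cache() factory that understands a "memory://" URL.
from neutron.openstack.common.cache import cache

backend = cache.get_cache('memory://')   # expected to return a MemoryBackend
backend.set('token', 'abc123', 30)       # entry expires roughly 30 seconds from now
print(backend.get('token'))              # 'abc123' while the TTL has not elapsed
print('token' in backend)                # True while the entry is alive
```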
import unittest
import logging
from StringIO import StringIO
from SimpleForwarder import *
from mock import Mock, MagicMock
from botocore.exceptions import ClientError
from test_util import *
import copy
class testSESEmail(unittest.TestCase):
def setUp(self):
self._ses_mock = Mock()
self._ses_mock.send_raw_email.return_value = {
'MessageId': 'some_message_id'
}
self._s3_mock = Mock()
self._read_dict = {'Body': MagicMock(spec=file, wraps=StringIO(TEST_EMAIL_BODY))}
self._get_mock = MagicMock()
self._get_mock.__getitem__.side_effect = self._read_dict.__getitem__
self._s3_mock.Object.return_value.get.return_value = self._get_mock
def test_event_ok(self):
self.assertIsNone(lambda_handler(TEST_EVENT, {},
self._ses_mock,
self._s3_mock,
TEST_CONFIG))
destinations = self._ses_mock.send_raw_email.call_args[1]['Destinations']
original = self._ses_mock.send_raw_email.call_args[1]['Source']
raw_message = self._ses_mock.send_raw_email.call_args[1]['RawMessage']['Data']
self.assertTrue('user1@example.com' in destinations)
self.assertTrue('user2@example.com' in destinations)
self.assertTrue('user3@example.com' in destinations)
self.assertTrue('info@example.com' in original)
self.assertEqual(TEST_SEND_EMAIL, raw_message)
def test_no_config(self):
self.assertIsNone(lambda_handler(TEST_EVENT, {},
self._ses_mock,
self._s3_mock))
self.assertFalse(self._ses_mock.send_raw_email.called)
if __name__ == '__main__':
unittest.main()
| eamonnsullivan/simple-email-forwarder | unittests/test_lambda_handler.py | Python | mit | 1,824 |
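The S3 mocking pattern in `setUp()` above can be read in isolation as follows. This is a Python 3 rendering (the test module itself uses Python 2's `StringIO` and the external `mock` package), and `TEST_EMAIL_BODY` is a stand-in value rather than the one defined in `test_util`.

```python
# Stand-alone sketch of the S3 body mock wiring used in setUp() above.
from io import StringIO
from unittest.mock import Mock, MagicMock

TEST_EMAIL_BODY = "From: info@example.com\n\nhello"

s3_mock = Mock()
get_result = MagicMock()
get_result.__getitem__.side_effect = {'Body': StringIO(TEST_EMAIL_BODY)}.__getitem__
s3_mock.Object.return_value.get.return_value = get_result

# Code under test typically reads the object body like this:
body = s3_mock.Object('bucket', 'key').get()['Body'].read()
assert body == TEST_EMAIL_BODY
```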
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# WPSeku: Wordpress Security Scanner
#
# @url: https://github.com/m4ll0k/WPSeku
# @author: Momo Outaadi (M4ll0k)
#
# WPSeku is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 3 of the License.
#
# WPSeku is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WPSeku; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from lib import wphttp
from lib import wpprint
class wpcrossdomain:
check = wphttp.check()
printf = wpprint.wpprint()
def __init__(self,agent,proxy,redirect,url):
self.url = url
self.req = wphttp.wphttp(agent=agent,proxy=proxy,redirect=redirect)
def run(self):
self.printf.test('Checking crossdomain...')
try:
url = self.check.checkurl(self.url,'crossdomain.xml')
resp = self.req.send(url)
if resp.read() and resp.getcode() == 200:
self.printf.plus('crossdomain.xml available under: %s'%(url))
else:
self.printf.erro('crossdomain.xml not available')
except Exception as error:
pass | Yukinoshita47/Yuki-Chan-The-Auto-Pentest | Module/WPSeku/modules/discovery/generic/wpcrossdomain.py | Python | mit | 1,415 |
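The probe above depends on the project's own `wphttp`/`wpprint` helpers. A rough stdlib-only equivalent of the same check, written as a sketch rather than a drop-in replacement, looks like this:

```python
# Standard-library approximation of the crossdomain.xml probe above.
import urllib.request

def has_crossdomain(base_url):
    url = base_url.rstrip('/') + '/crossdomain.xml'
    try:
        resp = urllib.request.urlopen(url, timeout=10)
        return resp.getcode() == 200 and bool(resp.read())
    except Exception:
        return False

print(has_crossdomain('https://example.com'))
```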
import os
import json
import string
from datetime import datetime
from src.config.config import config
def create_empty_file(filepath):
# Creates an empty file, override the file if it already exists
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
with open(filepath, "w"):
pass
def rename_file(filepath, new_filename):
if check_file_exist(filepath):
if check_file_exist(os.path.dirname(filepath) + '/' + new_filename):
os.remove(os.path.dirname(filepath) + '/' + new_filename)
os.rename(filepath, os.path.dirname(filepath) + '/' + new_filename)
def append_to_file(filepath, text_to_append, use_date=False, use_time=False):
# Add text to the end of a file
if not check_file_exist(filepath):
create_empty_file(filepath)
if use_date or use_time:
time = datetime.now().time()
date = datetime.now().date()
stamp = ""
if use_date:
stamp += ("0" if date.month < 10 else "") + str(date.month) \
+ ("/0" if date.day < 10 else "/") + str(date.day) \
+ "/"+str(date.year)
if use_time:
stamp += " "
if use_time:
stamp += ("0" if time.hour < 10 else "") + str(time.hour) \
+(":0" if time.minute < 10 else ":") + str(time.minute)
text_to_append = "[" + stamp + "] " + text_to_append + "\n"
with open(filepath, 'a') as file:
file.write(text_to_append)
def check_file_exist(filepath):
return (os.path.isfile(filepath) and os.access(filepath, os.R_OK))
def write_json(filepath, data):
if check_file_exist(filepath):
with open(filepath, 'w') as f:
json.dump(data, f, indent=4, sort_keys=True)
else:
create_empty_file(filepath)
with open(filepath, 'w') as f:
json.dump(data, f, indent=4, sort_keys=True)
def read_json(filepath):
if check_file_exist(filepath):
with open(filepath, 'r') as f:
try:
data = json.load(f)
except ValueError, e:
if e.message != "No JSON object could be decoded":
raise ValueError, e
else:
if config['debug']:
message = '-- Invalid json file: ' + filepath
print(message)
if config['save_log']:
append_to_file(config['save_log_filepath'], message, use_time=True)
data = {}
else:
data = {}
return data
| 901/Gwent-twitch-bot | src/lib/fileHandler.py | Python | gpl-3.0 | 2,644 |
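A short usage sketch for the helpers above, assuming they are imported from this module; the paths, values, and the timestamp shown in the comment are illustrative only.

```python
# Illustrative calls against the helpers defined above (Python 2 module).
write_json('data/settings.json', {'volume': 7})   # creates data/ and the file
print(read_json('data/settings.json'))            # -> {u'volume': 7} on Python 2
append_to_file('logs/run.log', 'started', use_date=True, use_time=True)
# logs/run.log now ends with a line such as: [04/07/2024 09:05] started
```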
# -*- coding: utf-8 -*-
from Caballero import Caballero
from Orco import Orco
from Choza import Choza
from Funciones import *
import random
import textwrap
class Juego:
def __init__(self):
self.chozas = [] #Inicializamos el número de chozas que tendremos dentro de nuestro juego
self.jugador = None #En primer lugar el jugador no tendrá una instancia definida
def get_ocupantes(self):
return [x.Tipo_de_ocupante() for x in self.chozas]
def Mostrar_mision(self):
print_bold("Ataca a los Orcos V1.0.0")
msg = ("La guerra entre los humanos y sus arqueros enemigos, los Orcos, estaba en el aire."
"Un enorme ejército de orcos se dirigía hacia los territos de los humanos. Destruían"
"prácticamente todo en su camino. Los grandes reyes de la raza humana, se unieron para"
" derrotar a su peor enemigo, era la gran batalla de su tiempo. Sir Gandorel, uno de los "
"valientes caballeros que guardan las llanuras meridionales, inició un largo viaje hacia el este"
", a través de un desconocido bosque espeso. Durante dos días y dos noches, se movió con cautela "
"a través del grueso bosque. En su camino, vio un pequeño asentamiento aislado. Cansado y con "
"la esperanza de reponer su stock de alimentos, decidió tomar un desvío. Cuando se acercó al pueblo,"
"vio cinco chozas. No había nadie alrededor. En ese instante, decidió entrar en un choza...")
print(textwrap.fill(msg, width = 72))
print_bold("Misión:")
print(" 1. Lucha contra el enemigo.")
print(" 2. Conquista cada una de las chozas hasta que estén bajo tu control")
print("-"*72)
def _procesar_decision(self):
verifying_choice = True
idx = 0
print("Ocupantes actuales: %s" % self.get_ocupantes())
while verifying_choice:
user_choice = input("Elige un número de choza para entrar (1-5): ")
try:
idx = int(user_choice)
except ValueError as e:
print("Entrada no válida: %s \n" %e.args)
continue
try:
if self.chozas[idx-1].conquistada:
print("Esta choza ya está conquistada")
print_bold("<INFO: No puedes curarte en las choza que hayas conquistado.>")
else:
verifying_choice = False
except IndexError:
print("Entrada no aceptada: ", idx)
print("El número debe estar entre 1 y 5.Inténtalo de nuevo")
continue
return idx
def _ocupar_chozas(self):
for i in range(5):
ocupantes = ['enemigo','amigo',None]
eleccion_aleatoria = random.choice(ocupantes)
if eleccion_aleatoria == 'enemigo':
nombre = 'Enemigo-'+ str(i+1) #Colocamos el numero del enemigo como identificador
self.chozas.append(Choza(i+1, Orco(nombre)))
elif eleccion_aleatoria == 'amigo':
nombre = 'Caballero-'+ str(i+1)
self.chozas.append(Choza(i+1, Caballero(nombre)))
else:
self.chozas.append(Choza(i+1, eleccion_aleatoria))
def play(self):
self.jugador = Caballero()
self._ocupar_chozas()
Contador_chozas_conquistadas = 0
self.Mostrar_mision()
self.jugador.mostrar_salud(bold=True)
while Contador_chozas_conquistadas < 5:
idx = self._procesar_decision()
self.jugador.Conquistar_choza(self.chozas[idx-1])
if self.jugador.Medidor_salud <=0:
print_bold("Sir Gandorel, esperamos que la próxima vez tenga más suerte")
break
if self.chozas[idx-1].conquistada:
Contador_chozas_conquistadas +=1
if Contador_chozas_conquistadas == 5:
print_bold("¡Enhorabuena! Sir Gandorel ha conquistado la aldea")
| tidus747/Tutoriales_juegos_Python | Ataca a los orcos V1.1.0/Ataca_a_los_orcos_V1_0_0.py | Python | gpl-3.0 | 4,063 |
"""
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import ast
import re
from gnuradio import gr
from .. base.Param import Param as _Param
from .. gui.Param import Param as _GUIParam
import Constants
from Constants import VECTOR_TYPES, COMPLEX_TYPES, REAL_TYPES, INT_TYPES
from gnuradio import eng_notation
_check_id_matcher = re.compile('^[a-z|A-Z]\w*$')
_show_id_matcher = re.compile('^(variable\w*|parameter|options|notebook|epy_module)$')
#blacklist certain ids, it's not complete, but should help
import __builtin__
ID_BLACKLIST = ['self', 'options', 'gr', 'blks2', 'wxgui', 'wx', 'math', 'forms', 'firdes'] + \
filter(lambda x: not x.startswith('_'), dir(gr.top_block())) + dir(__builtin__)
def num_to_str(num):
""" Display logic for numbers """
if isinstance(num, COMPLEX_TYPES):
num = complex(num) #cast to python complex
if num == 0: return '0' #value is zero
elif num.imag == 0: return '%s'%eng_notation.num_to_str(num.real) #value is real
elif num.real == 0: return '%sj'%eng_notation.num_to_str(num.imag) #value is imaginary
elif num.imag < 0: return '%s-%sj'%(eng_notation.num_to_str(num.real), eng_notation.num_to_str(abs(num.imag)))
else: return '%s+%sj'%(eng_notation.num_to_str(num.real), eng_notation.num_to_str(num.imag))
else: return str(num)
class Param(_Param, _GUIParam):
def __init__(self, **kwargs):
_Param.__init__(self, **kwargs)
_GUIParam.__init__(self)
self._init = False
self._hostage_cells = list()
def get_types(self): return (
'raw', 'enum',
'complex', 'real', 'float', 'int',
'complex_vector', 'real_vector', 'float_vector', 'int_vector',
'hex', 'string', 'bool',
'file_open', 'file_save', '_multiline', '_multiline_python_external',
'id', 'stream_id',
'grid_pos', 'notebook', 'gui_hint',
'import',
)
def __repr__(self):
"""
Get the repr (nice string format) for this param.
Returns:
the string representation
"""
##################################################
# truncate helper method
##################################################
def _truncate(string, style=0):
max_len = max(27 - len(self.get_name()), 3)
if len(string) > max_len:
if style < 0: #front truncate
string = '...' + string[3-max_len:]
elif style == 0: #center truncate
string = string[:max_len/2 -3] + '...' + string[-max_len/2:]
elif style > 0: #rear truncate
string = string[:max_len-3] + '...'
return string
##################################################
# simple conditions
##################################################
if not self.is_valid(): return _truncate(self.get_value())
if self.get_value() in self.get_option_keys(): return self.get_option(self.get_value()).get_name()
##################################################
# split up formatting by type
##################################################
truncate = 0 #default center truncate
e = self.get_evaluated()
t = self.get_type()
if isinstance(e, bool): return str(e)
elif isinstance(e, COMPLEX_TYPES): dt_str = num_to_str(e)
elif isinstance(e, VECTOR_TYPES): #vector types
if len(e) > 8:
dt_str = self.get_value() #large vectors use code
truncate = 1
else: dt_str = ', '.join(map(num_to_str, e)) #small vectors use eval
elif t in ('file_open', 'file_save'):
dt_str = self.get_value()
truncate = -1
else: dt_str = str(e) #other types
##################################################
# done
##################################################
return _truncate(dt_str, truncate)
def get_color(self):
"""
Get the color that represents this param's type.
Returns:
a hex color code.
"""
try:
return {
#number types
'complex': Constants.COMPLEX_COLOR_SPEC,
'real': Constants.FLOAT_COLOR_SPEC,
'float': Constants.FLOAT_COLOR_SPEC,
'int': Constants.INT_COLOR_SPEC,
#vector types
'complex_vector': Constants.COMPLEX_VECTOR_COLOR_SPEC,
'real_vector': Constants.FLOAT_VECTOR_COLOR_SPEC,
'float_vector': Constants.FLOAT_VECTOR_COLOR_SPEC,
'int_vector': Constants.INT_VECTOR_COLOR_SPEC,
#special
'bool': Constants.INT_COLOR_SPEC,
'hex': Constants.INT_COLOR_SPEC,
'string': Constants.BYTE_VECTOR_COLOR_SPEC,
'id': Constants.ID_COLOR_SPEC,
'stream_id': Constants.ID_COLOR_SPEC,
'grid_pos': Constants.INT_VECTOR_COLOR_SPEC,
'notebook': Constants.INT_VECTOR_COLOR_SPEC,
'raw': Constants.WILDCARD_COLOR_SPEC,
}[self.get_type()]
except: return _Param.get_color(self)
def get_hide(self):
"""
Get the hide value from the base class.
Hide the ID parameter for most blocks. Exceptions below.
If the parameter controls a port type, vlen, or nports, return part.
If the parameter is an empty grid position, return part.
These parameters are redundant to display in the flow graph view.
Returns:
hide the hide property string
"""
hide = _Param.get_hide(self)
if hide: return hide
#hide ID in non variable blocks
if self.get_key() == 'id' and not _show_id_matcher.match(self.get_parent().get_key()): return 'part'
#hide port controllers for type and nports
if self.get_key() in ' '.join(map(
lambda p: ' '.join([p._type, p._nports]), self.get_parent().get_ports())
): return 'part'
#hide port controllers for vlen, when == 1
if self.get_key() in ' '.join(map(
lambda p: p._vlen, self.get_parent().get_ports())
):
try:
if int(self.get_evaluated()) == 1: return 'part'
except: pass
#hide empty grid positions
if self.get_key() in ('grid_pos', 'notebook') and not self.get_value(): return 'part'
return hide
def validate(self):
"""
Validate the param.
A test evaluation is performed
"""
_Param.validate(self) #checks type
self._evaluated = None
try: self._evaluated = self.evaluate()
except Exception, e: self.add_error_message(str(e))
def get_evaluated(self): return self._evaluated
def evaluate(self):
"""
Evaluate the value.
Returns:
evaluated type
"""
self._init = True
self._lisitify_flag = False
self._stringify_flag = False
self._hostage_cells = list()
t = self.get_type()
v = self.get_value()
#########################
# Enum Type
#########################
if self.is_enum(): return v
#########################
# Numeric Types
#########################
elif t in ('raw', 'complex', 'real', 'float', 'int', 'hex', 'bool'):
#raise exception if python cannot evaluate this value
try: e = self.get_parent().get_parent().evaluate(v)
except Exception, e: raise Exception, 'Value "%s" cannot be evaluated:\n%s'%(v, e)
#raise an exception if the data is invalid
if t == 'raw': return e
elif t == 'complex':
if not isinstance(e, COMPLEX_TYPES):
raise Exception, 'Expression "%s" is invalid for type complex.'%str(e)
return e
elif t == 'real' or t == 'float':
if not isinstance(e, REAL_TYPES):
raise Exception, 'Expression "%s" is invalid for type float.'%str(e)
return e
elif t == 'int':
if not isinstance(e, INT_TYPES):
raise Exception, 'Expression "%s" is invalid for type integer.'%str(e)
return e
elif t == 'hex': return hex(e)
elif t == 'bool':
if not isinstance(e, bool):
raise Exception, 'Expression "%s" is invalid for type bool.'%str(e)
return e
else: raise TypeError, 'Type "%s" not handled'%t
#########################
# Numeric Vector Types
#########################
elif t in ('complex_vector', 'real_vector', 'float_vector', 'int_vector'):
if not v: v = '()' #turn a blank string into an empty list, so it will eval
#raise exception if python cannot evaluate this value
try: e = self.get_parent().get_parent().evaluate(v)
except Exception, e: raise Exception, 'Value "%s" cannot be evaluated:\n%s'%(v, e)
#raise an exception if the data is invalid
if t == 'complex_vector':
if not isinstance(e, VECTOR_TYPES):
self._lisitify_flag = True
e = [e]
if not all([isinstance(ei, COMPLEX_TYPES) for ei in e]):
raise Exception, 'Expression "%s" is invalid for type complex vector.'%str(e)
return e
elif t == 'real_vector' or t == 'float_vector':
if not isinstance(e, VECTOR_TYPES):
self._lisitify_flag = True
e = [e]
if not all([isinstance(ei, REAL_TYPES) for ei in e]):
raise Exception, 'Expression "%s" is invalid for type float vector.'%str(e)
return e
elif t == 'int_vector':
if not isinstance(e, VECTOR_TYPES):
self._lisitify_flag = True
e = [e]
if not all([isinstance(ei, INT_TYPES) for ei in e]):
raise Exception, 'Expression "%s" is invalid for type integer vector.'%str(e)
return e
#########################
# String Types
#########################
elif t in ('string', 'file_open', 'file_save', '_multiline', '_multiline_python_external'):
#do not check if file/directory exists, that is a runtime issue
try:
e = self.get_parent().get_parent().evaluate(v)
if not isinstance(e, str):
raise Exception()
except:
self._stringify_flag = True
e = str(v)
if t == '_multiline_python_external':
ast.parse(e) # raises SyntaxError
return e
#########################
# Unique ID Type
#########################
elif t == 'id':
#can python use this as a variable?
if not _check_id_matcher.match(v):
raise Exception, 'ID "%s" must begin with a letter and may contain letters, numbers, and underscores.'%v
ids = [param.get_value() for param in self.get_all_params(t)]
if ids.count(v) > 1: #id should only appear once, or zero times if block is disabled
raise Exception, 'ID "%s" is not unique.'%v
if v in ID_BLACKLIST:
raise Exception, 'ID "%s" is blacklisted.'%v
return v
#########################
# Stream ID Type
#########################
elif t == 'stream_id':
#get a list of all stream ids used in the virtual sinks
ids = [param.get_value() for param in filter(
lambda p: p.get_parent().is_virtual_sink(),
self.get_all_params(t),
)]
#check that the virtual sink's stream id is unique
if self.get_parent().is_virtual_sink():
if ids.count(v) > 1: #id should only appear once, or zero times if block is disabled
raise Exception, 'Stream ID "%s" is not unique.'%v
#check that the virtual source's steam id is found
if self.get_parent().is_virtual_source():
if v not in ids:
raise Exception, 'Stream ID "%s" is not found.'%v
return v
#########################
# GUI Position/Hint
#########################
elif t == 'gui_hint':
if ':' in v: tab, pos = v.split(':')
elif '@' in v: tab, pos = v, ''
else: tab, pos = '', v
if '@' in tab: tab, index = tab.split('@')
else: index = '?'
widget_str = ({
(True, True): 'self.%(tab)s_grid_layout_%(index)s.addWidget(%(widget)s, %(pos)s)',
(True, False): 'self.%(tab)s_layout_%(index)s.addWidget(%(widget)s)',
(False, True): 'self.top_grid_layout.addWidget(%(widget)s, %(pos)s)',
(False, False): 'self.top_layout.addWidget(%(widget)s)',
}[bool(tab), bool(pos)])%{'tab': tab, 'index': index, 'widget': '%s', 'pos': pos}
# FIXME: Move replace(...) into the make template of the qtgui blocks and return a string here
class GuiHint(object):
def __init__(self, ws):
self._ws = ws
def __call__(self, w):
return (self._ws.replace('addWidget', 'addLayout') if 'layout' in w else self._ws) % w
def __str__(self):
return self._ws
return GuiHint(widget_str)
#########################
# Grid Position Type
#########################
elif t == 'grid_pos':
if not v: return '' #allow for empty grid pos
e = self.get_parent().get_parent().evaluate(v)
if not isinstance(e, (list, tuple)) or len(e) != 4 or not all([isinstance(ei, int) for ei in e]):
raise Exception, 'A grid position must be a list of 4 integers.'
row, col, row_span, col_span = e
#check row, col
if row < 0 or col < 0:
raise Exception, 'Row and column must be non-negative.'
#check row span, col span
if row_span <= 0 or col_span <= 0:
raise Exception, 'Row and column span must be greater than zero.'
#get hostage cell parent
try: my_parent = self.get_parent().get_param('notebook').evaluate()
except: my_parent = ''
#calculate hostage cells
for r in range(row_span):
for c in range(col_span):
self._hostage_cells.append((my_parent, (row+r, col+c)))
#avoid collisions
params = filter(lambda p: p is not self, self.get_all_params('grid_pos'))
for param in params:
for parent, cell in param._hostage_cells:
if (parent, cell) in self._hostage_cells:
raise Exception, 'Another graphical element is using parent "%s", cell "%s".'%(str(parent), str(cell))
return e
#########################
# Notebook Page Type
#########################
elif t == 'notebook':
if not v: return '' #allow for empty notebook
#get a list of all notebooks
notebook_blocks = filter(lambda b: b.get_key() == 'notebook', self.get_parent().get_parent().get_enabled_blocks())
#check for notebook param syntax
try: notebook_id, page_index = map(str.strip, v.split(','))
except: raise Exception, 'Bad notebook page format.'
#check that the notebook id is valid
try: notebook_block = filter(lambda b: b.get_id() == notebook_id, notebook_blocks)[0]
except: raise Exception, 'Notebook id "%s" is not an existing notebook id.'%notebook_id
#check that page index exists
if int(page_index) not in range(len(notebook_block.get_param('labels').evaluate())):
raise Exception, 'Page index "%s" is not a valid index number.'%page_index
return notebook_id, page_index
#########################
# Import Type
#########################
elif t == 'import':
n = dict() #new namespace
try: exec v in n
except ImportError: raise Exception, 'Import "%s" failed.'%v
except Exception: raise Exception, 'Bad import syntax: "%s".'%v
return filter(lambda k: str(k) != '__builtins__', n.keys())
#########################
else: raise TypeError, 'Type "%s" not handled'%t
def to_code(self):
"""
Convert the value to code.
For string and list types, check the init flag, call evaluate().
This ensures that evaluate() was called to set the xxxify_flags.
Returns:
a string representing the code
"""
v = self.get_value()
t = self.get_type()
if t in ('string', 'file_open', 'file_save', '_multiline', '_multiline_python_external'): # string types
if not self._init: self.evaluate()
if self._stringify_flag: return '"%s"'%v.replace('"', '\"')
else: return v
elif t in ('complex_vector', 'real_vector', 'float_vector', 'int_vector'): #vector types
if not self._init: self.evaluate()
if self._lisitify_flag: return '(%s, )'%v
else: return '(%s)'%v
else: return v
def get_all_params(self, type):
"""
Get all the params from the flowgraph that have the given type.
Args:
type: the specified type
Returns:
a list of params
"""
return sum([filter(lambda p: p.get_type() == type, block.get_params()) for block in self.get_parent().get_parent().get_enabled_blocks()], [])
| analogdevicesinc/gnuradio | grc/python/Param.py | Python | gpl-3.0 | 18,960 |
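The display logic in `num_to_str()` near the top of this file is easiest to see with a few sample inputs. The exact strings come from gnuradio's `eng_notation` at runtime, so the outputs below are approximate rather than guaranteed.

```python
# Illustrative outputs for num_to_str() defined in Param.py above.
print(num_to_str(0))         # '0'
print(num_to_str(2.5))       # real value only, e.g. '2.5'
print(num_to_str(3j))        # imaginary value only, e.g. '3j'
print(num_to_str(1 + 2j))    # '1+2j'
print(num_to_str(1 - 2j))    # '1-2j'
print(num_to_str('raw'))     # non-numeric values fall through to str(): 'raw'
```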
"""
Test cases for codeop.py
Nick Mathewson
"""
import unittest
from test.support import run_unittest, is_jython
from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT
import io
if is_jython:
import sys
def unify_callables(d):
for n,v in d.items():
if hasattr(v, '__call__'):
d[n] = True
return d
class CodeopTests(unittest.TestCase):
def assertValid(self, str, symbol='single'):
'''succeed iff str is a valid piece of code'''
if is_jython:
code = compile_command(str, "<input>", symbol)
self.assertTrue(code)
if symbol == "single":
d,r = {},{}
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
exec(code, d)
exec(compile(str,"<input>","single"), r)
finally:
sys.stdout = saved_stdout
elif symbol == 'eval':
ctx = {'a': 2}
d = { 'value': eval(code,ctx) }
r = { 'value': eval(str,ctx) }
self.assertEqual(unify_callables(r),unify_callables(d))
else:
expected = compile(str, "<input>", symbol, PyCF_DONT_IMPLY_DEDENT)
self.assertEqual(compile_command(str, "<input>", symbol), expected)
def assertIncomplete(self, str, symbol='single'):
'''succeed iff str is the start of a valid piece of code'''
self.assertEqual(compile_command(str, symbol=symbol), None)
def assertInvalid(self, str, symbol='single', is_syntax=1):
'''succeed iff str is the start of an invalid piece of code'''
try:
compile_command(str,symbol=symbol)
self.fail("No exception thrown for invalid code")
except SyntaxError:
self.assertTrue(is_syntax)
except OverflowError:
self.assertTrue(not is_syntax)
def test_valid(self):
av = self.assertValid
# special case
if not is_jython:
self.assertEqual(compile_command(""),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
self.assertEqual(compile_command("\n"),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
else:
av("")
av("\n")
av("a = 1")
av("\na = 1")
av("a = 1\n")
av("a = 1\n\n")
av("\n\na = 1\n\n")
av("def x():\n pass\n")
av("if 1:\n pass\n")
av("\n\nif 1: pass\n")
av("\n\nif 1: pass\n\n")
av("def x():\n\n pass\n")
av("def x():\n pass\n \n")
av("def x():\n pass\n \n")
av("pass\n")
av("3**3\n")
av("if 9==3:\n pass\nelse:\n pass\n")
av("if 1:\n pass\n if 1:\n pass\n else:\n pass\n")
av("#a\n#b\na = 3\n")
av("#a\n\n \na=3\n")
av("a=3\n\n")
av("a = 9+ \\\n3")
av("3**3","eval")
av("(lambda z: \n z**3)","eval")
av("9+ \\\n3","eval")
av("9+ \\\n3\n","eval")
av("\n\na**3","eval")
av("\n \na**3","eval")
av("#a\n#b\na**3","eval")
av("\n\na = 1\n\n")
av("\n\nif 1: a=1\n\n")
av("if 1:\n pass\n if 1:\n pass\n else:\n pass\n")
av("#a\n\n \na=3\n\n")
av("\n\na**3","eval")
av("\n \na**3","eval")
av("#a\n#b\na**3","eval")
av("def f():\n try: pass\n finally: [x for x in (1,2)]\n")
av("def f():\n pass\n#foo\n")
av("@a.b.c\ndef f():\n pass\n")
def test_incomplete(self):
ai = self.assertIncomplete
ai("(a **")
ai("(a,b,")
ai("(a,b,(")
ai("(a,b,(")
ai("a = (")
ai("a = {")
ai("b + {")
ai("if 9==3:\n pass\nelse:")
ai("if 9==3:\n pass\nelse:\n")
ai("if 9==3:\n pass\nelse:\n pass")
ai("if 1:")
ai("if 1:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:")
ai("if 1:\n pass\n if 1:\n pass\n else:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:\n pass")
ai("def x():")
ai("def x():\n")
ai("def x():\n\n")
ai("def x():\n pass")
ai("def x():\n pass\n ")
ai("def x():\n pass\n ")
ai("\n\ndef x():\n pass")
ai("a = 9+ \\")
ai("a = 'a\\")
ai("a = '''xy")
ai("","eval")
ai("\n","eval")
ai("(","eval")
ai("(\n\n\n","eval")
ai("(9+","eval")
ai("9+ \\","eval")
ai("lambda z: \\","eval")
ai("if True:\n if True:\n if True: \n")
ai("@a(")
ai("@a(b")
ai("@a(b,")
ai("@a(b,c")
ai("@a(b,c,")
ai("from a import (")
ai("from a import (b")
ai("from a import (b,")
ai("from a import (b,c")
ai("from a import (b,c,")
ai("[");
ai("[a");
ai("[a,");
ai("[a,b");
ai("[a,b,");
ai("{");
ai("{a");
ai("{a:");
ai("{a:b");
ai("{a:b,");
ai("{a:b,c");
ai("{a:b,c:");
ai("{a:b,c:d");
ai("{a:b,c:d,");
ai("a(")
ai("a(b")
ai("a(b,")
ai("a(b,c")
ai("a(b,c,")
ai("a[")
ai("a[b")
ai("a[b,")
ai("a[b:")
ai("a[b:c")
ai("a[b:c:")
ai("a[b:c:d")
ai("def a(")
ai("def a(b")
ai("def a(b,")
ai("def a(b,c")
ai("def a(b,c,")
ai("(")
ai("(a")
ai("(a,")
ai("(a,b")
ai("(a,b,")
ai("if a:\n pass\nelif b:")
ai("if a:\n pass\nelif b:\n pass\nelse:")
ai("while a:")
ai("while a:\n pass\nelse:")
ai("for a in b:")
ai("for a in b:\n pass\nelse:")
ai("try:")
ai("try:\n pass\nexcept:")
ai("try:\n pass\nfinally:")
ai("try:\n pass\nexcept:\n pass\nfinally:")
ai("with a:")
ai("with a as b:")
ai("class a:")
ai("class a(")
ai("class a(b")
ai("class a(b,")
ai("class a():")
ai("[x for")
ai("[x for x in")
ai("[x for x in (")
ai("(x for")
ai("(x for x in")
ai("(x for x in (")
def test_invalid(self):
ai = self.assertInvalid
ai("a b")
ai("a @")
ai("a b @")
ai("a ** @")
ai("a = ")
ai("a = 9 +")
ai("def x():\n\npass\n")
ai("\n\n if 1: pass\n\npass")
ai("a = 9+ \\\n")
ai("a = 'a\\ ")
ai("a = 'a\\\n")
ai("a = 1","eval")
ai("a = (","eval")
ai("]","eval")
ai("())","eval")
ai("[}","eval")
ai("9+","eval")
ai("lambda z:","eval")
ai("a b","eval")
ai("return 2.3")
ai("if (a == 1 and b = 2): pass")
ai("del 1")
ai("del ()")
ai("del (1,)")
ai("del [1]")
ai("del '1'")
ai("[i for i in range(10)] = (1, 2, 3)")
def test_filename(self):
self.assertEqual(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "abc", 'single').co_filename)
self.assertNotEqual(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "def", 'single').co_filename)
def test_main():
run_unittest(CodeopTests)
if __name__ == "__main__":
test_main()
| invisiblek/python-for-android | python3-alpha/python3-src/Lib/test/test_codeop.py | Python | apache-2.0 | 7,630 |
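The `assertValid`/`assertIncomplete`/`assertInvalid` helpers above are built around three possible outcomes of `compile_command()`, which can be demonstrated with the standard library alone:

```python
# The three outcomes compile_command() can produce.
from codeop import compile_command

print(compile_command("a = 1\n"))    # a code object: complete statement
print(compile_command("if 1:"))      # None: syntactically valid but incomplete
try:
    compile_command("a b")           # invalid input raises SyntaxError
except SyntaxError as exc:
    print("invalid:", exc.msg)
```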
#! /usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
sys.path.insert(0, os.getcwd())
import re
import sys
import shutil
import glob
import argparse
import datetime
import json
from scripts.utils import Tree, SortingCriteria, get_system_type
SCENARIO_PERF_RES_METADATA = {
# scenario: (result regex, SortingCriteria)
"Offline": (r"Samples per second: (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Higher),
"Server": (r"99\.00 percentile latency \(ns\) : (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Lower),
"SingleStream": (r"90th percentile latency \(ns\) : (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Lower),
"MultiStream": (r"99\.00 percentile latency \(ns\) : (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Lower),
}
def sort_perf_list(perf_file_list, scenario):
perf_vals = []
for perf_file in perf_file_list:
summary_file = perf_file.replace("_accuracy.json", "_summary.txt")
found_perf = False
with open(summary_file) as f:
log = f.read().split("\n")
for line in log:
matches = re.match(SCENARIO_PERF_RES_METADATA[scenario][0], line)
if matches is None:
continue
perf_vals.append((perf_file, float(matches.group(1))))
found_perf = True
break
if not found_perf:
raise Exception("Could not find perf value in file: " + summary_file)
sorted_perf_vals = sorted(perf_vals, key=lambda k: k[1],
reverse=(SCENARIO_PERF_RES_METADATA[scenario][1] == SortingCriteria.Lower))
return [ k[0] for k in sorted_perf_vals ]
def find_valid_runs(input_list, scenario):
# Check for query constraints documented in https://github.com/mlperf/inference_policies/blob/master/inference_rules.adoc#scenarios
QUERY_METRIC_CONSTRAINTS = {
"Offline": (r"samples_per_query : (\d+\.?\d*e?[-+]?\d*)", 24576),
"Server": (r"min_query_count : (\d+\.?\d*e?[-+]?\d*)", 270336),
"MultiStream": (r"min_query_count : (\d+\.?\d*e?[-+]?\d*)", 270336),
"SingleStream": (r"min_query_count : (\d+\.?\d*e?[-+]?\d*)", 1024),
}
perf_list = []
accu_list = []
for input_file in input_list:
# Check if this is Accuracy run or Performance run.
if os.path.getsize(input_file) > 4:
accu_list.append(input_file)
# Check for valid perf run
is_valid = False
satisfies_query_constraint = False
summary = input_file.replace("_accuracy.json", "_summary.txt")
with open(summary) as f:
for line in f:
# Result validity check
match = re.match(r"Result is : (VALID|INVALID)", line)
if match is not None and match.group(1) == "VALID":
is_valid = True
# Query constraint check
match = re.match(QUERY_METRIC_CONSTRAINTS[scenario][0], line)
if match is not None and float(match.group(1)) >= QUERY_METRIC_CONSTRAINTS[scenario][1]:
satisfies_query_constraint = True
if is_valid and satisfies_query_constraint:
perf_list.append(input_file)
return perf_list, accu_list
def process_results(args, system_ids, metadata):
time_now = str(datetime.datetime.utcnow())
result_id = args.result_id if args.result_id is not None else "manual-{:}".format(time_now)
for system_id in system_ids:
system_type = get_system_type(system_id)
for benchmark in system_ids[system_id]:
# Skip DLRM and BERT-99.9 for Edge
if system_type == "edge" and (benchmark.startswith("dlrm") or benchmark == "bert-99.9"):
print("{:} is an edge system. Skipping {:}".format(system_id, benchmark))
continue
# Skip SSD MobileNet for datacenter
if system_type == "datacenter" and benchmark == "ssd-mobilenet":
print("{:} is a datacenter system. Skipping {:}".format(system_id, benchmark))
continue
for scenario in system_ids[system_id][benchmark]:
# Skip Server for Edge systems
if system_type == "edge" and scenario in {"Server"}:
print("{:} is an edge system. Skipping Server scenario".format(system_id))
continue
# Skip SingleStream and MultiStream for Datacenter systems
if system_type == "datacenter" and scenario in {"SingleStream", "MultiStream"}:
print("{:} is a datacenter system. Skipping {:} scenario".format(system_id, scenario))
continue
print(">>>>>>>> Processing {:}-{:}-{:} <<<<<<<<".format(system_id, benchmark, scenario))
input_list = system_ids[system_id][benchmark][scenario]
print("Found {:} log files".format(len(input_list)))
perf_list, accu_list = find_valid_runs(input_list, scenario)
# For DLRM and 3d-UNET, the 99.9% and 99% accuracy targets use the same engines. We use the same
# logs here to make it more prominent that they are the same
if benchmark in { "dlrm-99", "3d-unet-99" }:
perf_list, accu_list = find_valid_runs(system_ids[system_id][benchmark + ".9"][scenario], scenario)
print("\t{:} perf logs".format(len(perf_list)))
print("\t{:} acc logs".format(len(accu_list)))
metadata.insert([system_id, benchmark, scenario, "accuracy", "count"], len(accu_list))
metadata.insert([system_id, benchmark, scenario, "performance", "count"], len(perf_list))
#### Update accuracy run
if len(accu_list) == 0:
print("WARNING: Cannot find valid accuracy run.")
if args.abort_missing_accuracy:
return
else:
if len(accu_list) > 1:
print("WARNING: Found {:d} accuracy runs, which is more than needed. Empirically choose the last one.".format(len(accu_list)))
print(accu_list)
output_dir = os.path.join(args.output_dir, system_id, benchmark, scenario, "accuracy")
if not args.dry_run:
os.makedirs(output_dir, exist_ok=True)
for suffix in ["_accuracy.json", "_detail.txt", "_summary.txt"]:
input_file = accu_list[-1].replace("_accuracy.json", suffix)
output_file = os.path.join(output_dir, "mlperf_log{:}".format(suffix))
print("Copy {:} -> {:}".format(input_file, output_file))
if not args.dry_run:
shutil.copy(input_file, output_file)
input_file = os.path.join(os.path.dirname(input_file), "accuracy.txt")
output_file = os.path.join(output_dir, "accuracy.txt")
print("Copy {:} -> {:}".format(input_file, output_file))
if not args.dry_run:
shutil.copy(input_file, output_file)
#### Update perf run
perf_count = 1 if scenario != "Server" else 5
if len(perf_list) < perf_count:
print("WARNING: Cannot find enough passing perf runs. Only found {:d} runs.".format(len(perf_list)))
if args.abort_insufficient_runs:
return
elif len(perf_list) > perf_count:
print("WARNING: Found {:d} passing perf runs, which is more than needed. Choosing the highest perf one(s).".format(len(perf_list)))
perf_list = sort_perf_list(perf_list, scenario)[-perf_count:]
starting_idx = metadata.get([system_id, benchmark, scenario, "performance", "last_updated"])
if starting_idx is None:
starting_idx = 0
else:
# Starting idx is in range 1..perf_count, whereas actual indices are 0..perf_count-1. We wish the
# first index we modify to be the one after Starting idx, so taking (N mod perf_count) works.
starting_idx = starting_idx % perf_count
for run_idx in range(0, len(perf_list)):
run_num = ((run_idx + starting_idx) % perf_count) + 1
output_dir = os.path.join(args.output_dir, system_id, benchmark, scenario, "performance", "run_{:d}".format(run_num))
if not args.dry_run:
os.makedirs(output_dir, exist_ok=True)
for suffix in ["_accuracy.json", "_detail.txt", "_summary.txt"]:
input_file = perf_list[run_idx].replace("_accuracy.json", suffix)
output_file = os.path.join(output_dir, "mlperf_log{:}".format(suffix))
print("Copy {:} -> {:}".format(input_file, output_file))
if not args.dry_run:
shutil.copy(input_file, output_file)
metadata.insert([system_id, benchmark, scenario, "performance", "last_updated"], run_num)
metadata.insert([ system_id, benchmark, scenario, "results_export_timestamp" ], time_now)
metadata.insert([ system_id, benchmark, scenario, "result_id" ], result_id)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir", "-d",
help="Specifies the directory containing the logs.",
default="build/logs"
)
parser.add_argument(
"--output_dir", "-o",
help="Specifies the directory to output the results/ entries to",
default="results"
)
parser.add_argument(
"--result_id",
help="Specifies a unique ID to use for this result",
default=None
)
parser.add_argument(
"--abort_insufficient_runs",
help="Abort instead if there are not enough perf runs to be considered valid",
action="store_true"
)
parser.add_argument(
"--abort_missing_accuracy",
help="Abort instead if there isn't a valid accuracy run",
action="store_true"
)
parser.add_argument(
"--dry_run",
help="Don't actually copy files, just log the actions taken.",
action="store_true"
)
parser.add_argument(
"--metadata_file",
help="File that stores metadata about these results",
default="results_metadata.json"
)
parser.add_argument(
"--add_metadata",
help="Save a field as part of metadata to the results directory. Format period.separated.key:value",
action="append"
)
return parser.parse_args()
def main():
args = get_args()
glob_to_logs = os.path.join(args.input_dir, "**", "mlperf_log_accuracy.json")
print("Looking for logs in {:}".format(glob_to_logs))
all_logs = glob.glob(glob_to_logs, recursive=True)
print("Found {:} mlperf_log entries".format(len(all_logs)))
# Loop through input_list to find all the system_ids
system_ids = Tree()
for entry in all_logs:
parts = entry.split("/")
system_id = parts[-4] # [input_dir]/<timestamp>/system_id/benchmark/scenario/*.json
benchmark = parts[-3]
scenario = parts[-2]
system_ids.insert([system_id, benchmark, scenario], entry, append=True)
metadata = None
if os.path.exists(args.metadata_file):
with open(args.metadata_file) as f:
metadata = json.load(f)
metadata = Tree(starting_val=metadata)
process_results(args, system_ids, metadata)
# Write out custom metadata
if args.add_metadata:
for md in args.add_metadata:
tmp = md.split(":")
if len(tmp) != 2:
print("WARNING: Invalid metadata \"{:}\"".format(md))
continue
keyspace = tmp[0].split(".")
value = tmp[1]
metadata.insert(keyspace, value)
if not args.dry_run:
with open(args.metadata_file, 'w') as f:
json.dump(metadata.tree, f, indent=4, sort_keys=True)
else:
print(json.dumps(metadata.tree, indent=4, sort_keys=True))
print("Done!")
if __name__ == '__main__':
main()
| mlperf/inference_results_v0.7 | open/NVIDIA/scripts/update_results.py | Python | apache-2.0 | 13,020 |
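The metric extraction in `sort_perf_list()` boils down to a regex match against one line of the summary log. A self-contained sketch, with a made-up sample line and value:

```python
# Sketch of the Offline-scenario metric lookup performed by sort_perf_list().
import re

sample_line = "Samples per second: 12345.6"
pattern = r"Samples per second: (\d+\.?\d*e?[-+]?\d*)"
match = re.match(pattern, sample_line)
if match:
    print(float(match.group(1)))   # 12345.6
```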
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (C) 2012 Travis Shirk <travis@pobox.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
from eyed3.id3 import Tag
from eyed3.id3 import ID3_V1_0, ID3_V1_1, ID3_V2_3, ID3_V2_4
import logging
from eyed3 import log
log.setLevel(logging.DEBUG)
t = Tag()
t.artist = u"M.O.P."
t.title = u"How About Some Hardcore"
t.album = u"To The Death"
t.genre = u"Hip-Hop"
t.track_num = (3,5)
t.disc_num = (1,1)
t.original_release_date = "1994-04-07"
t.release_date = "1994-04-07"
t.encoding_date = "2002-03"
t.recording_date = 1996
t.tagging_date = "2012-2-5"
t.comments.set(u"Gritty, yo!")
t.comments.set(u"Brownsville, Brooklyn", u"Origin")
t.user_text_frames.set(u"****", u"Rating")
t.artist_url = b"http://allmusic.com/artist/mop-p194909"
t.user_url_frames.set(b"http://eyed3.nicfit.net/")
t.bpm = 187
t.play_count = 125
t.unique_file_ids.set(b"43e888e067ea107f964916af6259cbe7", "md5sum")
t.cd_id = b"\x3c\x33\x4d\x41\x43\x59\x3c\x33"
t.privates.set("Secrets", "Billy Danzenie")
t.terms_of_use = u"Blunted"
t.lyrics.set(u"""
[ Billy Danzenie ]
How about some hardcore?
(Yeah, we like it raw!) (4x)
How about some hardcore?
[ VERSE 1: Billy Danzenie ]
(Yeah, we like it raw in the streets)
For the fellas on the corner posted up 20 deep
With your ifth on your hip, ready to flip
Whenever you empty your clip, dip, trip your sidekick
You got skill, you best manage to chill
And do yourself a favor, don`t come nowhere near the Hill
With that bullshit, word, money grip, it`ll cost ya
Make you reminisce of Frank Nitty `The Enforcer`
I move with M.O.P.`s Last Generation
Straight up and down, act like you want a confrontation
I packs my gat, I gotta stay strapped
I bust mines, don`t try to sneak up on me from behind
Don`t sleep, I get deep when I creep
I see right now I got to show you it ain`t nothin sweet
Go get your muthaf**kin hammer
And act like you want drama
I send a message to your mama
`Hello, do you know your one son left?
I had license to kill and he had been marked for death
He`s up the Hill in the back of the building with two in the dome
I left him stiffer than a tombstone`
[ Li`l Fame ]
How about some hardcore?
(Yeah, we like it raw!) (4x)
How about some hardcore?
[ VERSE 2: Billy Danzenie ]
(Yeah, we like it rugged in the ghetto)
I used to pack sling shots, but now I`m packin heavy metal
A rugged underground freestyler
Is Li`l Fame, muthaf**ka, slap, Li`l Mallet
When I let off, it`s a burning desire
Niggas increase the peace cause when I release it be rapid fire
For the cause I drop niggas like drawers
Niggas`ll hit the floors from the muthaf**kin .44`s
I`m talkin titles when it`s showtime
f**k around, I have niggas call the injury help line
I bust words in my verse that`ll serve
Even on my first nerve I put herbs to curbs
I ain`t about givin niggas a chance
And I still raise sh*t to make my brother wanna get up and dance
Front, I make it a thrill to kill
Bringin the ruckus, it`s the neighborhood hoods for the Hill that`s real
Me and mics, that`s unlike niggas and dykes
So who wanna skate, cause I`m puttin niggas on ice
Whatever I drop must be rough, rugged and hard more
(Yeah!)
[ Billy Danzenie ]
How about some hardcore?
(Yeah, we like it raw!) (4x)
[ VERSE 3: Billy Danzenie ]
Yo, here I am (So what up?) Get it on, cocksucker
That nigga Bill seem to be a ill black brother
I gets dough from the way I flow
And before I go
You muthaf**kas gonna know
That I ain`t nothin to f**k with - duck quick
I squeeze when I`m stressed
Them teflons`ll tear through your vest
I love a bloodbath (niggas know the half)
You can feel the wrath (Saratoga/St. Marks Ave.)
B-i-l-l-y D-a-n-z-e
n-i-e, me, Billy Danzenie
(Knock, knock) Who`s there? (Li`l Fame)
Li`l Fame who? (Li`l Fame, your nigga)
Boom! Ease up off the trigger
It`s aight, me and shorty go to gunfights
Together we bring the ruckus, right?
We trump tight, aight?
I earned mine, so I`m entitled to a title
(7 f**kin 30) that means I`m homicidal
[ Li`l Fame ]
How about some hardcore?
(Yeah, we like it raw!) (4x)
[ VERSE 4: Li`l Fame ]
Yo, I scream on niggas like a rollercoaster
To them wack muthaf**kas, go hang it up like a poster
Niggas get excited, but don`t excite me
Don`t invite me, I`m splittin niggas` heads where the white be
Try to trash this, this little bastard`ll blast it
Only puttin niggas in comas and caskets
I ain`t a phoney, I put the `mack` in a -roni
I leave you lonely (Yeah, yeah, get on his ass, homie)
Up in your anus, I pack steel that`s stainless
We came to claim this, and Li`l Fame`ll make you famous
I mack hoes, rock shows and stack dough
Cause I`m in effect, knockin muthaf**kas like five-o
I`m catchin other niggas peepin, shit, I ain`t sleepin
I roll deep like a muthaf**kin Puerto-Rican
So when I write my competition looks sadly
For broke-ass niggas I make it happen like Mariah Carey
I got sh*t for niggas that roll bold
Li`l Fame is like a orthopedic shoe, I got mad soul
I`ma kill em before I duck em
Because yo, mother made em, mother had em and muthaf**k em
[ Li`l Fame ]
Knowmsayin?
Li`l Fame up in this muthaf**ka
Givin shoutouts to my man D/R Period
[Name]
Lazy Laz
My man Broke As* Moe
The whole Saratoga Ave.
Youknowmsayin?
Representin for Brooklyn
Most of all my cousin Prince Leroy, Big Mal, rest in peace
[ Billy Danzenie ]
Danzenie up in this muthaf**ka
I`d like to say what`s up to the whole M.O.P.
Brooklyn, period
Them niggas that just don`t give a f**k
[ O.G. Bu-Bang
Bet yo ass, nigga
Hey yo, this muthaf**kin Babyface [Name]
Aka O.G. Bu-Bang
Yo, I wanna say what`s up to the whole muthaf**kin M.O.P. boyyeee
""")
t.save("example-v2_4.id3", version=ID3_V2_4)
t.save("example-v2_3.id3", version=ID3_V2_3)
# Loss of the release date month and day.
# Loss of the comment with description.
t.save("example-v1_1.id3", version=ID3_V1_1)
# Loses what v1.1 loses, and the track #
t.save("example-v1_0.id3", version=ID3_V1_0)
'''
from eyed3.id3.tag import TagTemplate
template = "$artist/"\
"$best_release_date:year - $album/"\
"$artist - $track:num - $title.$file:ext"
print TagTemplate(template).substitute(t, zeropad=True)
'''
| daltonsena/eyed3 | examples/tag_example.py | Python | gpl-2.0 | 6,993 |
import importlib
from collections import defaultdict
from modeful.keymap.common import Common
class Parser(Common):
def parse_keymap(self, keymap_dict):
keymap = {}
for mode, keymap_lines in keymap_dict.items():
keymap[mode] = defaultdict(dict)
for func, shortcut in keymap_lines:
event_name = func.upper().replace('.', '_')
keymap_tmp = keymap[mode]
keys = self._parse_shortcut(shortcut)
for k in keys[:-1]:
keymap_tmp = keymap_tmp[k]
keymap_tmp[keys[-1]] = event_name
return keymap
def _parse_shortcut(self, shortcuts):
s = []
for shortcut in shortcuts.split():
keys = shortcut.split('-')
m = self._get_modifier_code(keys[:-1])
s.append((ord(keys[-1]), m))
return s
| Modeful/poc | modeful/keymap/parser.py | Python | gpl-3.0 | 946 |
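A hypothetical input/output pair for `Parser.parse_keymap()` above. It assumes `Parser()` needs no constructor arguments and that `Common._get_modifier_code()` maps names like `'Ctrl'` to an integer modifier code; neither is shown in this file.

```python
# Hypothetical use of the keymap parser defined above.
keymap_dict = {
    'normal': [
        ('file.open', 'Ctrl-o'),
        ('file.save', 'Ctrl-s'),
    ],
}
keymap = Parser().parse_keymap(keymap_dict)
# keymap['normal'] maps (keycode, modifier) tuples to event names such as
# 'FILE_OPEN' and 'FILE_SAVE'.
```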
import datetime
import logging
import json
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.controllers.util import Response, redirect
from pylons.decorators import validate, jsonify
from pylons.decorators.rest import dispatch_on
from formencode import validators, htmlfill, ForEach, Invalid
from formencode.variabledecode import NestedVariables
from zkpylons.lib.base import BaseController, render
from zkpylons.lib.ssl_requirement import enforce_ssl
from zkpylons.lib.validators import BaseSchema, ProductValidator, ExistingPersonValidator, ExistingInvoiceValidator
import zkpylons.lib.helpers as h
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import ValidAuthKitUser
from zkpylons.lib.mail import email
from zkpylons.model import meta, Invoice, InvoiceItem, Registration, ProductCategory, Product, URLHash
from zkpylons.model.payment import Payment
from zkpylons.model.payment_received import PaymentReceived
from zkpylons.config.lca_info import lca_info
from zkpylons.config.zkpylons_config import file_paths
import zkpylons.lib.pdfgen as pdfgen
import zkpylons.lib.pxpay as pxpay
log = logging.getLogger(__name__)
class RemindSchema(BaseSchema):
# message = validators.String(not_empty=True)
invoices = ForEach(ExistingInvoiceValidator())
class ExistingInvoiceValidator(validators.FancyValidator):
def _to_python(self, value, state):
invoice = Invoice.find_by_id(int(value), False)
if invoice is None:
raise Invalid("Unknown invoice ID.", value, state)
else:
return invoice
def _from_python(self, value, state):
return value.id
class PayInvoiceSchema(BaseSchema):
payment_id = validators.Int(min=1)
class FakePerson():
firstname = "John"
lastname = "Doe"
email_address = "john.doe@example.com"
class InvoiceController(BaseController):
@enforce_ssl(required_all=True)
@authorize(h.auth.Or(h.auth.is_valid_user, h.auth.has_unique_key()))
def __before__(self, **kwargs):
pass
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_new")
def new(self):
c.product_categories = ProductCategory.find_all()
return render("/invoice/new.mako")
@jsonify
def _new(self):
data = json.loads(request.params['invoice'])
person_id = int(data['person_id'])
due_date = datetime.datetime.strptime(data['due_date'], '%d/%m/%Y')
invoice = Invoice(person_id=person_id, due_date=due_date, manual=True, void=None)
for item in data['items']:
invoice_item = InvoiceItem()
if item.has_key('product_id') and item['product_id']:
product = Product.find_by_id(item['product_id'])
category = product.category
invoice_item.product = product
invoice_item.description = product.category.name + ' - ' + product.description
else:
invoice_item.description = item['description']
invoice_item.cost = int(item['cost'])
invoice_item.qty = int(item['qty'])
invoice.items.append(invoice_item)
meta.Session.add(invoice)
meta.Session.commit()
return dict(r=dict(invoice_id=invoice.id))
def generate_hash(self, id):
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_attendee(id), h.auth.has_organiser_role, h.auth.has_unique_key())):
# Raise a no_auth error
h.auth.no_role()
url = h.url_for(action='view', id=id)
c.hash = URLHash.find_by_url(url=url)
if c.hash is None:
c.hash = URLHash()
c.hash.url = url
meta.Session.add(c.hash)
meta.Session.commit()
# create an entry for the payment page (not needed)
# TODO: depending on how the gateway works, you may need to make sure you have permissions for the page you get redirected to
#c.hash = URLHash()
#c.hash.url = h.url_for(action='pay')
#meta.Session.add(c.hash)
#meta.Session.commit()
return render('/invoice/generate_url.mako')
def view(self, id):
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_attendee(id), h.auth.has_organiser_role, h.auth.has_unique_key())):
# Raise a no_auth error
h.auth.no_role()
c.printable = False
c.invoice = Invoice.find_by_id(id, True)
c.payment_received = None
c.payment = None
if c.invoice.is_paid and c.invoice.total > 0:
c.payment_received = c.invoice.good_payments[0]
c.payment = c.payment_received.payment
return render('/invoice/view.mako')
def printable(self, id):
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_attendee(id), h.auth.has_organiser_role, h.auth.has_unique_key())):
# Raise a no_auth error
h.auth.no_role()
c.printable = True
c.invoice = Invoice.find_by_id(id, True)
c.payment_received = None
c.payment = None
if c.invoice.is_paid and c.invoice.total > 0:
c.payment_received = c.invoice.good_payments[0]
c.payment = c.payment_received.payment
return render('/invoice/view_printable.mako')
@authorize(h.auth.has_organiser_role)
def index(self):
c.can_edit = True
c.invoice_collection = Invoice.find_all()
return render('/invoice/list.mako')
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_remind")
def remind(self):
c.invoice_collection = meta.Session.query(Invoice).filter(Invoice.is_paid==False).filter(Invoice.is_void==False).all()
# create dummy person for example:
c.recipient = FakePerson()
return render('/invoice/remind.mako')
@validate(schema=RemindSchema(), form='remind', post_only=True, on_get=True, variable_decode=True)
def _remind(self):
results = self.form_result
for i in results['invoices']:
c.invoice = i
c.recipient = i.person
email(c.recipient.email_address, render('invoice/remind_email.mako'))
h.flash('Email sent to ' + c.recipient.firstname + ' ' + c.recipient.lastname + ' <' + c.recipient.email_address + '>')
redirect_to(action='remind')
def _check_invoice(self, person, invoice, ignore_overdue = False):
c.invoice = invoice
if person.invoices:
if invoice.is_paid or len(invoice.bad_payments) > 0:
c.status = []
if invoice.total==0:
c.status.append('zero balance')
if len(invoice.good_payments) > 0:
c.status.append('paid')
if len(invoice.good_payments)>1:
c.status[-1] += ' (%d times)' % len(invoice.good_payments)
if len(invoice.bad_payments) > 0:
c.status.append('tried to pay')
if len(invoice.bad_payments)>1:
c.status[-1] += ' (%d times)' % len(invoice.bad_payments)
c.status = ' and '.join(c.status)
return render('/invoice/already.mako')
if invoice.is_void:
c.signed_in_person = h.signed_in_person()
return render('/invoice/invalid.mako')
if not ignore_overdue and invoice.is_overdue:
for ii in invoice.items:
if ii.product and not ii.product.available():
return render('/invoice/expired.mako')
return None # All fine
@dispatch_on(POST="_pay")
def pay(self, id):
"""Request confirmation from user
"""
invoice = Invoice.find_by_id(id, True)
person = invoice.person
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_user(person.id), h.auth.has_organiser_role, h.auth.has_unique_key())):
# Raise a no_auth error
h.auth.no_role()
#return render('/registration/really_closed.mako')
error = self._check_invoice(person, invoice)
if error is not None:
return error
c.payment = Payment()
c.payment.amount = invoice.total
c.payment.invoice = invoice
meta.Session.commit()
return render("/invoice/payment.mako")
@authorize(h.auth.has_organiser_role)
@jsonify
def get_invoice(self, id):
"""
Returns a JSON representation of an existing invoice
"""
invoice = Invoice.find_by_id(id, True)
obj = {
'id': invoice.id,
'person_id': invoice.person_id,
'manual': invoice.manual,
'void': invoice.void,
'issue_date': invoice.issue_date.strftime('%d/%m/%Y'),
'due_date': invoice.due_date.strftime('%d/%m/%Y'),
'items': [
{
'product_id': item.product_id,
'description': item.description,
'qty': item.qty,
'cost': item.cost,
} for item in invoice.items],
}
return dict(r=dict(invoice=obj))
@authorize(h.auth.has_organiser_role)
@jsonify
def pay_invoice(self, id):
"""
Pay an invoice via the new angular.js interface
Expects: invoice_id. Assumes total amount is to be paid.
TODO: Validation??
"""
invoice = Invoice.find_by_id(id, True)
person = invoice.person
if not invoice.is_paid:
payment = Payment()
payment.amount = invoice.total
payment.invoice = invoice
payment_received = PaymentReceived(
approved=True,
payment=payment,
invoice_id=invoice.id,
success_code='0',
amount_paid=payment.amount,
currency_used='AUD',
response_text='Approved',
client_ip_zookeepr='127.1.0.1',
client_ip_gateway='127.0.0.1',
email_address=person.email_address,
gateway_ref='Rego Desk Cash'
)
meta.Session.add(payment)
meta.Session.add(payment_received)
meta.Session.commit()
return dict(r=dict(message="Payment recorded"))
else:
return dict(r=dict(message="A payment has already been recorded for this invoice"))
@validate(schema=PayInvoiceSchema(), form='pay', post_only=True, on_get=True, variable_decode=True)
def _pay(self, id):
payment = Payment.find_by_id(self.form_result['payment_id'])
c.invoice = payment.invoice
person = c.invoice.person
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_user(person.id), h.auth.has_organiser_role, h.auth.has_unique_key())):
# Raise a no_auth error
h.auth.no_role()
error = self._check_invoice(person, c.invoice)
if error is not None:
return error
client_ip = request.environ['REMOTE_ADDR']
if 'HTTP_X_FORWARDED_FOR' in request.environ:
client_ip = request.environ['HTTP_X_FORWARDED_FOR']
# Prepare fields for PxPay
params = {
'payment_id': payment.id,
'amount': h.integer_to_currency(payment.amount),
'invoice_id': payment.invoice.id,
'email_address': payment.invoice.person.email_address,
'client_ip' : client_ip,
'return_url' : 'https://conf.linux.org.au/payment/new',
}
(valid, uri) = pxpay.generate_request(params)
if valid != '1':
c.error_msg = "PxPay Generate Request error: " + uri
return render("/payment/gateway_error.mako")
else:
redirect(uri)
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_new")
def refund(self, id):
invoice = Invoice.find_by_id(id)
try:
c.invoice_person = invoice.person.id
        except AttributeError:
c.invoice_person = ''
c.due_date = datetime.date.today().strftime("%d/%m/%Y")
c.product_categories = ProductCategory.find_all()
# The form adds one to the count by default, so we need to decrement it
c.item_count = len(invoice.items) - 1
defaults = dict()
defaults['invoice.person' ] = c.invoice_person
defaults['invoice.due_date'] = c.due_date
for i in range(len(invoice.items)):
item = invoice.items[i]
if item.product:
defaults['invoice.items-' + str(i) + '.product'] = item.product.id
else:
defaults['invoice.items-' + str(i) + '.description'] = item.description
defaults['invoice.items-' + str(i) + '.qty'] = -item.qty
defaults['invoice.items-' + str(i) + '.cost'] = item.cost
form = render("/invoice/new.mako")
return htmlfill.render(form, defaults, use_all_keys=True)
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_pay_manual")
def pay_manual(self, id):
"""Request confirmation from user
"""
invoice = Invoice.find_by_id(id, True)
person = invoice.person
error = self._check_invoice(person, invoice, ignore_overdue=True)
if error is not None:
return error
c.payment = Payment()
c.payment.amount = invoice.total
c.payment.invoice = invoice
meta.Session.commit()
return redirect_to(controller='payment', id=c.payment.id, action='new_manual')
def pdf(self, id):
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_attendee(id), h.auth.has_organiser_role, h.auth.has_unique_key())):
# Raise a no_auth error
h.auth.no_role()
c.invoice = Invoice.find_by_id(id, True)
xml_s = render('/invoice/pdf.mako')
xsl_f = file_paths['zk_root'] + '/zkpylons/templates/invoice/pdf.xsl'
pdf_data = pdfgen.generate_pdf(xml_s, xsl_f)
filename = lca_info['event_shortname'] + '_' + str(c.invoice.id) + '.pdf'
return pdfgen.wrap_pdf_response(pdf_data, filename)
def void(self, id):
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_attendee(id), h.auth.has_organiser_role)):
# Raise a no_auth error
h.auth.no_role()
c.invoice = Invoice.find_by_id(id, True)
if c.invoice.is_void:
h.flash("Invoice was already voided.")
return redirect_to(action='view', id=c.invoice.id)
elif len(c.invoice.payment_received) and h.auth.authorized(h.auth.has_organiser_role):
h.flash("Invoice has a payment applied to it, do you want to " + h.link_to('Refund', h.url_for(action='refund')) + " instead?")
return redirect_to(action='view', id=c.invoice.id)
elif len(c.invoice.payment_received):
h.flash("Cannot void a paid invoice.")
return redirect_to(action='view', id=c.invoice.id)
elif h.auth.authorized(h.auth.has_organiser_role):
c.invoice.void = "Administration Change"
meta.Session.commit()
h.flash("Invoice was voided.")
return redirect_to(action='view', id=c.invoice.id)
else:
c.invoice.void = "User cancellation"
c.person = c.invoice.person
meta.Session.commit()
email(lca_info['contact_email'], render('/invoice/user_voided.mako'))
h.flash("Previous invoice was voided.")
return redirect_to(controller='registration', action='pay', id=c.person.registration.id)
@authorize(h.auth.has_organiser_role)
def unvoid(self, id):
c.invoice = Invoice.find_by_id(id, True)
c.invoice.void = None
c.invoice.manual = True
meta.Session.commit()
h.flash("Invoice was un-voided.")
return redirect_to(action='view', id=c.invoice.id)
@authorize(h.auth.has_organiser_role)
def extend(self, id):
c.invoice = Invoice.find_by_id(id, True)
if c.invoice.is_overdue:
c.invoice.due_date = datetime.datetime.now() + datetime.timedelta(days=1)
else:
c.invoice.due_date = c.invoice.due_date + ((c.invoice.due_date - datetime.datetime.now()) * 2)
meta.Session.commit()
return redirect_to(action='view')
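# Illustrative sketch (the helper name and its standalone form are assumptions, not part of
# the controller above): the ``extend`` action gives an overdue invoice one extra day, and
# otherwise pushes the due date out by twice the remaining time, i.e. the time left to pay
# is tripled.  ``is_overdue`` is approximated here as ``due_date < now``.
def _example_extended_due_date(due_date, now=None):
    import datetime
    now = now or datetime.datetime.now()
    if due_date < now:
        # overdue: one more day from now
        return now + datetime.timedelta(days=1)
    # not overdue: due + 2 * (due - now) == now + 3 * (due - now)
    return due_date + (due_date - now) * 2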
| neillc/zookeepr | zkpylons/controllers/invoice.py | Python | gpl-2.0 | 16,775 |
##########################################################################
#
# Copyright (c) 2015, Esteban Tovagliari. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferUITest
import GafferScene
import GafferSceneUI
import GafferAppleseed
import GafferAppleseedUI
class DocumentationTest( GafferUITest.TestCase ) :
def test( self ) :
self.maxDiff = None
self.assertNodesAreDocumented(
GafferAppleseed,
additionalTerminalPlugTypes = ( GafferScene.ScenePlug, )
)
if __name__ == "__main__":
unittest.main()
| chippey/gaffer | python/GafferAppleseedUITest/DocumentationTest.py | Python | bsd-3-clause | 2,164 |
#!/usr/bin/env python3
import pygame
import ezpygame
from Box2D import b2World, b2PolygonShape
import random
import shapes
from game import *
class DemoScene(ezpygame.Scene):
def __init__(self):
# Called once per game, when game starts
self.world = b2World() # default gravity is (0,-10) and doSleep is True
# terrain_locations = [(1, 0),
# (2, 0),
# (5, 0,),
# (10, 0)]
#
# self.terrain = shapes.create_terrain('water', terrain_locations, self.world)
# Create an object that moves in the box2d world and can be rendered to the screen
#self.many_shapes = [ shapes.LLeftShape(self.world, (random()*100 - 50, random()*30)) for x in range(1,100) ]
self.demo_shape = shapes.LLeftShape(self.world, (5, 5))
# A box2d object that doesn't move and isn't rendered to screen
body_bottom_wall = self.world.CreateStaticBody(
position=(0, -10),
shapes=b2PolygonShape(box=(SCREEN_WIDTH / 2, 5)))
def on_enter(self, previous_scene):
# Called every time the game switches to this scene
pass
def handle_event(self, event):
# Called every time a pygame event is fired
# Processing keyboard input here gives one event per key press
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
# Kick a shape
#kick_shape = self.many_shapes[int(random() * len(self.many_shapes))]
self.demo_shape.body.ApplyLinearImpulse((0, 100), self.demo_shape.body.position, True)
def draw(self, screen):
# Called once per frame, to draw to the screen
screen.fill(black)
#for shape in self.many_shapes:
# shape.draw(screen)
self.demo_shape.draw(screen)
def update(self, dt):
# Called once per frame, to update the state of the game
# Processing keyboard events here lets you track which keys are being held down
# keys = pygame.key.get_pressed()
# if keys[pygame.K_SPACE]:
# self.demo_shape.body.ApplyLinearImpulse((0, 30), self.demo_shape.body.position, True)
# Box2d physics step
self.world.Step(DT_SCALE * dt, VELOCITY_ITERATIONS, POSITION_ITERATIONS)
self.world.ClearForces()
if __name__ == '__main__':
app = ezpygame.Application(title='The Game', resolution=(SCREEN_WIDTH, SCREEN_HEIGHT), update_rate=FPS)
app.run(DemoScene())
| AndrewJamesTurner/Every-Womans-Ground | demo.py | Python | gpl-3.0 | 2,536 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def data_migration(apps, schema_editor):
MicroServiceChainLink = apps.get_model('main', 'MicroServiceChainLink')
# Update delete bagged files to continue processing if there's an error
MicroServiceChainLink.objects.filter(id='63f35161-ba76-4a43-8cfa-c38c6a2d5b2f').update(defaultnextchainlink='7c44c454-e3cc-43d4-abe0-885f93d693c6')
MicroServiceChainLink.objects.filter(id='746b1f47-2dad-427b-8915-8b0cb7acccd8').update(defaultnextchainlink='7c44c454-e3cc-43d4-abe0-885f93d693c6')
class Migration(migrations.Migration):
dependencies = [
('main', '0013_upload_archivesspace_inherit_notes'),
]
operations = [
migrations.RunPython(data_migration),
]
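# Illustrative sketch only (not wired into the Migration above): RunPython also accepts a
# reverse callable, which would make this data migration reversible with ``migrate``.  The
# previous defaultnextchainlink values are not recorded here, so the function below is a
# no-op placeholder showing only the shape of such a callable.
def reverse_data_migration(apps, schema_editor):
    # Intentionally does nothing: the original defaultnextchainlink values are unknown.
    pass
# e.g. operations = [migrations.RunPython(data_migration, reverse_data_migration)]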
| eckardm/archivematica | src/dashboard/src/main/migrations/0014_aic_fixes.py | Python | agpl-3.0 | 806 |
################################################################################
#
# Copyright (c) 2012 The MadGraph Development team and Contributors
#
# This file is a part of the MadGraph 5 project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph license which should accompany this
# distribution.
#
# For more information, please visit: http://madgraph.phys.ucl.ac.be
#
################################################################################
""" This part is not part of the UFO Model but only of MG5 suite.
This file defines how the restrict card can be built automatically. """
import models.build_restriction_lib as build_restrict_lib
all_categories = []
first_category = build_restrict_lib.Category('sm customization')
all_categories.append(first_category)
first_category.add_options(name='light mass = 0 (u d s c e mu)', # name
default=True, # default
rules=[('MASS',[1], 0.0),
('MASS',[2], 0.0),
('MASS',[3], 0.0),
('MASS',[11], 0.0),
('MASS',[13], 0.0)]
)
first_category.add_options(name='b mass = 0',
default=False,
rules=[('MASS',[5], 0.0)]
)
first_category.add_options(name='tau mass = 0',
default=False,
rules=[('MASS',[15], 0.0)]
)
sec_category = build_restrict_lib.Category('mssm customization')
all_categories.append(sec_category)
sec_category.add_options(name='diagonal usqmix matrices',
default=False, # default
rules=[('USQMIX',[1,1], 1.0),
('USQMIX',[2,2], 1.0),
('USQMIX',[3,3], 1.0),
('USQMIX',[4,4], 1.0),
('USQMIX',[5,5], 1.0),
('USQMIX',[6,6], 1.0),
('USQMIX',[3,6], 0.0),
('USQMIX',[6,3], 0.0)]
)
sec_category.add_options(name='diagonal dsqmix matrices',
default=False, # default
rules=[('DSQMIX',[1,1], 1.0),
('DSQMIX',[2,2], 1.0),
('DSQMIX',[3,3], 1.0),
('DSQMIX',[4,4], 1.0),
('DSQMIX',[5,5], 1.0),
('DSQMIX',[6,6], 1.0),
('DSQMIX',[3,6], 0.0),
('DSQMIX',[6,3], 0.0)]
)
sec_category.add_options(name='diagonal selmix matrices',
default=False, # default
rules=[('SELMIX',[1,1], 1.0),
('SELMIX',[2,2], 1.0),
('SELMIX',[3,3], 1.0),
('SELMIX',[4,4], 1.0),
('SELMIX',[5,5], 1.0),
('SELMIX',[6,6], 1.0),
('SELMIX',[3,6], 0.0),
('SELMIX',[6,3], 0.0)]
)
| cms-externals/sherpa | Examples/UFO_MSSM/MSSM/build_restrict.py | Python | gpl-3.0 | 3,624 |
import gc
from unittest import mock
import pytest
from aiohttp.connector import Connection
@pytest.fixture
def key():
return object()
@pytest.fixture
def loop():
return mock.Mock()
@pytest.fixture
def connector():
return mock.Mock()
@pytest.fixture
def protocol():
return mock.Mock(should_close=False)
def test_ctor(connector, key, protocol, loop) -> None:
conn = Connection(connector, key, protocol, loop)
with pytest.warns(DeprecationWarning):
assert conn.loop is loop
assert conn.protocol is protocol
conn.close()
def test_callbacks_on_close(connector, key, protocol, loop) -> None:
conn = Connection(connector, key, protocol, loop)
notified = False
def cb():
nonlocal notified
notified = True
conn.add_callback(cb)
conn.close()
assert notified
def test_callbacks_on_release(connector, key, protocol, loop) -> None:
conn = Connection(connector, key, protocol, loop)
notified = False
def cb():
nonlocal notified
notified = True
conn.add_callback(cb)
conn.release()
assert notified
def test_callbacks_exception(connector, key, protocol, loop) -> None:
conn = Connection(connector, key, protocol, loop)
notified = False
def cb1():
raise Exception
def cb2():
nonlocal notified
notified = True
conn.add_callback(cb1)
conn.add_callback(cb2)
conn.close()
assert notified
def test_del(connector, key, protocol, loop) -> None:
loop.is_closed.return_value = False
conn = Connection(connector, key, protocol, loop)
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
with pytest.warns(ResourceWarning):
del conn
gc.collect()
connector._release.assert_called_with(key, protocol, should_close=True)
msg = {'client_connection': mock.ANY, # conn was deleted
'message': 'Unclosed connection'}
if loop.get_debug():
msg['source_traceback'] = mock.ANY
loop.call_exception_handler.assert_called_with(msg)
def test_close(connector, key, protocol, loop) -> None:
conn = Connection(connector, key, protocol, loop)
assert not conn.closed
conn.close()
assert conn._protocol is None
connector._release.assert_called_with(key, protocol, should_close=True)
assert conn.closed
def test_release(connector, key, protocol, loop) -> None:
conn = Connection(connector, key, protocol, loop)
assert not conn.closed
conn.release()
assert not protocol.transport.close.called
assert conn._protocol is None
connector._release.assert_called_with(key, protocol, should_close=False)
assert conn.closed
def test_release_proto_should_close(connector, key, protocol, loop) -> None:
protocol.should_close = True
conn = Connection(connector, key, protocol, loop)
assert not conn.closed
conn.release()
assert not protocol.transport.close.called
assert conn._protocol is None
connector._release.assert_called_with(key, protocol, should_close=True)
assert conn.closed
def test_release_released(connector, key, protocol, loop) -> None:
conn = Connection(connector, key, protocol, loop)
conn.release()
connector._release.reset_mock()
conn.release()
assert not protocol.transport.close.called
assert conn._protocol is None
assert not connector._release.called
| arthurdarcet/aiohttp | tests/test_client_connection.py | Python | apache-2.0 | 3,420 |
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Utilities for sending data.
Author: Jeff Kinnison (jkinniso@nd.edu)
"""
import json
import pika
class PikaProducer(object):
"""
Utility for sending job data to a set of endpoints.
"""
def __init__(self, rabbitmq_url, exchange, exchange_type="direct", routing_keys=[]):
"""
Instantiate a new PikaProducer.
Arguments:
rabbitmq_url -- the url of the RabbitMQ server to send to
exchange -- the name of the exchange to send to
Keyword Arguments:
        exchange_type -- one of 'direct', 'topic', 'fanout', 'headers'
                         (default 'direct')
        routing_keys -- the routing keys to the endpoints for this producer
                        (default [])
"""
self._url = rabbitmq_url
self._exchange = exchange
self._exchange_type = exchange_type
self._routing_keys = routing_keys
self._connection = None # RabbitMQ connection object
self._channel = None # RabbitMQ channel object
import random
self._name = random.randint(0,100)
def __call__(self, data):
"""
Publish data to the RabbitMQ server.
Arguments:
data -- JSON serializable data to send
"""
        if self._connection is None:  # Start the connection if it is inactive
            self.start()
        # Serialize and send the data now that a connection is available
        message = self.pack_data(data)
        self.send_data(message)
def add_routing_key(self, key):
"""
Add a new endpoint that will receive this data.
Arguments:
key -- the routing key for the new endpoint
"""
if key not in self._routing_keys:
#print("Adding key %s to %s" % (key, self._name))
self._routing_keys.append(key)
#print(self._routing_keys)
def remove_routing_key(self, key):
"""
Stop sending data to an existing endpoint.
Arguments:
key -- the routing key for the existing endpoint
"""
try:
self._routing_keys.remove(key)
except ValueError:
pass
def pack_data(self, data):
"""
JSON-serialize the data for transport.
Arguments:
data -- JSON-serializable data
"""
try: # Generate a JSON string from the data
msg = json.dumps(data)
except TypeError as e: # Generate and return an error if serialization fails
msg = json.dumps({"err": str(e)})
finally:
return msg
def send_data(self, data):
"""
Send the data to all active endpoints.
Arguments:
data -- the message to send
"""
if self._channel is not None: # Make sure the connection is active
for key in self._routing_keys: # Send to all endpoints
#print(self._exchange, key, self._name)
self._channel.basic_publish(exchange = self._exchange,
routing_key=key,
body=data)
def start(self):
"""
Open a connection if one does not exist.
"""
print("Starting new connection")
if self._connection is None:
print("Creating connection object")
self._connection = pika.BlockingConnection(pika.URLParameters(self._url))
self._channel = self._connection.channel()
self._channel.exchange_declare(exchange=self._exchange,
type=self._exchange_type)
def shutdown(self):
"""
Close an existing connection.
"""
if self._channel is not None:
self._channel.close()
def _on_connection_open(self, unused_connection):
"""
Create a new channel if the connection opens successful.
Arguments:
unused_connection -- a reference to self._connection
"""
print("Connection is open")
self._connection.channel(on_open_callback=self._on_channel_open)
def _on_connection_close(self, connection, code, text):
"""
Actions to take when the connection is closed for any reason.
Arguments:
connection -- the connection that was closed (same as self._connection)
code -- response code from the RabbitMQ server
text -- response body from the RabbitMQ server
"""
print("Connection is closed")
self._channel = None
self._connection = None
def _on_channel_open(self, channel):
"""
Actions to take when the channel opens.
Arguments:
channel -- the newly opened channel
"""
print("Channel is open")
self._channel = channel
self._channel.add_on_close_callback(self._on_channel_close)
self._declare_exchange()
def _on_channel_close(self, channel, code, text):
"""
Actions to take when the channel closes for any reason.
Arguments:
channel -- the channel that was closed (same as self._channel)
code -- response code from the RabbitMQ server
text -- response body from the RabbitMQ server
"""
print("Channel is closed")
self._connection.close()
def _declare_exchange(self):
"""
Set up the exchange to publish to even if it already exists.
"""
print("Exchange is declared")
        self._channel.exchange_declare(exchange=self._exchange,
                                       type=self._exchange_type)
if __name__ == "__main__":
import time
config = {
"url": "amqp://guest:guest@localhost:5672",
"exchange": "simstream",
"routing_key": "test_consumer",
"exchange_type": "topic"
}
producer = PikaProducer(config["url"],
config["exchange"],
exchange_type=config["exchange_type"],
routing_keys=[config["routing_key"]])
producer.start()
while True:
try:
time.sleep(5)
data = str(time.time()) + ": Hello SimStream"
producer.send_data(data)
except KeyboardInterrupt:
producer.shutdown()
| gouravshenoy/airavata | sandbox/simstream/simstream/pikaproducer.py | Python | apache-2.0 | 7,107 |
from __future__ import print_function
import pandas as pd
from bokeh._legacy_charts import Bar
from bokeh.models import NumeralTickFormatter
from numpy import round
from .utils import write_plot, read_data, fix_nan_inf
@write_plot('culture')
def plot(newest_changes):
df, date_range = read_data(newest_changes, 'culture')
# remove nan genders and nan rows
del df['nan']
df = df[list(map(lambda x: not pd.isnull(x), df.index))]
has_changes = df.abs().sum().sum() != 0
if not has_changes:
return None, None, None, False
df['total'] = df.sum(axis=1)
df['nonbin'] = df['total'] - df['male'] - df['female']
df['fem_per'] = df['female'] / (df['total']) * 100
df['nonbin_per'] = df['nonbin'] / df['total'] * 100
df['fem_per_million'] = df['fem_per'] * 10000
df['nonbin_per_million'] = df['nonbin_per'] * 10000
fix_nan_inf(df['fem_per'])
fix_nan_inf(df['nonbin_per'])
fix_nan_inf(df['fem_per_million'])
fix_nan_inf(df['nonbin_per_million'])
# sort, process
dfs = df.sort_values('female')
dfs = round(dfs, decimals=2)
interesante = ['female', 'male', 'nonbin', 'total', 'fem_per']
p = Bar(dfs[['female', 'male']],
stacked=True,
xlabel="Culture",
ylabel="Total gendered biographies",
width=800,
height=500,
legend='top_left')
p._yaxis.formatter = NumeralTickFormatter(format='0,0')
htmltable = dfs[interesante].sort_values('female', ascending=False)
htmltable.columns = ['Female', 'Male', 'Non Binary', 'Total', 'Female (%)']
top_rows = htmltable.head(10)
bottom_rows = htmltable[::-1].head(10)
table = [top_rows, bottom_rows]
return p, date_range, table, True
if __name__ == "__main__":
print(plot('newest'))
print(plot('newest-changes'))
| vivekiitkgp/WIGI-website | plots/gender_by_culture.py | Python | mit | 1,845 |
# coding=UTF-8
"""
Short codes - types, a database/allocator object, and so on
"""
from __future__ import print_function, absolute_import, division
import six
import logging
logger = logging.getLogger(__name__)
class ShortCodeType(object):
BITS_8 = 0 # 8 bit short code
BITS_16 = 1 # 16 bit short code
BITS_32 = 2 # 32 bit short code
class ShortCodeDatabase(object):
def __init__(self):
self.path_to_sc = {} # dict(path => sc_type, sc)
self.sc_to_path = {} # dict (sc_type, sc => path)
    def notify(self, path_name, sc_type, sc):
        """
        Inform the database that path name has a particular short code.
        Used by reader when reading in a database from a TSLF file.
        """
        self.path_to_sc[path_name] = (sc_type, sc)
        self.sc_to_path[(sc_type, sc)] = path_name
    def allocate(self, path_name):
        """
        Return a new short code for a path.
        This does not imply that it's in effect - call notify for that
        :param path_name: path to allocate a short code for
        :return: sc_type, sc
        """
        # Note: the policy below (sequential codes, narrowest type that fits) is an
        # assumption made to complete this stub, not a documented TSLF rule.
        sc = len(self.path_to_sc)
        if sc < 2 ** 8:
            sc_type = ShortCodeType.BITS_8
        elif sc < 2 ** 16:
            sc_type = ShortCodeType.BITS_16
        else:
            sc_type = ShortCodeType.BITS_32
        return sc_type, sc
| smok-serwis/tslf-format | tslfformat/framing/shortcodes.py | Python | mit | 987 |
"""This module holds classes for image loading and manipulation."""
import copy
import io
import pathlib
from collections import Counter
from datetime import datetime
from io import BytesIO, BufferedReader
import re
import os.path as osp
import os
import warnings
from typing import Union, Sequence, List, Any, Tuple, Optional, BinaryIO
import pydicom
from pydicom.errors import InvalidDicomError
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image as pImage
from scipy import ndimage
import scipy.ndimage.filters as spf
import argue
from .utilities import is_close
from .geometry import Point
from .io import get_url, TemporaryZipDirectory, retrieve_filenames, is_dicom_image, retrieve_dicom_file
from .profile import stretch as stretcharray
from .typing import NumberLike
from ..settings import get_dicom_cmap, PATH_TRUNCATION_LENGTH
ARRAY = 'Array'
DICOM = 'DICOM'
IMAGE = 'Image'
FILE_TYPE = 'file'
STREAM_TYPE = 'stream'
MM_PER_INCH = 25.4
ImageLike = Union['DicomImage', 'ArrayImage', 'FileImage', 'LinacDicomImage']
def equate_images(image1: ImageLike, image2: ImageLike) -> Tuple[ImageLike, ImageLike]:
"""Crop and resize two images to make them:
* The same pixel dimensions
* The same DPI
The usefulness of the function comes when trying to compare images from different sources.
    The best example is calculating gamma between a machine log fluence map and an EPID image. The physical
    and pixel dimensions must be equated and the SID accounted for before such a comparison is meaningful.
Parameters
----------
image1 : {:class:`~pylinac.core.image.ArrayImage`, :class:`~pylinac.core.image.DicomImage`, :class:`~pylinac.core.image.FileImage`}
Must have DPI and SID.
image2 : {:class:`~pylinac.core.image.ArrayImage`, :class:`~pylinac.core.image.DicomImage`, :class:`~pylinac.core.image.FileImage`}
Must have DPI and SID.
Returns
-------
image1 : :class:`~pylinac.core.image.ArrayImage`
image2 : :class:`~pylinac.core.image.ArrayImage`
The returns are new instances of Images.
"""
image1 = copy.deepcopy(image1)
image2 = copy.deepcopy(image2)
# crop images to be the same physical size
# ...crop height
physical_height_diff = image1.physical_shape[0] - image2.physical_shape[0]
if physical_height_diff < 0: # image2 is bigger
img = image2
else:
img = image1
pixel_height_diff = abs(int(round(-physical_height_diff * img.dpmm / 2)))
img.remove_edges(pixel_height_diff, edges=('top', 'bottom'))
# ...crop width
physical_width_diff = image1.physical_shape[1] - image2.physical_shape[1]
if physical_width_diff > 0:
img = image1
else:
img = image2
pixel_width_diff = abs(int(round(physical_width_diff*img.dpmm/2)))
img.remove_edges(pixel_width_diff, edges=('left', 'right'))
# resize images to be of the same shape
zoom_factor = image1.shape[1] / image2.shape[1]
image2_array = ndimage.interpolation.zoom(image2.as_type(float), zoom_factor)
image2 = load(image2_array, dpi=image2.dpi * zoom_factor)
return image1, image2
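# Hedged usage sketch for equate_images (the file names, DPI and SID below are placeholders,
# not values shipped with this library): load an EPID DICOM image and a fluence map exported
# as TIFF, then equate them so a pixel-by-pixel comparison such as gamma becomes meaningful.
def _equate_images_example():  # pragma: no cover - illustrative only, never called by pylinac
    epid = load('epid.dcm')
    fluence = load('log_fluence.tif', dpi=25.4, sid=1000)
    # after this call both images share the same pixel dimensions and DPI
    return equate_images(epid, fluence)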
def is_image(path: Union[str, io.BytesIO, ImageLike, np.ndarray]) -> bool:
"""Determine whether the path is a valid image file.
Returns
-------
bool
"""
return any((_is_array(path), _is_dicom(path), _is_image_file(path)))
def retrieve_image_files(path: str) -> List[str]:
"""Retrieve the file names of all the valid image files in the path.
Returns
-------
list
Contains strings pointing to valid image paths.
"""
return retrieve_filenames(directory=path, func=is_image)
def load(path: Union[str, ImageLike, np.ndarray, BinaryIO], **kwargs) -> ImageLike:
"""Load a DICOM image, JPG/TIF/BMP image, or numpy 2D array.
Parameters
----------
path : str, file-object
The path to the image file or data stream or array.
kwargs
See :class:`~pylinac.core.image.FileImage`, :class:`~pylinac.core.image.DicomImage`,
or :class:`~pylinac.core.image.ArrayImage` for keyword arguments.
Returns
-------
::class:`~pylinac.core.image.FileImage`, :class:`~pylinac.core.image.ArrayImage`, or :class:`~pylinac.core.image.DicomImage`
Return type depends on input image.
Examples
--------
Load an image from a file and then apply a filter::
>>> from pylinac.core.image import load
>>> my_image = r"C:\QA\image.tif"
>>> img = load(my_image) # returns a FileImage
>>> img.filter(5)
Loading from an array is just like loading from a file::
>>> arr = np.arange(36).reshape(6, 6)
>>> img = load(arr) # returns an ArrayImage
"""
if isinstance(path, BaseImage):
return path
if _is_array(path):
return ArrayImage(path, **kwargs)
elif _is_dicom(path):
return DicomImage(path, **kwargs)
elif _is_image_file(path):
return FileImage(path, **kwargs)
else:
raise TypeError(f"The argument `{path}` was not found to be a valid DICOM file, Image file, or array")
def load_url(url: str, progress_bar: bool = True, **kwargs) -> ImageLike:
"""Load an image from a URL.
Parameters
----------
url : str
A string pointing to a valid URL that points to a file.
.. note:: For some images (e.g. Github), the raw binary URL must be used, not simply the basic link.
progress_bar: bool
Whether to display a progress bar of download status.
"""
filename = get_url(url, progress_bar=progress_bar)
return load(filename, **kwargs)
@argue.options(method=('mean', 'max', 'sum'))
def load_multiples(image_file_list: Sequence, method: str = 'mean', stretch_each: bool = True, **kwargs) -> ImageLike:
"""Combine multiple image files into one superimposed image.
Parameters
----------
image_file_list : list
A list of the files to be superimposed.
method : {'mean', 'max', 'sum'}
A string specifying how the image values should be combined.
stretch_each : bool
Whether to normalize the images being combined by stretching their high/low values to the same values across images.
kwargs :
Further keyword arguments are passed to the load function and stretch function.
Examples
--------
Load multiple images::
>>> from pylinac.core.image import load_multiples
>>> paths = ['starshot1.tif', 'starshot2.tif']
>>> superimposed_img = load_multiples(paths)
"""
# load images
img_list = [load(path, **kwargs) for path in image_file_list]
first_img = img_list[0]
# check that all images are the same size and stretch if need be
for img in img_list:
if img.shape != first_img.shape:
raise ValueError("Images were not the same shape")
if stretch_each:
img.array = stretcharray(img.array, fill_dtype=kwargs.get('dtype'))
# stack and combine arrays
new_array = np.dstack(tuple(img.array for img in img_list))
if method == 'mean':
combined_arr = np.mean(new_array, axis=2)
elif method == 'max':
combined_arr = np.max(new_array, axis=2)
elif method == 'sum':
combined_arr = np.sum(new_array, axis=2)
# replace array of first object and return
first_img.array = combined_arr
return first_img
def _is_dicom(path: Union[str, io.BytesIO, ImageLike, np.ndarray]) -> bool:
"""Whether the file is a readable DICOM file via pydicom."""
return is_dicom_image(file=path)
def _is_image_file(path: str) -> bool:
"""Whether the file is a readable image file via Pillow."""
try:
pImage.open(path)
return True
    except Exception:
return False
def _is_array(obj: Any) -> bool:
"""Whether the object is a numpy array."""
return isinstance(obj, np.ndarray)
class BaseImage:
"""Base class for the Image classes.
Attributes
----------
path : str
The path to the image file.
array : numpy.ndarray
The actual image pixel array.
"""
def __init__(self, path: Union[str, BytesIO, ImageLike, np.ndarray, BufferedReader]):
"""
Parameters
----------
path : str
The path to the image.
"""
        source: str  # either FILE_TYPE or STREAM_TYPE
if isinstance(path, (str, pathlib.Path)) and not osp.isfile(path):
raise FileExistsError(f"File `{path}` does not exist. Verify the file path name.")
elif isinstance(path, (str, pathlib.Path)) and osp.isfile(path):
self.path = path
self.base_path = osp.basename(path)
self.source = FILE_TYPE
else:
self.source = STREAM_TYPE
path.seek(0)
try:
self.path = str(pathlib.Path(path.name))
except AttributeError:
self.path = ''
@property
def truncated_path(self) -> str:
if self.source == FILE_TYPE:
if len(self.path) > PATH_TRUNCATION_LENGTH:
return self.path[:PATH_TRUNCATION_LENGTH // 2] + '...' + self.path[-PATH_TRUNCATION_LENGTH // 2:]
else:
return self.path
else:
return '' # was from stream, no path
@classmethod
def from_multiples(cls, filelist: List[str], method: str='mean', stretch: bool=True, **kwargs) -> ImageLike:
"""Load an instance from multiple image items. See :func:`~pylinac.core.image.load_multiples`."""
return load_multiples(filelist, method, stretch, **kwargs)
@property
def center(self) -> Point:
"""Return the center position of the image array as a Point."""
x_center = (self.shape[1] / 2) - 0.5
y_center = (self.shape[0] / 2) - 0.5
return Point(x_center, y_center)
@property
def physical_shape(self) -> Tuple[float, float]:
"""The physical size of the image in mm."""
return self.shape[0] / self.dpmm, self.shape[1] / self.dpmm
def date_created(self, format: str="%A, %B %d, %Y") -> str:
date = None
try:
date = datetime.strptime(self.metadata.InstanceCreationDate+str(round(float(self.metadata.InstanceCreationTime))), "%Y%m%d%H%M%S")
date = date.strftime(format)
except (AttributeError, ValueError):
try:
date = datetime.strptime(self.metadata.StudyDate, "%Y%m%d")
date = date.strftime(format)
except:
pass
if date is None:
try:
date = datetime.fromtimestamp(osp.getctime(self.path)).strftime(format)
except AttributeError:
date = 'Unknown'
return date
def plot(self, ax: plt.Axes=None, show: bool=True, clear_fig: bool=False, **kwargs) -> plt.Axes:
"""Plot the image.
Parameters
----------
ax : matplotlib.Axes instance
The axis to plot the image to. If None, creates a new figure.
show : bool
Whether to actually show the image. Set to false when plotting multiple items.
clear_fig : bool
Whether to clear the prior items on the figure before plotting.
"""
if ax is None:
fig, ax = plt.subplots()
if clear_fig:
plt.clf()
ax.imshow(self.array, cmap=get_dicom_cmap(), **kwargs)
if show:
plt.show()
return ax
@argue.options(kind=('median', 'gaussian'))
def filter(self, size: Union[float, int]=0.05, kind: str='median') -> None:
"""Filter the profile.
Parameters
----------
size : int, float
Size of the median filter to apply.
If a float, the size is the ratio of the length. Must be in the range 0-1.
E.g. if size=0.1 for a 1000-element array, the filter will be 100 elements.
If an int, the filter is the size passed.
kind : {'median', 'gaussian'}
The kind of filter to apply. If gaussian, *size* is the sigma value.
"""
if isinstance(size, float):
if 0 < size < 1:
size *= len(self.array)
size = max(size, 1)
else:
raise TypeError("Float was passed but was not between 0 and 1")
if kind == 'median':
self.array = ndimage.median_filter(self.array, size=size)
elif kind == 'gaussian':
self.array = ndimage.gaussian_filter(self.array, sigma=size)
def crop(self, pixels: int=15, edges: Tuple[str, ...]=('top', 'bottom', 'left', 'right')) -> None:
"""Removes pixels on all edges of the image in-place.
Parameters
----------
pixels : int
Number of pixels to cut off all sides of the image.
edges : tuple
Which edges to remove from. Can be any combination of the four edges.
"""
if pixels < 0:
raise ValueError("Pixels to remove must be a positive number")
if 'top' in edges:
self.array = self.array[pixels:, :]
if 'bottom' in edges:
self.array = self.array[:-pixels, :]
if 'left' in edges:
self.array = self.array[:, pixels:]
if 'right' in edges:
self.array = self.array[:, :-pixels]
def remove_edges(self, pixels: int=15, edges: Tuple[str, ...]=('top', 'bottom', 'left', 'right')) -> None:
"""Removes pixels on all edges of the image in-place.
Parameters
----------
pixels : int
Number of pixels to cut off all sides of the image.
edges : tuple
Which edges to remove from. Can be any combination of the four edges.
"""
        warnings.warn("`remove_edges` is deprecated and will be removed in a future version. Use `crop` instead", DeprecationWarning)
self.crop(pixels=pixels, edges=edges)
def flipud(self) -> None:
""" Flip the image array upside down in-place. Wrapper for np.flipud()"""
self.array = np.flipud(self.array)
def fliplr(self) -> None:
""" Flip the image array upside down in-place. Wrapper for np.fliplr()"""
self.array = np.fliplr(self.array)
def invert(self) -> None:
"""Invert (imcomplement) the image."""
orig_array = self.array
self.array = -orig_array + orig_array.max() + orig_array.min()
def roll(self, direction: str='x', amount: int=1) -> None:
"""Roll the image array around in-place. Wrapper for np.roll().
Parameters
----------
direction : {'x', 'y'}
The axis to roll over.
amount : int
The amount of elements to roll over.
"""
axis = 1 if direction == 'x' else 0
self.array = np.roll(self.array, amount, axis=axis)
def rot90(self, n: int=1) -> None:
"""Wrapper for numpy.rot90; rotate the array by 90 degrees CCW."""
self.array = np.rot90(self.array, n)
@argue.options(kind=('high', 'low'))
def threshold(self, threshold: int, kind: str='high') -> None:
"""Apply a high- or low-pass threshold filter.
Parameters
----------
threshold : int
The cutoff value.
kind : str
If ``high`` (default), will apply a high-pass threshold. All values above the cutoff are left as-is.
Remaining points are set to 0.
If ``low``, will apply a low-pass threshold.
"""
if kind == 'high':
            self.array = np.where(self.array >= threshold, self.array, 0)
        else:
            self.array = np.where(self.array <= threshold, self.array, 0)
def as_binary(self, threshold: int) -> ImageLike:
"""Return a binary (black & white) image based on the given threshold.
Parameters
----------
threshold : int, float
The threshold value. If the value is above or equal to the threshold it is set to 1, otherwise to 0.
Returns
-------
ArrayImage
"""
array = np.where(self.array >= threshold, 1, 0)
return ArrayImage(array)
def dist2edge_min(self, point: Union[Point, Tuple]) -> float:
"""Calculates minimum distance from given point to image edges.
Parameters
----------
point : geometry.Point, tuple
Returns
-------
float
"""
if isinstance(point, tuple):
point = Point(point)
rows = self.shape[0]
cols = self.shape[1]
disttoedge = np.zeros(4)
disttoedge[0] = rows - point.y
disttoedge[1] = cols - point.x
disttoedge[2] = point.y
disttoedge[3] = point.x
return min(disttoedge)
def ground(self) -> float:
"""Ground the profile such that the lowest value is 0.
.. note::
This will also "ground" profiles that are negative or partially-negative.
For such profiles, be careful that this is the behavior you desire.
Returns
-------
float
The amount subtracted from the image.
"""
min_val = self.array.min()
self.array -= min_val
return min_val
def normalize(self, norm_val: Union[str, NumberLike]='max') -> None:
"""Normalize the image values to the given value.
Parameters
----------
norm_val : str, number
If a string, must be 'max', which normalizes the values to the maximum value.
If a number, normalizes all values to that number.
"""
if norm_val == 'max':
val = self.array.max()
else:
val = norm_val
self.array = self.array / val
def check_inversion(self, box_size: int=20, position: Sequence=(0.0, 0.0)) -> None:
"""Check the image for inversion by sampling the 4 image corners.
If the average value of the four corners is above the average pixel value, then it is very likely inverted.
Parameters
----------
box_size : int
The size in pixels of the corner box to detect inversion.
position : 2-element sequence
The location of the sampling boxes.
"""
row_pos = max(int(position[0]*self.array.shape[0]), 1)
col_pos = max(int(position[1]*self.array.shape[1]), 1)
lt_upper = self.array[row_pos: row_pos+box_size, col_pos: col_pos+box_size]
rt_upper = self.array[row_pos: row_pos+box_size, -col_pos-box_size: -col_pos]
lt_lower = self.array[-row_pos-box_size:-row_pos, col_pos: col_pos+box_size]
rt_lower = self.array[-row_pos-box_size:-row_pos, -col_pos-box_size:-col_pos]
avg = np.mean((lt_upper, lt_lower, rt_upper, rt_lower))
if avg > np.mean(self.array.flatten()):
self.invert()
def check_inversion_by_histogram(self, percentiles=(5, 50, 95)) -> bool:
"""Check the inversion of the image using histogram analysis. The assumption is that the image
is mostly background-like values and that there is a relatively small amount of dose getting to the image
(e.g. a picket fence image). This function looks at the distance from one percentile to another to determine
if the image should be inverted.
Parameters
----------
percentiles : 3-element tuple
The 3 percentiles to compare. Default is (5, 50, 95). Recommend using (x, 50, y). To invert the other way
(where pixel value is *decreasing* with dose, reverse the percentiles, e.g. (95, 50, 5).
"""
was_inverted = False
p5 = np.percentile(self.array, percentiles[0])
p50 = np.percentile(self.array, percentiles[1])
p95 = np.percentile(self.array, percentiles[2])
dist_to_5 = abs(p50 - p5)
dist_to_95 = abs(p50 - p95)
if dist_to_5 > dist_to_95:
was_inverted = True
self.invert()
return was_inverted
@argue.bounds(threshold=(0.0, 1.0))
def gamma(self, comparison_image: ImageLike, doseTA: NumberLike=1, distTA: NumberLike=1,
threshold: NumberLike=0.1, ground: bool=True, normalize: bool=True) -> np.ndarray:
"""Calculate the gamma between the current image (reference) and a comparison image.
.. versionadded:: 1.2
The gamma calculation is based on `Bakai et al
<http://iopscience.iop.org/0031-9155/48/21/006/>`_ eq.6,
which is a quicker alternative to the standard Low gamma equation.
Parameters
----------
comparison_image : {:class:`~pylinac.core.image.ArrayImage`, :class:`~pylinac.core.image.DicomImage`, or :class:`~pylinac.core.image.FileImage`}
The comparison image. The image must have the same DPI/DPMM to be comparable.
The size of the images must also be the same.
doseTA : int, float
Dose-to-agreement in percent; e.g. 2 is 2%.
distTA : int, float
Distance-to-agreement in mm.
threshold : float
The dose threshold percentage of the maximum dose, below which is not analyzed.
Must be between 0 and 1.
ground : bool
Whether to "ground" the image values. If true, this sets both datasets to have the minimum value at 0.
This can fix offset errors in the data.
normalize : bool
Whether to normalize the images. This sets the max value of each image to the same value.
Returns
-------
gamma_map : numpy.ndarray
The calculated gamma map.
See Also
--------
:func:`~pylinac.core.image.equate_images`
"""
# error checking
if not is_close(self.dpi, comparison_image.dpi, delta=0.1):
raise AttributeError(f"The image DPIs to not match: {self.dpi:.2f} vs. {comparison_image.dpi:.2f}")
same_x = is_close(self.shape[1], comparison_image.shape[1], delta=1.1)
same_y = is_close(self.shape[0], comparison_image.shape[0], delta=1.1)
if not (same_x and same_y):
raise AttributeError(f"The images are not the same size: {self.shape} vs. {comparison_image.shape}")
# set up reference and comparison images
ref_img = ArrayImage(copy.copy(self.array))
ref_img.check_inversion_by_histogram()
if ground:
ref_img.ground()
if normalize:
ref_img.normalize()
comp_img = ArrayImage(copy.copy(comparison_image.array))
comp_img.check_inversion_by_histogram()
if ground:
comp_img.ground()
if normalize:
comp_img.normalize()
# invalidate dose values below threshold so gamma doesn't calculate over it
ref_img.array[ref_img < threshold * np.max(ref_img)] = np.NaN
# convert distance value from mm to pixels
distTA_pixels = self.dpmm * distTA
# construct image gradient using sobel filter
img_x = spf.sobel(ref_img.as_type(np.float32), 1)
img_y = spf.sobel(ref_img.as_type(np.float32), 0)
grad_img = np.hypot(img_x, img_y)
# equation: (measurement - reference) / sqrt ( doseTA^2 + distTA^2 * image_gradient^2 )
subtracted_img = np.abs(comp_img - ref_img)
denominator = np.sqrt(((doseTA / 100.0) ** 2) + ((distTA_pixels ** 2) * (grad_img ** 2)))
gamma_map = subtracted_img / denominator
return gamma_map
def as_type(self, dtype) -> np.ndarray:
return self.array.astype(dtype)
@property
def shape(self) -> Tuple[int, int]:
return self.array.shape
@property
def size(self) -> int:
return self.array.size
@property
def ndim(self) -> int:
return self.array.ndim
@property
def dtype(self) -> np.dtype:
return self.array.dtype
def sum(self) -> float:
return self.array.sum()
def ravel(self) -> np.ndarray:
return self.array.ravel()
@property
def flat(self) -> np.ndarray:
return self.array.flat
def __len__(self):
return len(self.array)
def __getitem__(self, item):
return self.array[item]
class DicomImage(BaseImage):
"""An image from a DICOM RTImage file.
Attributes
----------
metadata : pydicom Dataset
The dataset of the file as returned by pydicom without pixel data.
"""
metadata: pydicom.FileDataset
_sid = NumberLike
_dpi = NumberLike
def __init__(self, path: Union[str, BytesIO, BufferedReader], *, dtype=None, dpi: NumberLike=None, sid: NumberLike=None):
"""
Parameters
----------
path : str, file-object
The path to the file or the data stream.
dtype : dtype, None, optional
The data type to cast the image data as. If None, will use whatever raw image format is.
dpi : int, float
The dots-per-inch of the image, defined at isocenter.
.. note:: If a DPI tag is found in the image, that value will override the parameter, otherwise this one
will be used.
sid : int, float
The Source-to-Image distance in mm.
"""
super().__init__(path)
self._sid = sid
self._dpi = dpi
# read the file once to get just the DICOM metadata
self.metadata = retrieve_dicom_file(path)
self._original_dtype = self.metadata.pixel_array.dtype
# read a second time to get pixel data
try:
path.seek(0)
except AttributeError:
pass
ds = retrieve_dicom_file(path)
if dtype is not None:
self.array = ds.pixel_array.astype(dtype)
else:
self.array = ds.pixel_array.copy()
# convert values to HU or CU: real_values = slope * raw + intercept
has_all_rescale_tags = hasattr(self.metadata, 'RescaleSlope') and hasattr(self.metadata, 'RescaleIntercept') and hasattr(self.metadata, 'PixelIntensityRelationshipSign')
has_some_rescale_tags = hasattr(self.metadata, 'RescaleSlope') and hasattr(self.metadata, 'RescaleIntercept')
is_ct_storage = self.metadata.SOPClassUID.name == 'CT Image Storage'
if has_all_rescale_tags:
self.array = ((self.metadata.RescaleSlope*self.array) + self.metadata.RescaleIntercept)*self.metadata.PixelIntensityRelationshipSign
elif is_ct_storage or has_some_rescale_tags:
self.array = (self.metadata.RescaleSlope * self.array) + self.metadata.RescaleIntercept
else:
# invert it
orig_array = self.array
self.array = -orig_array + orig_array.max() + orig_array.min()
def save(self, filename: str) -> str:
"""Save the image instance back out to a .dcm file.
Returns
-------
A string pointing to the new filename.
"""
if self.metadata.SOPClassUID.name == 'CT Image Storage':
self.array = (self.array - int(self.metadata.RescaleIntercept)) / int(self.metadata.RescaleSlope)
self.metadata.PixelData = self.array.astype(self._original_dtype).tobytes()
self.metadata.save_as(filename)
return filename
@property
def sid(self) -> NumberLike:
"""The Source-to-Image in mm."""
try:
return float(self.metadata.RTImageSID)
except:
return self._sid
@property
def dpi(self) -> NumberLike:
"""The dots-per-inch of the image, defined at isocenter."""
try:
return self.dpmm * MM_PER_INCH
except:
return self._dpi
@property
def dpmm(self) -> NumberLike:
"""The Dots-per-mm of the image, defined at isocenter. E.g. if an EPID image is taken at 150cm SID,
the dpmm will scale back to 100cm."""
dpmm = None
for tag in ('PixelSpacing', 'ImagePlanePixelSpacing'):
mmpd = self.metadata.get(tag)
if mmpd is not None:
dpmm = 1 / mmpd[0]
break
if dpmm is not None and self.sid is not None:
dpmm *= self.sid / 1000
elif dpmm is None and self._dpi is not None:
dpmm = self._dpi / MM_PER_INCH
return dpmm
@property
def cax(self) -> Point:
"""The position of the beam central axis. If no DICOM translation tags are found then the center is returned.
Uses this tag: https://dicom.innolitics.com/ciods/rt-beams-delivery-instruction/rt-beams-delivery-instruction/00741020/00741030/3002000d"""
try:
x = self.center.x - self.metadata.XRayImageReceptorTranslation[0]
y = self.center.y - self.metadata.XRayImageReceptorTranslation[1]
except AttributeError:
return self.center
else:
return Point(x, y)
class LinacDicomImage(DicomImage):
"""DICOM image taken on a linac. Also allows passing of gantry/coll/couch values via the filename."""
gantry_keyword = 'Gantry'
collimator_keyword = 'Coll'
couch_keyword = 'Couch'
_use_filenames: bool
def __init__(self, path: str, use_filenames: bool=False):
super().__init__(path)
self._use_filenames = use_filenames
@property
def gantry_angle(self) -> float:
"""Gantry angle of the irradiation."""
return self._get_axis_value(self.gantry_keyword.lower(), 'GantryAngle')
@property
def collimator_angle(self) -> float:
"""Collimator angle of the irradiation."""
return self._get_axis_value(self.collimator_keyword.lower(), 'BeamLimitingDeviceAngle')
@property
def couch_angle(self) -> float:
"""Couch angle of the irradiation."""
return self._get_axis_value(self.couch_keyword.lower(), 'PatientSupportAngle')
def _get_axis_value(self, axis_str: str, axis_dcm_attr: str) -> float:
"""Retrieve the value of the axis. This will first look in the file name for the value.
If not in the filename then it will look in the DICOM metadata. If the value can be found in neither
then a value of 0 is assumed.
Parameters
----------
axis_str : str
The string to look for in the filename.
axis_dcm_attr : str
The DICOM attribute that should contain the axis value.
Returns
-------
float
"""
axis_found = False
if self._use_filenames:
filename = osp.basename(self.path)
# see if the keyword is in the filename
keyword_in_filename = axis_str.lower() in filename.lower()
# if it's not there, then assume it's zero
if not keyword_in_filename:
axis = 0
axis_found = True
# if it is, then make sure it follows the naming convention of <axis###>
else:
match = re.search(r'(?<={})\d+'.format(axis_str.lower()), filename.lower())
if match is None:
raise ValueError(
f"The filename contains '{axis_str}' but could not read a number following it. Use the format '...{axis_str}<#>...'")
else:
axis = float(match.group())
axis_found = True
# try to interpret from DICOM data
if not axis_found:
try:
axis = float(getattr(self.metadata, axis_dcm_attr))
except AttributeError:
axis = 0
# if the value is close to 0 or 360 then peg at 0
if is_close(axis, [0, 360], delta=1):
return 0
else:
return axis
class FileImage(BaseImage):
"""An image from a "regular" file (.tif, .jpg, .bmp).
Attributes
----------
info : dict
The info dictionary as generated by Pillow.
sid : float
The SID value as passed in upon construction.
"""
def __init__(self, path: str, *, dpi: NumberLike=None, sid: NumberLike=None, dtype=None):
"""
Parameters
----------
path : str, file-object
The path to the file or a data stream.
dpi : int, float
The dots-per-inch of the image, defined at isocenter.
.. note:: If a DPI tag is found in the image, that value will override the parameter, otherwise this one
will be used.
sid : int, float
The Source-to-Image distance in mm.
dtype : numpy.dtype
The data type to cast the array as.
"""
super().__init__(path)
pil_image = pImage.open(path)
# convert to gray if need be
if pil_image.mode not in ('F', 'L', '1'):
pil_image = pil_image.convert('F')
self.info = pil_image.info
if dtype is not None:
self.array = np.array(pil_image, dtype=dtype)
else:
self.array = np.array(pil_image)
self._dpi = dpi
self.sid = sid
@property
def dpi(self) -> float:
"""The dots-per-inch of the image, defined at isocenter."""
dpi = None
for key in ('dpi', 'resolution'):
dpi = self.info.get(key)
if dpi is not None:
dpi = float(dpi[0])
break
if dpi is None:
dpi = self._dpi
if self.sid is not None and dpi is not None:
dpi *= self.sid / 1000
return dpi
@property
def dpmm(self) -> Optional[float]:
"""The Dots-per-mm of the image, defined at isocenter. E.g. if an EPID image is taken at 150cm SID,
the dpmm will scale back to 100cm."""
try:
return self.dpi / MM_PER_INCH
except TypeError:
return
class ArrayImage(BaseImage):
"""An image constructed solely from a numpy array."""
    def __init__(self, array: np.ndarray, *, dpi: NumberLike=None, sid: NumberLike=None, dtype=None):
"""
Parameters
----------
array : numpy.ndarray
The image array.
dpi : int, float
The dots-per-inch of the image, defined at isocenter.
.. note:: If a DPI tag is found in the image, that value will override the parameter, otherwise this one
will be used.
sid : int, float
The Source-to-Image distance in mm.
dtype : dtype, None, optional
The data type to cast the image data as. If None, will use whatever raw image format is.
"""
if dtype is not None:
self.array = np.array(array, dtype=dtype)
else:
self.array = array
self._dpi = dpi
self.sid = sid
@property
def dpmm(self) -> Optional[float]:
"""The Dots-per-mm of the image, defined at isocenter. E.g. if an EPID image is taken at 150cm SID,
the dpmm will scale back to 100cm."""
try:
return self.dpi / MM_PER_INCH
except:
return
@property
def dpi(self) -> Optional[float]:
"""The dots-per-inch of the image, defined at isocenter."""
dpi = None
if self._dpi is not None:
dpi = self._dpi
if self.sid is not None:
dpi *= self.sid / 1000
return dpi
def __sub__(self, other):
return ArrayImage(self.array - other.array)
class DicomImageStack:
"""A class that loads and holds a stack of DICOM images (e.g. a CT dataset). The class can take
a folder or zip file and will read CT images. The images must all be the same size. Supports
indexing to individual images.
Attributes
----------
images : list
Holds instances of :class:`~pylinac.core.image.DicomImage`. Can be accessed via index;
i.e. self[0] == self.images[0].
Examples
--------
Load a folder of Dicom images
>>> from pylinac import image
>>> img_folder = r"folder/qa/cbct/june"
>>> dcm_stack = image.DicomImageStack(img_folder) # loads and sorts the images
>>> dcm_stack.plot(3) # plot the 3rd image
Load a zip archive
>>> img_folder_zip = r"archive/qa/cbct/june.zip" # save space and zip your CBCTs
>>> dcm_stack = image.DicomImageStack.from_zip(img_folder_zip)
Load as a certain data type
>>> dcm_stack_uint32 = image.DicomImageStack(img_folder, dtype=np.uint32)
"""
images: List
def __init__(self, folder: str, dtype=None, min_number: int=39, check_uid: bool=True):
"""Load a folder with DICOM CT images.
Parameters
----------
folder : str
Path to the folder.
dtype : dtype, None, optional
            The data type to cast the image data as. If None, the raw image data type is used.
        min_number : int, optional
            The minimum number of images that must share the most common SeriesInstanceUID
            for the stack to be considered valid. Only enforced when ``check_uid`` is True.
        check_uid : bool, optional
            Whether to filter the images to the most common SeriesInstanceUID and enforce
            ``min_number``.
        """
self.images = []
paths = []
# load in images in their received order
if isinstance(folder, (list, tuple)):
paths = folder
elif osp.isdir(folder):
for pdir, sdir, files in os.walk(folder):
for file in files:
paths.append(osp.join(pdir, file))
for path in paths:
if self.is_CT_slice(path):
img = DicomImage(path, dtype=dtype)
self.images.append(img)
# check that at least 1 image was loaded
if len(self.images) < 1:
raise FileNotFoundError(f"No files were found in the specified location: {folder}")
# error checking
if check_uid:
self.images = self._check_number_and_get_common_uid_imgs(min_number)
# sort according to physical order
self.images.sort(key=lambda x: x.metadata.ImagePositionPatient[-1])
@classmethod
def from_zip(cls, zip_path: str, dtype=None):
"""Load a DICOM ZIP archive.
Parameters
----------
zip_path : str
Path to the ZIP archive.
dtype : dtype, None, optional
            The data type to cast the image data as. If None, the raw image data type is used.
"""
with TemporaryZipDirectory(zip_path) as tmpzip:
obj = cls(tmpzip, dtype)
return obj
@staticmethod
def is_CT_slice(file: str) -> bool:
"""Test if the file is a CT Image storage DICOM file."""
try:
ds = pydicom.dcmread(file, force=True, stop_before_pixels=True)
return ds.SOPClassUID.name == 'CT Image Storage'
except (InvalidDicomError, AttributeError, MemoryError):
return False
def _check_number_and_get_common_uid_imgs(self, min_number: int) -> List:
"""Check that all the images are from the same study."""
most_common_uid = Counter(i.metadata.SeriesInstanceUID for i in self.images).most_common(1)[0]
if most_common_uid[1] < min_number:
raise ValueError("The minimum number images from the same study were not found")
return [i for i in self.images if i.metadata.SeriesInstanceUID == most_common_uid[0]]
def plot(self, slice: int=0) -> None:
"""Plot a slice of the DICOM dataset.
Parameters
----------
slice : int
The slice to plot.
"""
self.images[slice].plot()
@property
def metadata(self) -> pydicom.FileDataset:
"""The metadata of the first image; shortcut attribute. Only attributes that are common throughout the stack should be used,
otherwise the individual image metadata should be used."""
return self.images[0].metadata
def __getitem__(self, item) -> DicomImage:
return self.images[item]
def __setitem__(self, key, value: DicomImage):
self.images[key] = value
def __len__(self):
return len(self.images)
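# --- Usage sketch ------------------------------------------------------------
# A minimal, self-contained demonstration of the isocenter DPI scaling described
# in the ``dpi``/``dpmm`` properties above. It is not part of the library API;
# the array contents are arbitrary and only the ``dpi``/``sid`` arguments matter
# (``MM_PER_INCH`` is the module-level constant used by ``dpmm`` above).
if __name__ == '__main__':
    demo = ArrayImage(np.zeros((10, 10)), dpi=150, sid=1500)
    # dpi is scaled by SID/1000: 150 * 1500 / 1000 == 225 at isocenter
    assert demo.dpi == 225
    # dpmm is simply the dpi converted from inches to millimeters
    assert abs(demo.dpmm - 225 / MM_PER_INCH) < 1e-9
    print("ArrayImage demo:", demo.dpi, "dpi ->", round(demo.dpmm, 3), "dots/mm")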
| jrkerns/pylinac | pylinac/core/image.py | Python | mit | 39,827 |
from collections import OrderedDict
from functools import wraps
from rest_framework import routers, status
from rest_framework.response import Response
from rest_framework.settings import api_settings
def _safe_run(func, *args, **kwargs):
"""
Try to run a function with given arguments. If it raises an exception, try
to convert it to response with the exception handler. If that fails, the
exception is re-raised.
"""
try:
return func(*args, **kwargs)
except Exception as exc:
response = api_settings.EXCEPTION_HANDLER(exc, context=kwargs)
if response is not None:
return response
raise
def _may_append_response_msg(response, msg_array, identifier):
msg = None
for key in ('detail', 'msg', 'non_field_errors'):
if response.data.get(key):
msg = response.data.get(key)
break
if msg:
msg_array.append("%s: %s" % (str(identifier), msg))
def _may_add_msg_to_result(msg_array, result):
msg_result = None
if msg_array:
msg_result = msg_array
if len(msg_array) == 1:
msg_result = msg_array[0]
if msg_result:
result['msg'] = msg_result
def bulk_create_wrapper(func):
@wraps(func)
def wrapper(self, request, *args, **kwargs):
"""
        Try to make the response look like:
{
"totalCount": 3,
"createdCount": 2,
"failed": ["zhangsan"]
}
"""
data = request.data
if not isinstance(data, list):
return func(self, request, *args, **kwargs)
total_count = created_count = 0
failed_objs = []
msg_array = []
for idx, obj in enumerate(data):
request._full_data = obj
response = _safe_run(func, self, request, *args, **kwargs)
if status.is_success(response.status_code):
created_count += 1
else:
if isinstance(obj, dict) and len(obj) == 1:
failed_objs.append(list(obj.values())[0])
_may_append_response_msg(response, msg_array, list(obj.values())[0])
else:
failed_objs.append(obj)
_may_append_response_msg(response, msg_array, obj)
total_count += 1
# Reset object in view set.
setattr(self, 'object', None)
result = {"totalCount": total_count, "createdCount": created_count, "failed": failed_objs}
_may_add_msg_to_result(msg_array, result)
return Response(result, status=status.HTTP_200_OK)
return wrapper
def bulk_destroy_impl(self, request, **kwargs):
"""
It is possible to delete multiple items in one request. Use the `DELETE`
method with the same url as for listing/creating objects. The request body
should contain a list with identifiers for objects to be deleted. The
identifier is usually the last part of the URL for deleting a single
object.
    A successful response could look like:
        {
            "totalCount": 3,            // total number of identifiers processed
            "deletedCount": 2,          // number of objects successfully deleted
            "failed": ["renhaitao"]     // identifiers that failed to delete
        }
"""
if not isinstance(request.data, list):
return Response(status=status.HTTP_400_BAD_REQUEST,
data={'detail': 'Bulk delete needs a list of identifiers.'})
for identifier in request.data:
if not isinstance(identifier, str) and not isinstance(identifier, int):
return Response(status=status.HTTP_400_BAD_REQUEST,
data={'detail': '"%s" is not a valid identifier.' % identifier})
self.kwargs.update(kwargs)
total_count = deleted_count = 0
failed_ids = []
msg_array = []
for identifier in OrderedDict.fromkeys(request.data):
self.kwargs[self.lookup_field] = str(identifier)
response = _safe_run(self.destroy, request, **self.kwargs)
if status.is_success(response.status_code):
deleted_count += 1
else:
failed_ids.append(identifier)
_may_append_response_msg(response, msg_array, identifier)
total_count += 1
result = {"totalCount": total_count, "deletedCount": deleted_count, "failed": failed_ids}
_may_add_msg_to_result(msg_array, result)
    # The response body actually contains content, so do not return
    # 204 No Content.
return Response(result, status=status.HTTP_200_OK)
class BulkRouter(routers.DefaultRouter):
"""
This router provides the standard set of resources (the same as
`DefaultRouter`). In addition to that, it allows for bulk operations on the
collection as a whole. These are performed as a POST/DELETE request on
the `{basename}-list` url. These requests are dispatched to the
`bulk_create` and `bulk_destroy` methods
respectively.
"""
def get_routes(self, viewset):
for route in self.routes:
if isinstance(route, routers.Route) and route.name.endswith('-list'):
route.mapping.update({'delete': 'bulk_destroy'})
return super().get_routes(viewset)
def register(self, prefix, viewset, base_name=None):
if hasattr(viewset, 'create'):
viewset.create = bulk_create_wrapper(viewset.create)
if hasattr(viewset, 'destroy') and not hasattr(viewset, 'bulk_destroy'):
viewset.bulk_destroy = bulk_destroy_impl
super().register(prefix, viewset, base_name)
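# --- Usage sketch ------------------------------------------------------------
# A minimal registration sketch (``UserViewSet`` and the URL prefix are
# hypothetical; it needs a configured Django project, so it is shown as
# comments only):
#
#   router = BulkRouter()
#   router.register(r'users', UserViewSet, base_name='user')
#   urlpatterns = router.urls
#
#   # POST a JSON list to /users/ to bulk-create; the response summarizes
#   # totalCount / createdCount / failed.
#   # DELETE a JSON list of identifiers to /users/ to bulk-destroy; the
#   # response summarizes totalCount / deletedCount / failed.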
| ycheng-aa/qr_server | apps/common/bulk_operations.py | Python | mit | 5,560 |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaymentGatewayAccountsInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'payment_gateway_accounts': 'list[PaymentGatewayAccount]'
}
attribute_map = {
'payment_gateway_accounts': 'paymentGatewayAccounts'
}
def __init__(self, payment_gateway_accounts=None): # noqa: E501
"""PaymentGatewayAccountsInfo - a model defined in Swagger""" # noqa: E501
self._payment_gateway_accounts = None
self.discriminator = None
if payment_gateway_accounts is not None:
self.payment_gateway_accounts = payment_gateway_accounts
@property
def payment_gateway_accounts(self):
"""Gets the payment_gateway_accounts of this PaymentGatewayAccountsInfo. # noqa: E501
# noqa: E501
:return: The payment_gateway_accounts of this PaymentGatewayAccountsInfo. # noqa: E501
:rtype: list[PaymentGatewayAccount]
"""
return self._payment_gateway_accounts
@payment_gateway_accounts.setter
def payment_gateway_accounts(self, payment_gateway_accounts):
"""Sets the payment_gateway_accounts of this PaymentGatewayAccountsInfo.
# noqa: E501
:param payment_gateway_accounts: The payment_gateway_accounts of this PaymentGatewayAccountsInfo. # noqa: E501
:type: list[PaymentGatewayAccount]
"""
self._payment_gateway_accounts = payment_gateway_accounts
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PaymentGatewayAccountsInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentGatewayAccountsInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
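# --- Usage sketch ------------------------------------------------------------
# A minimal, self-contained demonstration of the dict/equality helpers. It is
# not part of the generated client code.
if __name__ == "__main__":
    info = PaymentGatewayAccountsInfo(payment_gateway_accounts=[])
    print(info.to_dict())  # -> {'payment_gateway_accounts': []}
    print(info == PaymentGatewayAccountsInfo(payment_gateway_accounts=[]))  # -> True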
| docusign/docusign-python-client | docusign_esign/models/payment_gateway_accounts_info.py | Python | mit | 3,706 |
import pytest
import uqbar.strings
import supriya.assets.synthdefs
import supriya.nonrealtime
import supriya.patterns
pattern = supriya.patterns.Pgpar(
[
supriya.patterns.Pmono(
amplitude=1.0,
duration=1.0,
frequency=supriya.patterns.Pseq([440, 660, 880, 990], 1),
),
supriya.patterns.Pbind(
amplitude=1.0,
duration=0.75,
frequency=supriya.patterns.Pseq([222, 333, 444, 555], 1),
),
]
)
def test___iter__():
events = list(pattern)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('B'),
),
),
)
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=440,
is_stop=False,
target_node=UUID('A'),
uuid=UUID('C'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=222,
target_node=UUID('B'),
uuid=UUID('D'),
)
NoteEvent(
amplitude=1.0,
delta=0.25,
duration=0.75,
frequency=333,
target_node=UUID('B'),
uuid=UUID('E'),
)
NoteEvent(
amplitude=1.0,
delta=0.5,
duration=1.0,
frequency=660,
is_stop=False,
target_node=UUID('A'),
uuid=UUID('C'),
)
NoteEvent(
amplitude=1.0,
delta=0.5,
duration=0.75,
frequency=444,
target_node=UUID('B'),
uuid=UUID('F'),
)
NoteEvent(
amplitude=1.0,
delta=0.25,
duration=1.0,
frequency=880,
is_stop=False,
target_node=UUID('A'),
uuid=UUID('C'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=555,
target_node=UUID('B'),
uuid=UUID('G'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=990,
target_node=UUID('A'),
uuid=UUID('C'),
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
GroupEvent(
is_stop=True,
uuid=UUID('B'),
),
),
is_stop=True,
)
"""
)
def test_send_01():
events = pytest.helpers.setup_pattern_send(pattern, iterations=1)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('B'),
),
),
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
GroupEvent(
is_stop=True,
uuid=UUID('B'),
),
),
is_stop=True,
)
"""
)
def test_send_02():
events = pytest.helpers.setup_pattern_send(pattern, iterations=2)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('B'),
),
),
)
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=440,
is_stop=False,
target_node=UUID('A'),
uuid=UUID('C'),
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
GroupEvent(
is_stop=True,
uuid=UUID('B'),
),
),
is_stop=True,
)
"""
)
def test_send_03():
events = pytest.helpers.setup_pattern_send(pattern, iterations=3)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('B'),
),
),
)
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=440,
is_stop=False,
target_node=UUID('A'),
uuid=UUID('C'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=222,
target_node=UUID('B'),
uuid=UUID('D'),
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
GroupEvent(
is_stop=True,
uuid=UUID('B'),
),
),
is_stop=True,
)
"""
)
def test_manual_incommunicado():
lists, deltas = pytest.helpers.manual_incommunicado(pattern, 10)
assert lists == [
[
10,
[
["/g_new", 1000, 1, 1],
["/g_new", 1001, 1, 1],
[
"/s_new",
"default",
1002,
0,
1000,
"amplitude",
1.0,
"frequency",
440,
],
[
"/s_new",
"default",
1003,
0,
1001,
"amplitude",
1.0,
"frequency",
222,
],
],
],
[
10.75,
[
["/n_set", 1003, "gate", 0],
[
"/s_new",
"default",
1004,
0,
1001,
"amplitude",
1.0,
"frequency",
333,
],
],
],
[11.0, [["/n_set", 1002, "amplitude", 1.0, "frequency", 660]]],
[
11.5,
[
["/n_set", 1004, "gate", 0],
[
"/s_new",
"default",
1005,
0,
1001,
"amplitude",
1.0,
"frequency",
444,
],
],
],
[12.0, [["/n_set", 1002, "amplitude", 1.0, "frequency", 880]]],
[
12.25,
[
["/n_set", 1005, "gate", 0],
[
"/s_new",
"default",
1006,
0,
1001,
"amplitude",
1.0,
"frequency",
555,
],
],
],
[
13.0,
[
["/n_set", 1006, "gate", 0],
["/n_set", 1002, "amplitude", 1.0, "frequency", 990],
],
],
[14.0, [["/n_set", 1002, "gate", 0]]],
[14.25, [["/n_free", 1000, 1001]]],
]
assert deltas == [0.75, 0.25, 0.5, 0.5, 0.25, 0.75, 1.0, 0.25, None]
def test_nonrealtime():
session = supriya.nonrealtime.Session()
with session.at(10):
session.inscribe(pattern)
d_recv_commands = pytest.helpers.build_d_recv_commands(
[supriya.assets.synthdefs.default]
)
assert session.to_lists() == [
[
10.0,
[
*d_recv_commands,
["/g_new", 1000, 1, 0],
["/g_new", 1001, 1, 0],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1002,
0,
1000,
"amplitude",
1.0,
"frequency",
440,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1003,
0,
1001,
"amplitude",
1.0,
"frequency",
222,
],
],
],
[
10.75,
[
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1004,
0,
1001,
"amplitude",
1.0,
"frequency",
333,
],
["/n_set", 1003, "gate", 0],
],
],
[11.0, [["/n_set", 1002, "amplitude", 1.0, "frequency", 660]]],
[
11.5,
[
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1005,
0,
1001,
"amplitude",
1.0,
"frequency",
444,
],
["/n_set", 1004, "gate", 0],
],
],
[12.0, [["/n_set", 1002, "amplitude", 1.0, "frequency", 880]]],
[
12.25,
[
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1006,
0,
1001,
"amplitude",
1.0,
"frequency",
555,
],
["/n_set", 1005, "gate", 0],
],
],
[
13.0,
[
["/n_set", 1002, "amplitude", 1.0, "frequency", 990],
["/n_set", 1006, "gate", 0],
],
],
[14.0, [["/n_set", 1002, "gate", 0]]],
[14.25, [["/n_free", 1000, 1001], [0]]],
]
| Pulgama/supriya | tests/test_patterns_Pgpar.py | Python | mit | 12,255 |
import pytest
from scoring_engine.models.team import Team
from scoring_engine.models.user import User
from scoring_engine.models.service import Service
from scoring_engine.models.check import Check
from tests.scoring_engine.helpers import generate_sample_model_tree, populate_sample_data
from tests.scoring_engine.unit_test import UnitTest
class TestTeam(UnitTest):
def test_init_whiteteam(self):
team = Team(name="White Team", color="White")
assert team.name == "White Team"
assert team.color == "White"
assert team.id is None
assert team.current_score == 0
assert team.is_red_team is False
assert team.is_white_team is True
assert team.is_blue_team is False
def test_init_blueteam(self):
team = Team(name="Blue Team", color="Blue")
assert team.name == "Blue Team"
assert team.color == "Blue"
assert team.id is None
assert team.current_score == 0
assert team.is_red_team is False
assert team.is_white_team is False
assert team.is_blue_team is True
def test_init_redteam(self):
team = Team(name="Red Team", color="Red")
assert team.name == "Red Team"
assert team.color == "Red"
assert team.id is None
assert team.current_score == 0
assert team.is_red_team is True
assert team.is_white_team is False
assert team.is_blue_team is False
def test_simple_save(self):
white_team = Team(name="White Team", color="White")
self.session.add(white_team)
self.session.commit()
assert white_team.id is not None
assert len(self.session.query(Team).all()) == 1
def test_multiple_saves(self):
white_team = Team(name="White Team", color="White")
self.session.add(white_team)
blue_team = Team(name="Blue", color="Blue")
self.session.add(blue_team)
self.session.commit()
assert len(self.session.query(Team).all()) == 2
def test_services(self):
team = generate_sample_model_tree('Team', self.session)
service_1 = Service(name="Example Service 1", team=team, check_name="ICMP IPv4 Check", host='127.0.0.1')
service_2 = Service(name="Example Service 2", team=team, check_name="SSH IPv4 Check", host='2.3.4.5')
self.session.add(service_1)
self.session.add(service_2)
self.session.commit()
assert team.services == [service_1, service_2]
def test_users(self):
team = generate_sample_model_tree('Team', self.session)
user_1 = User(username="testuser", password="testpass", team=team)
user_2 = User(username="abcuser", password="abcpass", team=team)
self.session.add(user_1)
self.session.add(user_2)
self.session.commit()
assert team.users == [user_2, user_1] # TODO - Figure out why this is flipped
def test_current_score(self):
team = generate_sample_model_tree('Team', self.session)
service_1 = Service(name="Example Service 1", team=team, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service_1)
check_1 = Check(service=service_1, result=True, output='Good output')
self.session.add(check_1)
service_2 = Service(name="Example Service 2", team=team, check_name="SSH IPv4 Check", host='127.0.0.2')
self.session.add(service_2)
check_2 = Check(service=service_2, result=True, output='Good output')
self.session.add(check_2)
check_3 = Check(service=service_2, result=True, output='Good output')
self.session.add(check_3)
service_3 = Service(name="Example Service 3", team=team, check_name="SSH IPv4 Check", host='127.0.0.3')
self.session.add(service_3)
check_3 = Check(service=service_3, result=False, output='bad output')
self.session.add(check_3)
self.session.commit()
assert team.current_score == 300
def test_place(self):
team_1 = Team(name="Blue Team 1", color="Blue")
self.session.add(team_1)
service_1 = Service(name="Example Service 1", team=team_1, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service_1)
check_1 = Check(service=service_1, result=True, output='Good output')
check_2 = Check(service=service_1, result=True, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.commit()
team_2 = Team(name="Blue Team 2", color="Blue")
self.session.add(team_2)
service_1 = Service(name="Example Service 1", team=team_2, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service_1)
check_1 = Check(service=service_1, result=True, output='Good output')
check_2 = Check(service=service_1, result=True, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.commit()
team_3 = Team(name="Blue Team 3", color="Blue")
self.session.add(team_3)
service_1 = Service(name="Example Service 1", team=team_3, check_name="ICMP IPv4 Check", host='127.0.0.1')
self.session.add(service_1)
check_1 = Check(service=service_1, result=True, output='Good output')
check_2 = Check(service=service_1, result=False, output='Good output')
self.session.add(check_1)
self.session.add(check_2)
self.session.commit()
assert team_1.place == 1
assert team_2.place == 1
assert team_3.place == 3
def test_get_array_of_scores(self):
populate_sample_data(self.session)
results = Team.get_all_rounds_results()
assert 'rounds' in results
assert results['rounds'] == ['Round 0', 'Round 1', 'Round 2']
assert 'rgb_colors' in results
assert 'Blue Team 1' in results['rgb_colors']
assert results['rgb_colors']['Blue Team 1'].startswith('rgba')
assert 'scores' in results
assert results['scores'] == {'Blue Team 1': [0, 100, 100]}
def test_get_round_scores(self):
team = populate_sample_data(self.session)
assert team.get_round_scores(0) == 0
assert team.get_round_scores(1) == 100
assert team.get_round_scores(2) == 0
with pytest.raises(IndexError):
team.get_round_scores(3)
| pwnbus/scoring_engine | tests/scoring_engine/models/test_team.py | Python | mit | 6,383 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------
# File Name: __init__.py
# Author: Zhao Yanbai
# Fri Oct 31 14:58:04 2014
# Description: none
# ------------------------------------------------------------------------
| acevest/monitor | core/__init__.py | Python | gpl-2.0 | 310 |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class EdxPipeline(object):
def process_item(self, item, spider):
return item
| cdhekne/My_Thesis | Thesis_WebCrawler/edx/edx/pipelines.py | Python | apache-2.0 | 257 |
# Copyright (C) 2008 Associated Universities, Inc. Washington DC, USA.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Add to simulator.py with the currently running interpreter... hack job.
e.g. within interpreter:
cicada> execfile('update_simulator.py')
"""
__copyright__ = "Copyright (C) 2008 Associated Universities, Inc."
__license__ = "GPL"
def dot_stderr():
sys.stderr.write('.')
sys.stdout = open('simulator.py', 'a')
sys.stderr.write('Appending to simulator.py ')
dot_stderr()
keys = get()
dot_stderr()
values = get(keys)
dot_stderr()
packed = []
for i in range(len(keys)):
packed.append((keys[i], values[i]))
dot_stderr()
print '###################################################################'
print 'parameters = {'
for k,v in packed:
try:
if len(v) > 8:
print ", '%s': '%s'" % (k, v[:32])
else:
print ", '%s': '%s'" % (k, v[:8])
except TypeError:
print ", '%s': '%s'" % (k, str(v))
print '}'
print
dot_stderr()
print 'profiles = {'
lookup = {'a': 'available', 'u': 'unknown', 'r': 'running'}
for token in profiles():
k,v = token.split(',')
print ", '%s': states['%s']" % (k, lookup.get(v, 'unknown'))
print '}'
print
dot_stderr()
sys.stdout.close()
sys.stdout = sys.__stdout__
dot_stderr()
sys.stderr.write(' done.\n')
print 'Do the following touchups in simulator.py:'
print '1. Clean out old data.'
print '2. Fix the syntax of the first parameter.'
print '3. Fix the syntax of the first profile.'
print '(remove the leading comma of each in 2 & 3)'
| nrao/guppi-controller | src/update_simulator.py | Python | gpl-3.0 | 2,143 |
import sys, os, copy
sys.path.append("../../")
from utils import Utils, WordDict
import numpy as np
u = Utils()
n_random = 30
in_file = "phrases_00.txt"
out_file = "phrases.txt"
phrases = u.loadText(in_file).replace("\r", "")
phrases = phrases.split('\n')[0:-1]
# randint's upper bound is exclusive, so use len(phrases) to make every phrase eligible
indices = np.random.randint(low=0, high=len(phrases), size=n_random)
new_phrases = [phrases[idx] for idx in indices]
new_phrases = ("\n").join(new_phrases)
u.saveText(new_phrases, out_file)
| singleswitch/ticker | experiments/audio_user_trials/random_phrase_select.py | Python | mit | 464 |
import pytest
import hearinglosssimulator as hls
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import numpy as np
def test_cgc_filter():
freqs = [5000.]
#~ freqs = [ 125*2**i for i in range(7) ]
#~ freqs = hls.erbspace(80.,15000., 16.)
#~ compression_degree = [1]
#~ compression_degree = [0.25]
compression_degree = [0]
level_max = 100.
level_step = 10.
sample_rate = 44100.
coefficients_pgc, coefficients_hpaf, levels, band_overlap_gain = hls.make_cgc_filter(freqs, compression_degree, level_max, level_step, sample_rate)
fig, ax1 = plt.subplots()
fig, ax2 = plt.subplots()
fig, ax3 = plt.subplots()
levels_colors = [ get_cmap('jet', len(levels))(l) for l, level in enumerate(levels) ]
freqs_colors = [ get_cmap('jet', len(freqs))(f) for f, freq in enumerate(freqs) ]
for f, freq in enumerate(freqs):
gains = np.zeros(len(levels))
for l, level in enumerate(levels):
all_filter = np.concatenate([coefficients_pgc[f,:,:],coefficients_hpaf[f,l,:,:], coefficients_pgc[f,:,:]], axis = 0)
w, h = hls.sosfreqz(all_filter, worN = 2**16,)
gains[l] = np.max(20*np.log10(np.abs(h)))
hls.plot_filter(all_filter, ax2, sample_rate, color=levels_colors[l])
hls.plot_filter(coefficients_hpaf[f,l,:,:], ax3, sample_rate, color=levels_colors[l])
hls.plot_filter(coefficients_pgc[f,:,:], ax3, sample_rate, color='k')
ax3.axvline(freq, color='k')
ax2.axvline(freq, color='k')
ax1.plot(levels, levels+gains, label='{:0.1f}'.format(freq), color=freqs_colors[f])
ax1.plot(levels,levels, color='r', ls='--')
ax1.legend()
ax2.set_ylim(-50,50)
ax3.set_ylim(-50,50)
plt.show()
def test_invcomp_filter():
#~ freqs = [1000.]
#~ freqs = [5000.]
#~ freqs = [ 125*2**i for i in range(7) ]
freqs = hls.erbspace(80.,15000., 32.)
#~ compression_degree = [1]* len(freqs)
#~ compression_degree = [0.5]
#~ compression_degree = [0.25] * len(freqs)
#~ compression_degree = [0.] * len(freqs)
compression_degree = [0.5] * len(freqs)
level_max = 100.
level_step = 10.
sample_rate = 44100.
coefficients_pgc, gain_controlled, levels, band_overlap_gain = hls.make_invcomp_filter(freqs, compression_degree, level_max, level_step, sample_rate)
    print('here, band_overlap_gain:', band_overlap_gain)
#~ print(gain_controlled)
#~ fig, ax = plt.subplots()
#~ exit()
fig, ax1 = plt.subplots()
#~ fig, ax2 = plt.subplots()
fig, ax3 = plt.subplots()
levels_colors = [ get_cmap('jet', len(levels))(l) for l, level in enumerate(levels) ]
freqs_colors = [ get_cmap('jet', len(freqs))(f) for f, freq in enumerate(freqs) ]
for f, freq in enumerate(freqs):
#~ gains = np.zeros(len(levels))
gains = 20*np.log10(gain_controlled[f])
#~ for l, level in enumerate(levels):
#~ all_filter = np.concatenate([coefficients_pgc[f,:,:], coefficients_pgc[f,:,:]], axis = 0)
#~ w, h = hls.sosfreqz(all_filter, worN = 2**16,)
#~ gains[l] = np.max(20*np.log10(np.abs(h)))
#~ gain = np.max(20*np.log10(np.abs(h)))
#~ hls.plot_filter(all_filter, ax2, sample_rate, color=levels_colors[l])
#~ hls.plot_filter(coefficients_hpaf[f,l,:,:], ax3, sample_rate, color=levels_colors[l])
hls.plot_filter(coefficients_pgc[f,:,:], ax3, sample_rate, color='k')
ax3.axvline(freq, color='k')
#~ ax2.axvline(freq, color='k')
ax1.plot(levels, levels+gains, label='{:0.1f}'.format(freq), color=freqs_colors[f])
#~ ax1.plot(levels, gains, label='{:0.1f}'.format(freq), color=freqs_colors[f])
ax1.plot(levels,levels, color='r', ls='--')
ax1.legend()
#~ ax2.set_ylim(-50,50)
ax3.set_ylim(-50,50)
plt.show()
if __name__ == '__main__':
#~ test_cgc_filter()
test_invcomp_filter()
| samuelgarcia/HearingLossSimulator | hearinglosssimulator/tests/test_cgcfilter.py | Python | mit | 4,194 |
import warnings
from flask import Flask
from flask.ext.cors import CORS
from flask.ext.sqlalchemy import SQLAlchemy
import sqlalchemy
app = Flask(__name__)
db = SQLAlchemy()
def configure(config):
configure_application(config)
configure_database()
configure_json()
configure_cors()
def configure_application(config):
app.config.from_object(config)
def configure_database():
db.init_app(app)
db.app = app
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=sqlalchemy.exc.SAWarning)
#
# Can't reflect expression-based and partial indexes:
#
# valsi_lower_word,
# valsi_unique_word_nospaces,
# natlangwords_lower_word,
# natlangwords_unique_langid_word_null
#
db.reflect(app=app)
def configure_json():
app.config['RESTFUL_JSON'] = {
'sort_keys' : False,
'ensure_ascii' : False,
'indent' : 2
}
def configure_cors():
CORS(app, resources={
r"/api/*" : { "origins" : "*" }
})
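# --- Usage sketch ------------------------------------------------------------
# A minimal startup sketch (the settings path is hypothetical and configure()
# needs a reachable database for db.reflect(), so it is shown as comments only):
#
#   from valis import context
#   context.configure('valis.settings.Development')
#   context.app.run()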
| lojban/valis | valis/context.py | Python | gpl-3.0 | 1,078 |
"""
Tests for Django template context processors.
"""
from __future__ import absolute_import
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from lms.djangoapps.mobile_api.context_processor import is_from_mobile_app
class MobileContextProcessorTests(TestCase):
"""
Tests for the configuration context processor.
"""
def test_is_from_mobile_app(self):
"""
Verify the context is from mobile app.
"""
request = RequestFactory().get('/')
request.META['HTTP_USER_AGENT'] = settings.MOBILE_APP_USER_AGENT_REGEXES[0]
context = is_from_mobile_app(request)
self.assertEqual(context['is_from_mobile_app'], True)
def test_not_is_from_mobile_app(self):
"""
Verify the context is not from the mobile app.
"""
request = RequestFactory().get('/')
request.META['HTTP_USER_AGENT'] = "Not from the mobile app"
context = is_from_mobile_app(request)
self.assertEqual(context['is_from_mobile_app'], False)
| ESOedX/edx-platform | lms/djangoapps/mobile_api/tests/test_context_processor.py | Python | agpl-3.0 | 1,087 |
"""
Master Boot Record
The first sector on disk, contains the partition table, bootloader, et al.
http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
"""
from binascii import unhexlify
from construct import *
mbr = Struct("mbr",
HexDumpAdapter(Bytes("bootloader_code", 446)),
Array(4,
Struct("partitions",
Enum(Byte("state"),
INACTIVE = 0x00,
ACTIVE = 0x80,
),
BitStruct("beginning",
Octet("head"),
Bits("sect", 6),
Bits("cyl", 10),
),
Enum(UBInt8("type"),
Nothing = 0x00,
FAT12 = 0x01,
XENIX_ROOT = 0x02,
XENIX_USR = 0x03,
FAT16_old = 0x04,
Extended_DOS = 0x05,
FAT16 = 0x06,
FAT32 = 0x0b,
FAT32_LBA = 0x0c,
NTFS = 0x07,
LINUX_SWAP = 0x82,
LINUX_NATIVE = 0x83,
_default_ = Pass,
),
BitStruct("ending",
Octet("head"),
Bits("sect", 6),
Bits("cyl", 10),
),
UBInt32("sector_offset"), # offset from MBR in sectors
UBInt32("size"), # in sectors
)
),
Const("signature", b"\x55\xAA"),
)
if __name__ == "__main__":
cap1 = unhexlify(
b"33C08ED0BC007CFB5007501FFCBE1B7CBF1B065057B9E501F3A4CBBDBE07B104386E00"
"7C09751383C510E2F4CD188BF583C610497419382C74F6A0B507B4078BF0AC3C0074FC"
"BB0700B40ECD10EBF2884E10E84600732AFE4610807E040B740B807E040C7405A0B607"
"75D2804602068346080683560A00E821007305A0B607EBBC813EFE7D55AA740B807E10"
"0074C8A0B707EBA98BFC1E578BF5CBBF05008A5600B408CD1372238AC1243F988ADE8A"
"FC43F7E38BD186D6B106D2EE42F7E239560A77237205394608731CB80102BB007C8B4E"
"028B5600CD1373514F744E32E48A5600CD13EBE48A560060BBAA55B441CD13723681FB"
"55AA7530F6C101742B61606A006A00FF760AFF76086A0068007C6A016A10B4428BF4CD"
"136161730E4F740B32E48A5600CD13EBD661F9C3496E76616C69642070617274697469"
"6F6E207461626C65004572726F72206C6F6164696E67206F7065726174696E67207379"
"7374656D004D697373696E67206F7065726174696E672073797374656D000000000000"
"0000000000000000000000000000000000000000000000000000000000000000000000"
"00000000000000000000000000000000002C4463B7BDB7BD00008001010007FEFFFF3F"
"000000371671020000C1FF0FFEFFFF761671028A8FDF06000000000000000000000000"
"000000000000000000000000000000000000000055AA")
print(mbr.parse(cap1))
| gkonstantyno/construct | construct/formats/filesystem/mbr.py | Python | mit | 2,596 |
"""
Form components for working with trees.
"""
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.forms.util import ErrorList
from django.utils.encoding import smart_unicode
from django.utils.html import conditional_escape, mark_safe
from django.utils.translation import ugettext_lazy as _
from mptt.exceptions import InvalidMove
__all__ = ('TreeNodeChoiceField', 'TreeNodeMultipleChoiceField', 'TreeNodePositionField', 'MoveNodeForm')
# Fields ######################################################################
class TreeNodeChoiceField(forms.ModelChoiceField):
"""A ModelChoiceField for tree nodes."""
def __init__(self, queryset, *args, **kwargs):
self.level_indicator = kwargs.pop('level_indicator', u'---')
# if a queryset is supplied, enforce ordering
if hasattr(queryset, 'model'):
mptt_opts = queryset.model._mptt_meta
queryset = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
super(TreeNodeChoiceField, self).__init__(queryset, *args, **kwargs)
def _get_level_indicator(self, obj):
level = getattr(obj, obj._mptt_meta.level_attr)
return mark_safe(conditional_escape(self.level_indicator) * level)
def label_from_instance(self, obj):
"""
Creates labels which represent the tree level of each node when
generating option labels.
"""
level_indicator = self._get_level_indicator(obj)
return mark_safe(u'%s %s' % (level_indicator, conditional_escape(smart_unicode(obj))))
class TreeNodeMultipleChoiceField(TreeNodeChoiceField, forms.ModelMultipleChoiceField):
"""A ModelMultipleChoiceField for tree nodes."""
def __init__(self, queryset, *args, **kwargs):
self.level_indicator = kwargs.pop('level_indicator', u'---')
# if a queryset is supplied, enforce ordering
if hasattr(queryset, 'model'):
mptt_opts = queryset.model._mptt_meta
queryset = queryset.order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
# For some reason ModelMultipleChoiceField constructor passes kwargs
# as args to its super(), which causes 'multiple values for keyword arg'
# error sometimes. So we skip it (that constructor does nothing anyway!)
forms.ModelChoiceField.__init__(self, queryset, *args, **kwargs)
class TreeNodePositionField(forms.ChoiceField):
"""A ChoiceField for specifying position relative to another node."""
FIRST_CHILD = 'first-child'
LAST_CHILD = 'last-child'
LEFT = 'left'
RIGHT = 'right'
DEFAULT_CHOICES = (
(FIRST_CHILD, _('First child')),
(LAST_CHILD, _('Last child')),
(LEFT, _('Left sibling')),
(RIGHT, _('Right sibling')),
)
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.DEFAULT_CHOICES
super(TreeNodePositionField, self).__init__(*args, **kwargs)
# Forms #######################################################################
class MoveNodeForm(forms.Form):
"""
A form which allows the user to move a given node from one location
in its tree to another, with optional restriction of the nodes which
are valid target nodes for the move.
"""
target = TreeNodeChoiceField(queryset=None)
position = TreeNodePositionField()
def __init__(self, node, *args, **kwargs):
"""
The ``node`` to be moved must be provided. The following keyword
arguments are also accepted::
``valid_targets``
Specifies a ``QuerySet`` of valid targets for the move. If
not provided, valid targets will consist of everything other
node of the same type, apart from the node itself and any
descendants.
For example, if you want to restrict the node to moving
within its own tree, pass a ``QuerySet`` containing
everything in the node's tree except itself and its
descendants (to prevent invalid moves) and the root node (as
a user could choose to make the node a sibling of the root
node).
``target_select_size``
The size of the select element used for the target node.
Defaults to ``10``.
``position_choices``
A tuple of allowed position choices and their descriptions.
Defaults to ``TreeNodePositionField.DEFAULT_CHOICES``.
``level_indicator``
A string which will be used to represent a single tree level
in the target options.
"""
self.node = node
valid_targets = kwargs.pop('valid_targets', None)
target_select_size = kwargs.pop('target_select_size', 10)
position_choices = kwargs.pop('position_choices', None)
level_indicator = kwargs.pop('level_indicator', None)
super(MoveNodeForm, self).__init__(*args, **kwargs)
opts = node._mptt_meta
if valid_targets is None:
valid_targets = node._tree_manager.exclude(**{
opts.tree_id_attr: getattr(node, opts.tree_id_attr),
'%s__gte' % opts.left_attr: getattr(node, opts.left_attr),
'%s__lte' % opts.right_attr: getattr(node, opts.right_attr),
})
self.fields['target'].queryset = valid_targets
self.fields['target'].widget.attrs['size'] = target_select_size
if level_indicator:
self.fields['target'].level_indicator = level_indicator
        if position_choices:
            self.fields['position'].choices = position_choices
def save(self):
"""
Attempts to move the node using the selected target and
position.
If an invalid move is attempted, the related error message will
be added to the form's non-field errors and the error will be
re-raised. Callers should attempt to catch ``InvalidNode`` to
redisplay the form with the error, should it occur.
"""
try:
self.node.move_to(self.cleaned_data['target'],
self.cleaned_data['position'])
return self.node
except InvalidMove, e:
self.errors[NON_FIELD_ERRORS] = ErrorList(e)
raise
class MPTTAdminForm(forms.ModelForm):
"""
A form which validates that the chosen parent for a node isn't one of
its descendants.
"""
def clean(self):
cleaned_data = super(MPTTAdminForm, self).clean()
opts = self._meta.model._mptt_meta
parent = cleaned_data.get(opts.parent_attr)
if self.instance and parent:
if parent.is_descendant_of(self.instance, include_self=True):
if opts.parent_attr not in self._errors:
self._errors[opts.parent_attr] = forms.util.ErrorList()
self._errors[opts.parent_attr].append(_('Invalid parent'))
del self.cleaned_data[opts.parent_attr]
return cleaned_data
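# --- Usage sketch ------------------------------------------------------------
# A minimal view-level sketch of MoveNodeForm (``Category`` is a hypothetical
# MPTT model; shown as comments because it needs a configured Django project):
#
#   node = Category.objects.get(pk=pk)
#   form = MoveNodeForm(node, request.POST or None)
#   if request.method == 'POST' and form.is_valid():
#       try:
#           form.save()
#       except InvalidMove:
#           pass  # the message is now in form.errors; re-render the form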
| pjdelport/django-mptt | mptt/forms.py | Python | mit | 7,040 |
__version__ = '0.1.7'
| amikrop/django-uaccounts | uaccounts/__init__.py | Python | bsd-3-clause | 22 |
# -*- coding: utf-8 -*-
"""Wordze.com API python bindings."""
__all__ = ("Api", "ApiError", "HistoryKeyword", "Keyword",
"history", "search", "status", "single")
import urllib
import urllib2
import urlparse
from functools import wraps, partial
from datetime import datetime
from xml.dom.minidom import Element, parse
# Search filters
F_NONE = 1
F_ADULT = 2
F_DRUGS = 3
F_GAMBLING = 4
F_WAREZ_HACKING = 5
F_ALL = 6
# Search styles
S_EXACT = "exact"
S_BROAD = "broad"
S_ANY = "any"
def extract_text(dom, name, wrapper=None):
"""
Function tries to extract text data from the first tag
with a given name and wrapps it in a give function / class.
"""
elements = dom.getElementsByTagName(name)
if elements:
text = elements[0].lastChild.data
else:
text = ""
return wrapper(text) if wrapper else text
class ApiError(Exception):
"""Api error wrapper."""
class Keyword(dict):
"""Class wrapper for a keyword item."""
def __init__(self, data):
"""Constructor."""
if isinstance(data, Element):
self["count"] = int(data.getAttribute("Count"))
self["estimated"] = int(data.getAttribute("Estimated"))
# FIXME: ugly
if "term" not in self:
self["term"] = data.childNodes[0].data
else:
self["count"], self["estimated"] = None
self["term"] = data
def __repr__(self):
return "\"%s\"" % self["term"].encode("utf-8")
def __cmp__(self, other):
if not isinstance(other, Keyword):
raise TypeError
if self["count"] < other["count"]:
return -1
elif self["count"] == other["count"]:
return 0
else:
return 1
class HistoryKeyword(Keyword):
"""Class wrapper for a keyword item from history search."""
def __init__(self, term, history):
self["term"] = term
self["date"] = datetime.strptime(
history.getAttribute("Date"), "%Y-%m-%d")
super(HistoryKeyword, self).__init__(history)
def __repr__(self):
return "%s on %s" % (super(HistoryKeyword, self).__repr__(),
self["date"].date())
class Api(object):
"""Api worker class."""
def __init__(self, apikey):
"""Constructor."""
self.apikey = apikey
self.apiurl = "http://api.wordze.com"
def history(self, query, date):
"""
Method performs a lookup of the history for a given
keyword.
Note: the date should be either datetime.datetime
instance or a string of format YYYYMM.
"""
if isinstance(date, datetime):
date = date.strftime("%Y%m")
elif isinstance(date, basestring):
try:
# Validating date format
datetime.strptime(date, "%Y%m")
except ValueError:
raise ApiError("Invalid date format")
else:
raise ApiError("Invalid date format")
dom = parse(self._request("ApiHistory", {"ApiKey": self.apikey,
"Query": query,
"Date": date}))
if self._validate(dom):
# We have just one query, which doesn't change,
# from item to item, so it's convinient to
# wrap it in a partial.
_HistoryKeyword = partial(HistoryKeyword, query)
            keywords = map(_HistoryKeyword,
                           dom.getElementsByTagName("data"))
return keywords
def status(self):
"""
Method checks Wordze.com account status (number of API
queries used for a day).
Note: You should ONLY run this at the start of your
application, and keep track until it completes.
"""
dom = parse(self._request("AccountStatus",
{"ApiKey": self.apikey}))
if self._validate(dom):
return {"Search": extract_text(dom, "Search"),
"Wordrank": extract_text(dom, "Wordrank"),
"Dig": extract_text(dom, "Dig")}
return {}
def single(self, *queries):
"""
Method performs a a single keyword search for a given list
of keywords.
"""
if len(queries) > 500:
raise ApiError("Single keyword search is limited to "
"500 queries at a time")
dom = parse(
self._request("KeywordSingle", {"ApiKey": self.apikey,
"Query": ",".join(queries)}))
if self._validate(dom):
return sorted(map(Keyword, dom.getElementsByTagName("Keyword")))
def search(self,
query, pagenum=1, filtertype=F_NONE, numresult=20,
searchstyle=S_BROAD, charlen=None, countlim=None):
"""
Method performs a search using Wordze.com API.
Availible extraparams:
* query - keyword to search for
* pagenum - whe page number in results to show
* filtertype - what to filter out, should be one of the F_* constants
* numresult - number of results per page
* searchstyle - should be one of the S_* constants
* charlen - keyword length limit, explanation:
charlen=-15 will only produce results with 15 or less
characters in the keyword
charlen=25 will only produce results with 25 or more
characters in the keyword.
Note that, length is calculated __including__ spaces.
* countlim - keyword hit count limit , explanation:
countlim=-15 will only produce results with 15 or less hits
countlim=100 will only produce results with 100 or more hits
TODO: write this as a generator yielding pages one by one,
until there's nothing availible
"""
# This is ugly, but well, entitled keyword arguments in a
# function call are even uglier.
params = {"ApiKey": self.apikey,
"Query": query,
"PageNum": pagenum,
"FilterType": filtertype,
"NumResult": numresult,
"SearchStyle": searchstyle,
"CharLen": charlen,
"CountLim": countlim}
dom = parse(self._request("ApiSearch", params))
if self._validate(dom):
print dom.toxml()
return {
"page": extract_text(dom, "Page", int),
"total": extract_text(dom, "TotalPages", int),
"searchstyle": extract_text(dom, "SearchStyle"),
"filters": extract_text(dom, "Filters", int),
"numresult": extract_text(dom, "ResultsPerPage", int),
"keywords": sorted(map(Keyword,
dom.getElementsByTagName("Keyword")))}
def _request(self, method, params, count=None):
url = urlparse.urljoin(
self.apiurl, "%s?%s" % (method, urllib.urlencode(params)))
# XXX: just in case anyone supplies a negative
# max count value :)
count = count if count > 0 else None
while count != 0:
if count:
count -= 1
try:
request = urllib2.urlopen(url)
except urllib2.URLError, exc:
print "%s...retrying" % exc
else:
return request
def _validate(self, dom):
"""
Method validates API response, wrapped in minidom constructor.
If there are errors present, ApiError with appropriate error
message is raised.
"""
errors = dom.getElementsByTagName("Error")
if errors:
raise ApiError(", ".join(error.lastChild.data for error in errors))
return True
# Shortcut functions
apiworker = None
def configure(apikey):
"""Function sets the Api worker for the global (module) calls."""
global apiworker
apiworker = Api(apikey)
def proxy(obj, attr):
@wraps(getattr(Api, attr))
def wrapper(*args, **kw):
global apiworker
if apiworker:
return getattr(apiworker, attr)(*args, **kw)
raise ApiError("ApiKey not set")
return wrapper
search = proxy(apiworker, "search")
status = proxy(apiworker, "status")
single = proxy(apiworker, "single")
history = proxy(apiworker, "history")
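# --- Usage sketch ------------------------------------------------------------
# A minimal sketch of the module-level shortcuts (the API key is a placeholder
# and the calls hit api.wordze.com, so they are shown as comments only):
#
#   configure("YOUR-API-KEY")
#   print status()                                 # remaining daily quota
#   page = search("python hosting", numresult=10)
#   for keyword in page["keywords"]:
#       print keyword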
| ActiveState/code | recipes/Python/577018_Wordzecom_API_bindings/recipe-577018.py | Python | mit | 8,536 |
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
# Only Add Known Multiples, but still O(n)
def quicker_multiples_of_3_and_5(top_limit):
multiplier = 1
multiples = set()
while True:
if multiplier * 5 < top_limit:
multiples.add(multiplier * 5)
if multiplier * 3 < top_limit:
multiples.add(multiplier * 3)
else:
break
multiplier += 1
return sum(multiples)
# Brute Force O(n)
def multiples_of_3_and_5(upper_limit):
multiples = set()
for possible_multiple in range(upper_limit):
if not all([possible_multiple % 3, possible_multiple % 5]):
multiples.add(possible_multiple)
return sum(multiples)
# print(sum(multiples))
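# A constant-time alternative (a sketch, not one of the timed solutions above):
# by inclusion-exclusion, add the multiples of 3 and of 5, then subtract the
# multiples of 15 that were counted twice. Each term is k * (1 + 2 + ... + m)
# with m = (upper_limit - 1) // k.
def closed_form_multiples_of_3_and_5(upper_limit):
    def triangle_sum(k):
        m = (upper_limit - 1) // k  # how many positive multiples of k lie below the limit
        return k * m * (m + 1) // 2
    return triangle_sum(3) + triangle_sum(5) - triangle_sum(15)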
if __name__ == '__main__':
import timeit
print(timeit.repeat("multiples_of_3_and_5(10000)",
setup="from __main__ import multiples_of_3_and_5",
number=1000, repeat=3))
print(timeit.repeat("quicker_multiples_of_3_and_5(10000)",
setup="from __main__ import quicker_multiples_of_3_and_5",
number=1000, repeat=3))
| bigfatpanda-training/pandas-practical-python-primer | training/level-1-the-zen-of-python/bfp-reference/multiples_of_3_and_5.py | Python | artistic-2.0 | 1,312 |
__author__ = 'saftophobia'
| Saftophobia/shunting | data/__init__.py | Python | mit | 28 |
from django.conf.urls import url
import test_project.views as views
urlpatterns = [
url(r'^computer/(?P<id>[0-9]+)/license/(?P<field_pk>[0-9]+)$', views.index, name="computer-license-detail"),
]
| mixman/django-js-utils | test_project/urls_api.py | Python | mit | 220 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
import copy
import mock
from oslo.config import cfg
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.extensions import vpnaas
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron import quota
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class VpnaasTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
vpnaas.RESOURCE_ATTRIBUTE_MAP)
return vpnaas.Vpnaas.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class VpnaasExtensionTestCase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
super(VpnaasExtensionTestCase, self).setUp()
plugin = 'neutron.extensions.vpnaas.VPNPluginBase'
# Ensure 'stale' patched copies of the plugin are never returned
manager.NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Create the default configurations
args = ['--config-file', test_api_v2.etcdir('neutron.conf.test')]
config.parse(args)
        # just stubbing the core plugin with the VPN service plugin
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('service_plugins', [plugin])
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance.get_plugin_type.return_value = constants.VPN
ext_mgr = VpnaasTestExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
super(VpnaasExtensionTestCase, self).setUp()
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def tearDown(self):
self._plugin_patcher.stop()
self.api = None
self.plugin = None
cfg.CONF.reset()
super(VpnaasExtensionTestCase, self).tearDown()
def test_ikepolicy_create(self):
"""Test case to create an ikepolicy."""
ikepolicy_id = _uuid()
data = {'ikepolicy': {'name': 'ikepolicy1',
'description': 'myikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'phase1_negotiation_mode': 'main',
'lifetime': {
'units': 'seconds',
'value': 3600},
'ike_version': 'v1',
'pfs': 'group5',
'tenant_id': _uuid()}}
return_value = copy.copy(data['ikepolicy'])
return_value.update({'id': ikepolicy_id})
instance = self.plugin.return_value
instance.create_ikepolicy.return_value = return_value
res = self.api.post(_get_path('vpn/ikepolicies', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_ikepolicy.assert_called_with(mock.ANY,
ikepolicy=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('ikepolicy', res)
self.assertEqual(res['ikepolicy'], return_value)
def test_ikepolicy_list(self):
"""Test case to list all ikepolicies."""
ikepolicy_id = _uuid()
return_value = [{'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'group5',
'ike_version': 'v1',
'id': ikepolicy_id}]
instance = self.plugin.return_value
instance.get_ikepolicies.return_value = return_value
res = self.api.get(_get_path('vpn/ikepolicies', fmt=self.fmt))
instance.get_ikepolicies.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_ikepolicy_update(self):
"""Test case to update an ikepolicy."""
ikepolicy_id = _uuid()
update_data = {'ikepolicy': {'name': 'ikepolicy1',
'encryption_algorithm': 'aes-256'}}
return_value = {'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-256',
'phase1_negotiation_mode': 'main',
'lifetime': {
'units': 'seconds',
'value': 3600},
'ike_version': 'v1',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ikepolicy_id}
instance = self.plugin.return_value
instance.update_ikepolicy.return_value = return_value
res = self.api.put(_get_path('vpn/ikepolicies', id=ikepolicy_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_ikepolicy.assert_called_with(mock.ANY, ikepolicy_id,
ikepolicy=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('ikepolicy', res)
self.assertEqual(res['ikepolicy'], return_value)
def test_ikepolicy_get(self):
"""Test case to get or show an ikepolicy."""
ikepolicy_id = _uuid()
return_value = {'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'phase1_negotiation_mode': 'main',
'lifetime': {
'units': 'seconds',
'value': 3600},
'ike_version': 'v1',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ikepolicy_id}
instance = self.plugin.return_value
instance.get_ikepolicy.return_value = return_value
res = self.api.get(_get_path('vpn/ikepolicies', id=ikepolicy_id,
fmt=self.fmt))
instance.get_ikepolicy.assert_called_with(mock.ANY,
ikepolicy_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('ikepolicy', res)
self.assertEqual(res['ikepolicy'], return_value)
def test_ikepolicy_delete(self):
"""Test case to delete an ikepolicy."""
self._test_entity_delete('ikepolicy')
def test_ipsecpolicy_create(self):
"""Test case to create an ipsecpolicy."""
ipsecpolicy_id = _uuid()
data = {'ipsecpolicy': {'name': 'ipsecpolicy1',
'description': 'myipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'encapsulation_mode': 'tunnel',
'lifetime': {
'units': 'seconds',
'value': 3600},
'transform_protocol': 'esp',
'pfs': 'group5',
'tenant_id': _uuid()}}
return_value = copy.copy(data['ipsecpolicy'])
return_value.update({'id': ipsecpolicy_id})
instance = self.plugin.return_value
instance.create_ipsecpolicy.return_value = return_value
res = self.api.post(_get_path('vpn/ipsecpolicies', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_ipsecpolicy.assert_called_with(mock.ANY,
ipsecpolicy=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('ipsecpolicy', res)
self.assertEqual(res['ipsecpolicy'], return_value)
def test_ipsecpolicy_list(self):
"""Test case to list an ipsecpolicy."""
ipsecpolicy_id = _uuid()
return_value = [{'name': 'ipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'group5',
'id': ipsecpolicy_id}]
instance = self.plugin.return_value
instance.get_ipsecpolicies.return_value = return_value
res = self.api.get(_get_path('vpn/ipsecpolicies', fmt=self.fmt))
instance.get_ipsecpolicies.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_ipsecpolicy_update(self):
"""Test case to update an ipsecpolicy."""
ipsecpolicy_id = _uuid()
update_data = {'ipsecpolicy': {'name': 'ipsecpolicy1',
'encryption_algorithm': 'aes-256'}}
return_value = {'name': 'ipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'encapsulation_mode': 'tunnel',
'lifetime': {
'units': 'seconds',
'value': 3600},
'transform_protocol': 'esp',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ipsecpolicy_id}
instance = self.plugin.return_value
instance.update_ipsecpolicy.return_value = return_value
res = self.api.put(_get_path('vpn/ipsecpolicies',
id=ipsecpolicy_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_ipsecpolicy.assert_called_with(mock.ANY,
ipsecpolicy_id,
ipsecpolicy=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('ipsecpolicy', res)
self.assertEqual(res['ipsecpolicy'], return_value)
def test_ipsecpolicy_get(self):
"""Test case to get or show an ipsecpolicy."""
ipsecpolicy_id = _uuid()
return_value = {'name': 'ipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'encapsulation_mode': 'tunnel',
'lifetime': {
'units': 'seconds',
'value': 3600},
'transform_protocol': 'esp',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ipsecpolicy_id}
instance = self.plugin.return_value
instance.get_ipsecpolicy.return_value = return_value
res = self.api.get(_get_path('vpn/ipsecpolicies',
id=ipsecpolicy_id,
fmt=self.fmt))
instance.get_ipsecpolicy.assert_called_with(mock.ANY,
ipsecpolicy_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('ipsecpolicy', res)
self.assertEqual(res['ipsecpolicy'], return_value)
def test_ipsecpolicy_delete(self):
"""Test case to delete an ipsecpolicy."""
self._test_entity_delete('ipsecpolicy')
def test_vpnservice_create(self):
"""Test case to create a vpnservice."""
vpnservice_id = _uuid()
data = {'vpnservice': {'name': 'vpnservice1',
'description': 'descr_vpn1',
'subnet_id': _uuid(),
'router_id': _uuid(),
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['vpnservice'])
return_value.update({'status': "ACTIVE", 'id': vpnservice_id})
instance = self.plugin.return_value
instance.create_vpnservice.return_value = return_value
res = self.api.post(_get_path('vpn/vpnservices', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_vpnservice.assert_called_with(mock.ANY,
vpnservice=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('vpnservice', res)
self.assertEqual(res['vpnservice'], return_value)
def test_vpnservice_list(self):
"""Test case to list all vpnservices."""
vpnservice_id = _uuid()
return_value = [{'name': 'vpnservice1',
'tenant_id': _uuid(),
'status': 'ACTIVE',
'id': vpnservice_id}]
instance = self.plugin.return_value
        instance.get_vpnservices.return_value = return_value
res = self.api.get(_get_path('vpn/vpnservices', fmt=self.fmt))
instance.get_vpnservices.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_vpnservice_update(self):
"""Test case to update a vpnservice."""
vpnservice_id = _uuid()
update_data = {'vpnservice': {'admin_state_up': False}}
return_value = {'name': 'vpnservice1',
'admin_state_up': False,
'subnet_id': _uuid(),
'router_id': _uuid(),
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vpnservice_id}
instance = self.plugin.return_value
instance.update_vpnservice.return_value = return_value
res = self.api.put(_get_path('vpn/vpnservices',
id=vpnservice_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_vpnservice.assert_called_with(mock.ANY,
vpnservice_id,
vpnservice=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vpnservice', res)
self.assertEqual(res['vpnservice'], return_value)
def test_vpnservice_get(self):
"""Test case to get or show a vpnservice."""
vpnservice_id = _uuid()
return_value = {'name': 'vpnservice1',
'admin_state_up': True,
'subnet_id': _uuid(),
'router_id': _uuid(),
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vpnservice_id}
instance = self.plugin.return_value
instance.get_vpnservice.return_value = return_value
res = self.api.get(_get_path('vpn/vpnservices',
id=vpnservice_id,
fmt=self.fmt))
instance.get_vpnservice.assert_called_with(mock.ANY,
vpnservice_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vpnservice', res)
self.assertEqual(res['vpnservice'], return_value)
    def _test_entity_delete(self, entity):
        """Does the entity deletion based on the naming convention."""
entity_id = _uuid()
path_map = {'ipsecpolicy': 'vpn/ipsecpolicies',
'ikepolicy': 'vpn/ikepolicies',
'ipsec_site_connection': 'vpn/ipsec-site-connections'}
path = path_map.get(entity, 'vpn/' + entity + 's')
res = self.api.delete(_get_path(path,
id=entity_id,
fmt=self.fmt))
delete_entity = getattr(self.plugin.return_value, "delete_" + entity)
delete_entity.assert_called_with(mock.ANY, entity_id)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
def test_vpnservice_delete(self):
"""Test case to delete a vpnservice."""
self._test_entity_delete('vpnservice')
    def test_ipsec_site_connection_create(self):
        """Test case to create an ipsec_site_connection."""
ipsecsite_con_id = _uuid()
ikepolicy_id = _uuid()
ipsecpolicy_id = _uuid()
data = {
'ipsec_site_connection': {'name': 'connection1',
'description': 'Remote-connection1',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24',
'192.168.3.0/24'],
'mtu': 1500,
'psk': 'abcd',
'initiator': 'bi-directional',
'dpd': {
'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': ikepolicy_id,
'ipsecpolicy_id': ipsecpolicy_id,
'vpnservice_id': _uuid(),
'admin_state_up': True,
'tenant_id': _uuid()}
}
return_value = copy.copy(data['ipsec_site_connection'])
return_value.update({'status': "ACTIVE", 'id': ipsecsite_con_id})
instance = self.plugin.return_value
instance.create_ipsec_site_connection.return_value = return_value
res = self.api.post(_get_path('vpn/ipsec-site-connections',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_ipsec_site_connection.assert_called_with(
mock.ANY, ipsec_site_connection=data
)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('ipsec_site_connection', res)
self.assertEqual(res['ipsec_site_connection'], return_value)
def test_ipsec_site_connection_list(self):
"""Test case to list all ipsec_site_connections."""
ipsecsite_con_id = _uuid()
return_value = [{'name': 'connection1',
'peer_address': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'route_mode': 'static',
'auth_mode': 'psk',
'tenant_id': _uuid(),
'status': 'ACTIVE',
'id': ipsecsite_con_id}]
instance = self.plugin.return_value
instance.get_ipsec_site_connections.return_value = return_value
res = self.api.get(
_get_path('vpn/ipsec-site-connections', fmt=self.fmt))
instance.get_ipsec_site_connections.assert_called_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY
)
self.assertEqual(res.status_int, exc.HTTPOk.code)
    def test_ipsec_site_connection_update(self):
        """Test case to update an ipsec_site_connection."""
ipsecsite_con_id = _uuid()
update_data = {'ipsec_site_connection': {'admin_state_up': False}}
return_value = {'name': 'connection1',
'description': 'Remote-connection1',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'mtu': 1500,
'psk': 'abcd',
'initiator': 'bi-directional',
'dpd': {
'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': _uuid(),
'ipsecpolicy_id': _uuid(),
'vpnservice_id': _uuid(),
'admin_state_up': False,
'tenant_id': _uuid(),
'status': 'ACTIVE',
'id': ipsecsite_con_id}
instance = self.plugin.return_value
instance.update_ipsec_site_connection.return_value = return_value
res = self.api.put(_get_path('vpn/ipsec-site-connections',
id=ipsecsite_con_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_ipsec_site_connection.assert_called_with(
mock.ANY, ipsecsite_con_id, ipsec_site_connection=update_data
)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('ipsec_site_connection', res)
self.assertEqual(res['ipsec_site_connection'], return_value)
    def test_ipsec_site_connection_get(self):
        """Test case to get or show an ipsec_site_connection."""
ipsecsite_con_id = _uuid()
return_value = {'name': 'connection1',
'description': 'Remote-connection1',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24',
'192.168.3.0/24'],
'mtu': 1500,
'psk': 'abcd',
'initiator': 'bi-directional',
'dpd': {
'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': _uuid(),
'ipsecpolicy_id': _uuid(),
'vpnservice_id': _uuid(),
'admin_state_up': True,
'tenant_id': _uuid(),
'status': 'ACTIVE',
'id': ipsecsite_con_id}
instance = self.plugin.return_value
instance.get_ipsec_site_connection.return_value = return_value
res = self.api.get(_get_path('vpn/ipsec-site-connections',
id=ipsecsite_con_id,
fmt=self.fmt))
instance.get_ipsec_site_connection.assert_called_with(
mock.ANY, ipsecsite_con_id, fields=mock.ANY
)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('ipsec_site_connection', res)
self.assertEqual(res['ipsec_site_connection'], return_value)
    def test_ipsec_site_connection_delete(self):
        """Test case to delete an ipsec_site_connection."""
self._test_entity_delete('ipsec_site_connection')
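# NOTE: the subclass below re-runs the entire test suite above with XML
# request/response serialization simply by overriding the fmt attribute.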
class VpnaasExtensionTestCaseXML(VpnaasExtensionTestCase):
fmt = 'xml'
| oeeagle/quantum | neutron/tests/unit/services/vpn/test_vpnaas_extension.py | Python | apache-2.0 | 26,021 |
#
#
# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Functions to bootstrap a new cluster.
"""
import os
import os.path
import re
import logging
import time
import tempfile
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils
# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"
#: After how many seconds daemon must be responsive
_DAEMON_READY_TIMEOUT = 10.0
def _InitSSHSetup():
"""Setup the SSH configuration for the cluster.
This generates a dsa keypair for root, adds the pub key to the
permitted hosts and adds the hostkey to its own known hosts.
"""
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)
for name in priv_key, pub_key:
if os.path.exists(name):
utils.CreateBackup(name)
utils.RemoveFile(name)
result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
"-f", priv_key,
"-q", "-N", ""])
if result.failed:
raise errors.OpExecError("Could not generate ssh keypair, error %s" %
result.output)
utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_CONSOLE_USER)
for name in priv_key, pub_key:
if os.path.exists(name):
utils.CreateBackup(name)
utils.RemoveFile(name)
result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
"-f", priv_key,
"-q", "-N", ""])
if result.failed:
raise errors.OpExecError("Could not generate ssh keypair, error %s" %
result.output)
utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key),
options=constants.SSH_CONSOLE_USER_OPTIONS)
def GenerateHmacKey(file_name):
"""Writes a new HMAC key.
@type file_name: str
@param file_name: Path to output file
"""
utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
backup=True)
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
new_confd_hmac_key, new_cds,
rapi_cert_pem=None, spice_cert_pem=None,
spice_cacert_pem=None, cds=None,
nodecert_file=pathutils.NODED_CERT_FILE,
rapicert_file=pathutils.RAPI_CERT_FILE,
spicecert_file=pathutils.SPICE_CERT_FILE,
spicecacert_file=pathutils.SPICE_CACERT_FILE,
hmackey_file=pathutils.CONFD_HMAC_KEY,
cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
"""Updates the cluster certificates, keys and secrets.
@type new_cluster_cert: bool
@param new_cluster_cert: Whether to generate a new cluster certificate
@type new_rapi_cert: bool
@param new_rapi_cert: Whether to generate a new RAPI certificate
@type new_spice_cert: bool
@param new_spice_cert: Whether to generate a new SPICE certificate
@type new_confd_hmac_key: bool
@param new_confd_hmac_key: Whether to generate a new HMAC key
@type new_cds: bool
@param new_cds: Whether to generate a new cluster domain secret
@type rapi_cert_pem: string
@param rapi_cert_pem: New RAPI certificate in PEM format
@type spice_cert_pem: string
@param spice_cert_pem: New SPICE certificate in PEM format
@type spice_cacert_pem: string
@param spice_cacert_pem: Certificate of the CA that signed the SPICE
certificate, in PEM format
@type cds: string
@param cds: New cluster domain secret
@type nodecert_file: string
@param nodecert_file: optional override of the node cert file path
@type rapicert_file: string
@param rapicert_file: optional override of the rapi cert file path
@type spicecert_file: string
@param spicecert_file: optional override of the spice cert file path
@type spicecacert_file: string
@param spicecacert_file: optional override of the spice CA cert file path
@type hmackey_file: string
@param hmackey_file: optional override of the hmac key file path
"""
# noded SSL certificate
cluster_cert_exists = os.path.exists(nodecert_file)
if new_cluster_cert or not cluster_cert_exists:
if cluster_cert_exists:
utils.CreateBackup(nodecert_file)
logging.debug("Generating new cluster certificate at %s", nodecert_file)
utils.GenerateSelfSignedSslCert(nodecert_file)
# confd HMAC key
if new_confd_hmac_key or not os.path.exists(hmackey_file):
logging.debug("Writing new confd HMAC key to %s", hmackey_file)
GenerateHmacKey(hmackey_file)
# RAPI
rapi_cert_exists = os.path.exists(rapicert_file)
if rapi_cert_pem:
# Assume rapi_pem contains a valid PEM-formatted certificate and key
logging.debug("Writing RAPI certificate at %s", rapicert_file)
utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)
elif new_rapi_cert or not rapi_cert_exists:
if rapi_cert_exists:
utils.CreateBackup(rapicert_file)
logging.debug("Generating new RAPI certificate at %s", rapicert_file)
utils.GenerateSelfSignedSslCert(rapicert_file)
# SPICE
spice_cert_exists = os.path.exists(spicecert_file)
spice_cacert_exists = os.path.exists(spicecacert_file)
if spice_cert_pem:
# spice_cert_pem implies also spice_cacert_pem
logging.debug("Writing SPICE certificate at %s", spicecert_file)
utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
elif new_spice_cert or not spice_cert_exists:
if spice_cert_exists:
utils.CreateBackup(spicecert_file)
if spice_cacert_exists:
utils.CreateBackup(spicecacert_file)
logging.debug("Generating new self-signed SPICE certificate at %s",
spicecert_file)
(_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)
# Self-signed certificate -> the public certificate is also the CA public
# certificate
logging.debug("Writing the public certificate to %s",
spicecert_file)
utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)
# Cluster domain secret
if cds:
logging.debug("Writing cluster domain secret to %s", cds_file)
utils.WriteFile(cds_file, data=cds, backup=True)
elif new_cds or not os.path.exists(cds_file):
logging.debug("Generating new cluster domain secret at %s", cds_file)
GenerateHmacKey(cds_file)
def _InitGanetiServerSetup(master_name):
"""Setup the necessary configuration for the initial node daemon.
This creates the nodepass file containing the shared password for
the cluster, generates the SSL certificate and starts the node daemon.
@type master_name: str
@param master_name: Name of the master node
"""
# Generate cluster secrets
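  # (positional flags: new_cluster_cert=True; the RAPI and SPICE certificates,
  # the confd HMAC key and the cluster domain secret are only created if missing)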
GenerateClusterCrypto(True, False, False, False, False)
result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
if result.failed:
raise errors.OpExecError("Could not start the node daemon, command %s"
" had exitcode %s and error %s" %
(result.cmd, result.exit_code, result.output))
_WaitForNodeDaemon(master_name)
def _WaitForNodeDaemon(node_name):
"""Wait for node daemon to become responsive.
"""
def _CheckNodeDaemon():
# Pylint bug <http://www.logilab.org/ticket/35642>
# pylint: disable=E1101
result = rpc.BootstrapRunner().call_version([node_name])[node_name]
if result.fail_msg:
raise utils.RetryAgain()
try:
utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
except utils.RetryTimeout:
raise errors.OpExecError("Node daemon on %s didn't answer queries within"
" %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))
def _WaitForMasterDaemon():
"""Wait for master daemon to become responsive.
"""
def _CheckMasterDaemon():
try:
cl = luxi.Client()
(cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
except Exception:
raise utils.RetryAgain()
logging.debug("Received cluster name %s from master", cluster_name)
try:
utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
except utils.RetryTimeout:
raise errors.OpExecError("Master daemon didn't answer queries within"
" %s seconds" % _DAEMON_READY_TIMEOUT)
def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
use_cluster_key, ask_key, strict_host_check, data):
"""Runs a command to configure something on a remote machine.
@type cluster_name: string
@param cluster_name: Cluster name
@type node: string
@param node: Node name
@type basecmd: string
@param basecmd: Base command (path on the remote machine)
@type debug: bool
@param debug: Enable debug output
@type verbose: bool
@param verbose: Enable verbose output
@type use_cluster_key: bool
@param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
@type ask_key: bool
@param ask_key: See L{ssh.SshRunner.BuildCmd}
@type strict_host_check: bool
@param strict_host_check: See L{ssh.SshRunner.BuildCmd}
@param data: JSON-serializable input data for script (passed to stdin)
"""
cmd = [basecmd]
# Pass --debug/--verbose to the external script if set on our invocation
if debug:
cmd.append("--debug")
if verbose:
cmd.append("--verbose")
family = ssconf.SimpleStore().GetPrimaryIPFamily()
srun = ssh.SshRunner(cluster_name,
ipv6=(family == netutils.IP6Address.family))
scmd = srun.BuildCmd(constants.REMOTE_CMD_ESCALATION,
node, constants.SSH_LOGIN_USER,
utils.ShellQuoteArgs(cmd),
batch=False, ask_key=ask_key, quiet=False,
strict_host_check=strict_host_check,
use_cluster_key=use_cluster_key)
tempfh = tempfile.TemporaryFile()
try:
tempfh.write(serializer.DumpJson(data))
tempfh.seek(0)
result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)
finally:
tempfh.close()
if result.failed:
raise errors.OpExecError("Command '%s' failed: %s" %
(result.cmd, result.fail_reason))
def _InitFileStorage(file_storage_dir):
"""Initialize if needed the file storage.
@param file_storage_dir: the user-supplied value
@return: either empty string (if file storage was disabled at build
time) or the normalized path to the storage directory
"""
file_storage_dir = os.path.normpath(file_storage_dir)
if not os.path.isabs(file_storage_dir):
raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
" path" % file_storage_dir, errors.ECODE_INVAL)
if not os.path.exists(file_storage_dir):
try:
os.makedirs(file_storage_dir, 0750)
except OSError, err:
raise errors.OpPrereqError("Cannot create file storage directory"
" '%s': %s" % (file_storage_dir, err),
errors.ECODE_ENVIRON)
if not os.path.isdir(file_storage_dir):
raise errors.OpPrereqError("The file storage directory '%s' is not"
" a directory." % file_storage_dir,
errors.ECODE_ENVIRON)
return file_storage_dir
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir, hostname,
shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
vg_name=None, beparams=None, nicparams=None, ndparams=None,
hvparams=None, diskparams=None, enabled_hypervisors=None,
modify_etc_hosts=True, modify_ssh_setup=True,
maintain_node_health=False, drbd_helper=None, uid_pool=None,
default_iallocator=None, primary_ip_version=None, ipolicy=None,
prealloc_wipe_disks=False, use_external_mip_script=False,
hv_state=None, disk_state=None):
"""Initialise the cluster.
@type candidate_pool_size: int
@param candidate_pool_size: master candidate pool size
"""
# TODO: complete the docstring
if config.ConfigWriter.IsCluster():
raise errors.OpPrereqError("Cluster is already initialised",
errors.ECODE_STATE)
if not enabled_hypervisors:
raise errors.OpPrereqError("Enabled hypervisors list must contain at"
" least one member", errors.ECODE_INVAL)
invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
if invalid_hvs:
raise errors.OpPrereqError("Enabled hypervisors contains invalid"
" entries: %s" % invalid_hvs,
errors.ECODE_INVAL)
try:
ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
except errors.ProgrammerError:
raise errors.OpPrereqError("Invalid primary ip version: %d." %
primary_ip_version, errors.ECODE_INVAL)
  hostname = netutils.GetHostname(name=hostname, family=ipcls.family)
if not ipcls.IsValid(hostname.ip):
raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
" address." % (hostname.ip, primary_ip_version),
errors.ECODE_INVAL)
if ipcls.IsLoopback(hostname.ip):
raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
" address. Please fix DNS or %s." %
(hostname.ip, pathutils.ETC_HOSTS),
errors.ECODE_ENVIRON)
if not ipcls.Own(hostname.ip):
raise errors.OpPrereqError("Inconsistency: this host's name resolves"
" to %s,\nbut this ip address does not"
" belong to this host" %
hostname.ip, errors.ECODE_ENVIRON)
clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)
if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
raise errors.OpPrereqError("Cluster IP already active",
errors.ECODE_NOTUNIQUE)
if not secondary_ip:
if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
" IPv4 address must be given as secondary",
errors.ECODE_INVAL)
secondary_ip = hostname.ip
if not netutils.IP4Address.IsValid(secondary_ip):
raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
" IPv4 address." % secondary_ip,
errors.ECODE_INVAL)
if not netutils.IP4Address.Own(secondary_ip):
raise errors.OpPrereqError("You gave %s as secondary IP,"
" but it does not belong to this host." %
secondary_ip, errors.ECODE_ENVIRON)
if master_netmask is not None:
if not ipcls.ValidateNetmask(master_netmask):
raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
(master_netmask, primary_ip_version),
errors.ECODE_INVAL)
else:
master_netmask = ipcls.iplen
if vg_name is not None:
# Check if volume group is valid
vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
constants.MIN_VG_SIZE)
if vgstatus:
raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
" you are not using lvm" % vgstatus,
errors.ECODE_INVAL)
if drbd_helper is not None:
try:
curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
except errors.BlockDeviceError, err:
raise errors.OpPrereqError("Error while checking drbd helper"
" (specify --no-drbd-storage if you are not"
" using drbd): %s" % str(err),
errors.ECODE_ENVIRON)
if drbd_helper != curr_helper:
raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
" is the current helper" % (drbd_helper,
curr_helper),
errors.ECODE_INVAL)
logging.debug("Stopping daemons (if any are running)")
result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
if result.failed:
raise errors.OpExecError("Could not stop daemons, command %s"
" had exitcode %s and error '%s'" %
(result.cmd, result.exit_code, result.output))
if constants.ENABLE_FILE_STORAGE:
file_storage_dir = _InitFileStorage(file_storage_dir)
else:
file_storage_dir = ""
if constants.ENABLE_SHARED_FILE_STORAGE:
shared_file_storage_dir = _InitFileStorage(shared_file_storage_dir)
else:
shared_file_storage_dir = ""
if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
errors.ECODE_INVAL)
result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
if result.failed:
raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
(master_netdev,
result.output.strip()), errors.ECODE_INVAL)
dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
utils.EnsureDirs(dirs)
objects.UpgradeBeParams(beparams)
utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
objects.NIC.CheckParameterSyntax(nicparams)
full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
if ndparams is not None:
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
else:
ndparams = dict(constants.NDC_DEFAULTS)
# This is ugly, as we modify the dict itself
# FIXME: Make utils.ForceDictType pure functional or write a wrapper
# around it
if hv_state:
for hvname, hvs_data in hv_state.items():
utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
else:
hv_state = dict((hvname, constants.HVST_DEFAULTS)
for hvname in enabled_hypervisors)
# FIXME: disk_state has no default values yet
if disk_state:
for storage, ds_data in disk_state.items():
if storage not in constants.DS_VALID_TYPES:
raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
storage, errors.ECODE_INVAL)
for ds_name, state in ds_data.items():
utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)
# hvparams is a mapping of hypervisor->hvparams dict
for hv_name, hv_params in hvparams.iteritems():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class = hypervisor.GetHypervisor(hv_name)
hv_class.CheckParameterSyntax(hv_params)
# diskparams is a mapping of disk-template->diskparams dict
for template, dt_params in diskparams.items():
param_keys = set(dt_params.keys())
default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
if not (param_keys <= default_param_keys):
unknown_params = param_keys - default_param_keys
raise errors.OpPrereqError("Invalid parameters for disk template %s:"
" %s" % (template,
utils.CommaJoin(unknown_params)),
errors.ECODE_INVAL)
utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
if template == constants.DT_DRBD8 and vg_name is not None:
# The default METAVG value is equal to the VG name set at init time,
# if provided
dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name
try:
utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
errors.ECODE_INVAL)
# set up ssh config and /etc/hosts
sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
sshkey = sshline.split(" ")[1]
if modify_etc_hosts:
utils.AddHostToEtcHosts(hostname.name, hostname.ip)
if modify_ssh_setup:
_InitSSHSetup()
if default_iallocator is not None:
alloc_script = utils.FindFile(default_iallocator,
constants.IALLOCATOR_SEARCH_PATH,
os.path.isfile)
if alloc_script is None:
raise errors.OpPrereqError("Invalid default iallocator script '%s'"
" specified" % default_iallocator,
errors.ECODE_INVAL)
elif constants.HTOOLS:
# htools was enabled at build-time, we default to it
if utils.FindFile(constants.IALLOC_HAIL,
constants.IALLOCATOR_SEARCH_PATH,
os.path.isfile):
default_iallocator = constants.IALLOC_HAIL
now = time.time()
# init of cluster config file
cluster_config = objects.Cluster(
serial_no=1,
rsahostkeypub=sshkey,
highest_used_port=(constants.FIRST_DRBD_PORT - 1),
mac_prefix=mac_prefix,
volume_group_name=vg_name,
tcpudp_port_pool=set(),
master_node=hostname.name,
master_ip=clustername.ip,
master_netmask=master_netmask,
master_netdev=master_netdev,
cluster_name=clustername.name,
file_storage_dir=file_storage_dir,
shared_file_storage_dir=shared_file_storage_dir,
enabled_hypervisors=enabled_hypervisors,
beparams={constants.PP_DEFAULT: beparams},
nicparams={constants.PP_DEFAULT: nicparams},
ndparams=ndparams,
hvparams=hvparams,
diskparams=diskparams,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
modify_ssh_setup=modify_ssh_setup,
uid_pool=uid_pool,
ctime=now,
mtime=now,
maintain_node_health=maintain_node_health,
drbd_usermode_helper=drbd_helper,
default_iallocator=default_iallocator,
primary_ip_family=ipcls.family,
prealloc_wipe_disks=prealloc_wipe_disks,
use_external_mip_script=use_external_mip_script,
ipolicy=full_ipolicy,
hv_state_static=hv_state,
disk_state_static=disk_state,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
secondary_ip=secondary_ip,
serial_no=1,
master_candidate=True,
offline=False, drained=False,
ctime=now, mtime=now,
)
InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
cfg = config.ConfigWriter(offline=True)
ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
cfg.Update(cfg.GetClusterInfo(), logging.error)
ssconf.WriteSsconfFiles(cfg.GetSsconfValues())
# set up the inter-node password and certificate
_InitGanetiServerSetup(hostname.name)
logging.debug("Starting daemons")
result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
if result.failed:
raise errors.OpExecError("Could not start daemons, command %s"
" had exitcode %s and error %s" %
(result.cmd, result.exit_code, result.output))
_WaitForMasterDaemon()
def InitConfig(version, cluster_config, master_node_config,
cfg_file=pathutils.CLUSTER_CONF_FILE):
"""Create the initial cluster configuration.
It will contain the current node, which will also be the master
node, and no instances.
@type version: int
@param version: configuration version
@type cluster_config: L{objects.Cluster}
@param cluster_config: cluster configuration
@type master_node_config: L{objects.Node}
@param master_node_config: master node configuration
@type cfg_file: string
@param cfg_file: configuration file path
"""
uuid_generator = config.TemporaryReservationManager()
cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
_INITCONF_ECID)
master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
_INITCONF_ECID)
nodes = {
master_node_config.name: master_node_config,
}
default_nodegroup = objects.NodeGroup(
uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
name=constants.INITIAL_NODE_GROUP_NAME,
members=[master_node_config.name],
diskparams={},
)
nodegroups = {
default_nodegroup.uuid: default_nodegroup,
}
now = time.time()
config_data = objects.ConfigData(version=version,
cluster=cluster_config,
nodegroups=nodegroups,
nodes=nodes,
instances={},
networks={},
serial_no=1,
ctime=now, mtime=now)
utils.WriteFile(cfg_file,
data=serializer.Dump(config_data.ToDict()),
mode=0600)
def FinalizeClusterDestroy(master):
"""Execute the last steps of cluster destroy
This function shuts down all the daemons, completing the destroy
begun in cmdlib.LUDestroyOpcode.
"""
cfg = config.ConfigWriter()
modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
runner = rpc.BootstrapRunner()
master_params = cfg.GetMasterNetworkParameters()
master_params.name = master
ems = cfg.GetUseExternalMipScript()
result = runner.call_node_deactivate_master_ip(master_params.name,
master_params, ems)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master IP: %s", msg)
result = runner.call_node_stop_master(master)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master role: %s", msg)
result = runner.call_node_leave_cluster(master, modify_ssh_setup)
msg = result.fail_msg
if msg:
logging.warning("Could not shutdown the node daemon and cleanup"
" the node: %s", msg)
def SetupNodeDaemon(opts, cluster_name, node):
"""Add a node to the cluster.
This function must be called before the actual opcode, and will ssh
to the remote node, copy the needed files, and start ganeti-noded,
allowing the master to do the rest via normal rpc calls.
@param cluster_name: the cluster name
@param node: the name of the new node
"""
data = {
constants.NDS_CLUSTER_NAME: cluster_name,
constants.NDS_NODE_DAEMON_CERTIFICATE:
utils.ReadFile(pathutils.NODED_CERT_FILE),
constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
constants.NDS_START_NODE_DAEMON: True,
}
RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
opts.debug, opts.verbose,
True, opts.ssh_key_check, opts.ssh_key_check, data)
_WaitForNodeDaemon(node)
def MasterFailover(no_voting=False):
"""Failover the master node.
This checks that we are not already the master, and will cause the
current master to cease being master, and the non-master to become
new master.
@type no_voting: boolean
@param no_voting: force the operation without remote nodes agreement
(dangerous)
"""
sstore = ssconf.SimpleStore()
old_master, new_master = ssconf.GetMasterAndMyself(sstore)
node_list = sstore.GetNodeList()
mc_list = sstore.GetMasterCandidates()
if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
" where you want the new master to be."
" %s is already the master" %
old_master, errors.ECODE_INVAL)
if new_master not in mc_list:
mc_no_master = [name for name in mc_list if name != old_master]
raise errors.OpPrereqError("This node is not among the nodes marked"
" as master candidates. Only these nodes"
" can become masters. Current list of"
" master candidates is:\n"
"%s" % ("\n".join(mc_no_master)),
errors.ECODE_STATE)
if not no_voting:
vote_list = GatherMasterVotes(node_list)
if vote_list:
voted_master = vote_list[0][0]
if voted_master is None:
raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
" not respond.", errors.ECODE_ENVIRON)
elif voted_master != old_master:
raise errors.OpPrereqError("I have a wrong configuration, I believe"
" the master is %s but the other nodes"
" voted %s. Please resync the configuration"
" of this node." %
(old_master, voted_master),
errors.ECODE_STATE)
# end checks
rcode = 0
logging.info("Setting master to %s, old master: %s", new_master, old_master)
try:
# instantiate a real config writer, as we now know we have the
# configuration data
cfg = config.ConfigWriter(accept_foreign=True)
cluster_info = cfg.GetClusterInfo()
cluster_info.master_node = new_master
# this will also regenerate the ssconf files, since we updated the
# cluster info
cfg.Update(cluster_info, logging.error)
except errors.ConfigurationError, err:
logging.error("Error while trying to set the new master: %s",
str(err))
return 1
  # if cfg.Update worked, then it means the old master daemon won't be
  # able to write its own config file anymore (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master
logging.info("Stopping the master daemon on node %s", old_master)
runner = rpc.BootstrapRunner()
master_params = cfg.GetMasterNetworkParameters()
master_params.name = old_master
ems = cfg.GetUseExternalMipScript()
result = runner.call_node_deactivate_master_ip(master_params.name,
master_params, ems)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master IP: %s", msg)
result = runner.call_node_stop_master(old_master)
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
" %s, please disable manually: %s", old_master, msg)
logging.info("Checking master IP non-reachability...")
master_ip = sstore.GetMasterIP()
total_timeout = 30
# Here we have a phase where no master should be running
def _check_ip():
if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
raise utils.RetryAgain()
try:
utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
except utils.RetryTimeout:
logging.warning("The master IP is still reachable after %s seconds,"
" continuing but activating the master on the current"
" node will probably fail", total_timeout)
if jstore.CheckDrainFlag():
logging.info("Undraining job queue")
jstore.SetDrainFlag(False)
logging.info("Starting the master daemons on the new master")
result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
no_voting)
msg = result.fail_msg
if msg:
logging.error("Could not start the master role on the new master"
" %s, please check: %s", new_master, msg)
rcode = 1
logging.info("Master failed over from %s to %s", old_master, new_master)
return rcode
def GetMaster():
"""Returns the current master node.
This is a separate function in bootstrap since it's needed by
gnt-cluster, and instead of importing directly ssconf, it's better
to abstract it in bootstrap, where we do use ssconf in other
functions too.
"""
sstore = ssconf.SimpleStore()
old_master, _ = ssconf.GetMasterAndMyself(sstore)
return old_master
def GatherMasterVotes(node_list):
"""Check the agreement on who is the master.
This function will return a list of (node, number of votes), ordered
by the number of votes. Errors will be denoted by the key 'None'.
Note that the sum of votes is the number of nodes this machine
knows, whereas the number of entries in the list could be different
(if some nodes vote for another master).
  We remove ourselves from the list since we know that (bugs aside)
  we use the same source of configuration information for both backend
  and bootstrap, so we'll always vote for ourselves.
@type node_list: list
@param node_list: the list of nodes to query for master info; the current
node will be removed if it is in the list
@rtype: list
@return: list of (node, votes)
"""
myself = netutils.Hostname.GetSysName()
try:
node_list.remove(myself)
except ValueError:
pass
if not node_list:
    # no nodes left (possibly after removing myself)
return []
results = rpc.BootstrapRunner().call_master_info(node_list)
if not isinstance(results, dict):
# this should not happen (unless internal error in rpc)
logging.critical("Can't complete rpc call, aborting master startup")
return [(None, len(node_list))]
votes = {}
for node in results:
nres = results[node]
data = nres.payload
msg = nres.fail_msg
fail = False
if msg:
logging.warning("Error contacting node %s: %s", node, msg)
fail = True
# for now we accept both length 3, 4 and 5 (data[3] is primary ip version
# and data[4] is the master netmask)
elif not isinstance(data, (tuple, list)) or len(data) < 3:
logging.warning("Invalid data received from node %s: %s", node, data)
fail = True
if fail:
if None not in votes:
votes[None] = 0
votes[None] += 1
continue
master_node = data[2]
if master_node not in votes:
votes[master_node] = 0
votes[master_node] += 1
  vote_list = list(votes.items())
  # sort first on number of votes then on name; we want None (the error
  # marker) sorted after real node names in case half of the nodes are
  # not responding and the other half all vote for the same master
vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
return vote_list
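# Illustrative outcome (hypothetical node names): if two reachable nodes both
# report "node1" as the master and a third node fails to answer, the returned
# list would look like [("node1", 2), (None, 1)].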
| sarahn/ganeti | lib/bootstrap.py | Python | gpl-2.0 | 36,301 |
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" VM based authorization for docker volumes and tenant management.
"""
import sqlite3
import uuid
import os
import vmdk_utils
import vmdk_ops
import logging
import auth_data_const
AUTH_DB_PATH = '/etc/vmware/vmdkops/auth-db'
def all_columns_set(privileges):
if not privileges:
return False
all_columns = [
auth_data_const.COL_DATASTORE,
auth_data_const.COL_GLOBAL_VISIBILITY,
auth_data_const.COL_CREATE_VOLUME,
auth_data_const.COL_DELETE_VOLUME,
auth_data_const.COL_MOUNT_VOLUME,
auth_data_const.COL_MAX_VOLUME_SIZE,
auth_data_const.COL_USAGE_QUOTA
]
for col in all_columns:
if not col in privileges:
return False
return True
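# Illustrative sketch (hypothetical values): all_columns_set() only returns
# True for a privileges dict that defines every column listed above, e.g.
#   {auth_data_const.COL_DATASTORE: 'datastore1',
#    auth_data_const.COL_GLOBAL_VISIBILITY: 0,
#    auth_data_const.COL_CREATE_VOLUME: 1,
#    auth_data_const.COL_DELETE_VOLUME: 1,
#    auth_data_const.COL_MOUNT_VOLUME: 1,
#    auth_data_const.COL_MAX_VOLUME_SIZE: 0,
#    auth_data_const.COL_USAGE_QUOTA: 0}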
class DbConnectionError(Exception):
""" An exception thrown when connection to a sqlite database fails. """
def __init__(self, db_path):
self.db_path = db_path
def __str__(self):
return "DB connection error %s" % self.db_path
class DockerVolumeTenant:
""" This class abstracts the operations to manage a DockerVolumeTenant.
        The interfaces it provides include:
        - add VMs to tenant
        - remove VMs from tenant
- change tenant name and description
- set datastore and privileges for a tenant
"""
    def __init__(self, name, description, default_datastore, default_privileges,
                 vms, privileges, id=None):
        """ Construct a DockerVolumeTenant object. """
self.name = name
self.description = description
self.default_datastore = default_datastore
self.default_privileges = default_privileges
self.vms = vms
self.privileges = privileges
if not id:
self.id = str(uuid.uuid4())
else:
self.id = id
def add_vms(self, conn, vms):
""" Add vms in the vms table for this tenant. """
tenant_id = self.id
vms = [(vm_id, vm_name, tenant_id) for (vm_id, vm_name) in vms]
if vms:
try:
conn.executemany(
"INSERT INTO vms(vm_id, vm_name, tenant_id) VALUES (?, ?, ?)",
vms
)
conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when inserting into vms table with vm_id %s vm_name %s"
" tenant_id %s", e, vm_id, vm_name, tenant_id)
return str(e)
return None
def remove_vms(self, conn, vms):
""" Remove vms from the vms table for this tenant. """
tenant_id = self.id
vms = [(vm_id, tenant_id) for (vm_id, vm_name) in vms]
try:
conn.executemany(
"DELETE FROM vms WHERE vm_id = ? AND tenant_id = ?",
vms
)
conn.commit()
except sqlite3.Error as e:
            logging.error("Error %s when removing from vms table with vm_id %s"
                          " tenant_id %s", e, vm_id, tenant_id)
return str(e)
return None
def set_name(self, conn, name):
""" Set name column in tenant table for this tenant. """
tenant_id = self.id
try:
conn.execute(
"UPDATE tenants SET name = ? WHERE id = ?",
(name, tenant_id)
)
conn.commit()
except sqlite3.Error as e:
            logging.error("Error %s when updating tenants table with"
                          " tenant_id %s", e, tenant_id)
return str(e)
return None
def set_description(self, conn, description):
""" Set description column in tenant table for this tenant. """
tenant_id = self.id
try:
conn.execute(
"UPDATE tenants SET description = ? WHERE id = ?",
(description, tenant_id)
)
conn.commit()
except sqlite3.Error as e:
            logging.error("Error %s when updating tenants table with"
                          " tenant_id %s", e, tenant_id)
return str(e)
return None
def set_default_datastore_and_privileges(self, conn, datastore, privileges):
"Set default_datastore and default privileges for this tenant."
tenant_id = self.id
exist_default_datastore = self.default_datastore
if not all_columns_set(privileges):
error_info = "Not all columns are set in privileges"
return error_info
try:
conn.execute(
"UPDATE tenants SET default_datastore = ? WHERE id = ?",
(datastore, tenant_id)
)
# remove the old entry
conn.execute(
"DELETE FROM privileges WHERE tenant_id = ? AND datastore = ?",
[tenant_id, exist_default_datastore]
)
privileges[auth_data_const.COL_TENANT_ID] = tenant_id
conn.execute(
"""
INSERT OR IGNORE INTO privileges VALUES
(:tenant_id, :datastore, :global_visibility, :create_volume,
:delete_volume, :mount_volume, :max_volume_size, :usage_quota)
""",
privileges
)
conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when setting default datastore and privileges for tenant_id %s",
e, tenant_id)
return str(e)
return None
    def set_datastore_access_privileges(self, conn, privileges):
        """ Set datastore and privileges for this tenant.
            "privileges" is an array of dicts;
            each dict represents the privileges that the tenant has for a given datastore
Example:
privileges = [{'datastore': 'datastore1',
'global_visibility': 0,
'create_volume': 0,
'delete_volume': 0,
'mount_volume': 1,
'max_volume_size': 0,
'usage_quota': 0},
{'datastore': 'datastore2',
'global_visibility': 0,
'create_volume': 1,
'delete_volume': 1,
'mount_volume': 1,
'max_volume_size': 0,
'usage_quota': 0}]
"""
tenant_id = self.id
for p in privileges:
p[auth_data_const.COL_TENANT_ID] = tenant_id
if not all_columns_set(p):
                return "Not all columns are set in 'privileges'"
try:
conn.executemany(
"""
INSERT OR IGNORE INTO privileges VALUES
(:tenant_id, :datastore, :global_visibility, :create_volume,
:delete_volume, :mount_volume, :max_volume_size, :usage_quota)
""",
privileges
)
for p in privileges:
                # privileges is an array of dicts
# each dict represent a privilege that the tenant has for a given datastore
# for each dict, add a new element which maps 'tenant_id' to tenant_id
p[auth_data_const.COL_TENANT_ID] = tenant_id
column_list = ['tenant_id', 'datastore', 'global_visibility', 'create_volume',
'delete_volume', 'mount_volume', 'max_volume_size', 'usage_quota']
update_list = []
update_list = [p[col] for col in column_list]
update_list.append(tenant_id)
update_list.append(p[auth_data_const.COL_DATASTORE])
logging.debug("set_datastore_access_privileges: update_list %s", update_list)
conn.execute(
"""
UPDATE OR IGNORE privileges SET
tenant_id = ?,
datastore = ?,
global_visibility = ?,
create_volume = ?,
delete_volume = ?,
mount_volume = ?,
max_volume_size = ?,
usage_quota = ?
WHERE tenant_id = ? AND datastore = ?
""",
update_list
)
conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when setting datastore and privileges for tenant_id %s",
e, tenant_id)
return str(e)
return None
def remove_datastore_access_privileges(self, conn, datastore):
""" Remove privileges from privileges table for this tenant. """
tenant_id = self.id
try:
conn.execute(
"DELETE FROM privileges WHERE tenant_id = ? AND datastore = ?",
[tenant_id, datastore]
)
conn.commit()
except sqlite3.Error as e:
            logging.error("Error %s when removing from privileges table with"
                          " tenant_id %s and datastore %s", e, tenant_id, datastore)
return str(e)
return None
class AuthorizationDataManager:
""" This class abstracts the creation, modification and retrieval of
authorization data used by vmdk_ops as well as the VMODL interface for
Docker volume management.
init arg:
        The constructor of this class takes "db_path" as an argument.
        "db_path" specifies the path of the sqlite3 database.
        If the caller does not pass a value for this argument, the function
        "get_auth_db_path" will be called to figure out the value;
        otherwise, the value passed by the caller will be used.
"""
def __init__(self, db_path=None):
if not db_path:
self.db_path = self.get_auth_db_path()
else:
self.db_path = db_path
self.conn = None
def __del__(self):
if self.conn:
self.conn.close()
def get_auth_db_path(self):
""" Return the path of authorization database.
DB tables should be stored in VSAN datastore
DB file should be stored under /vmfs/volume/VSAN_datastore/
See issue #618
Currently, it is hardcoded.
"""
return AUTH_DB_PATH
def connect(self):
""" Connect to a sqlite database file given by `db_path`.
Ensure foreign key
constraints are enabled on the database and set the return type for
select operations to dict-like 'Rows' instead of tuples.
            Raises a DbConnectionError exception when the connection fails.
"""
need_create_table = False
if not os.path.isfile(self.db_path):
logging.debug("auth DB %s does not exist, try to create table", self.db_path)
need_create_table = True
self.conn = sqlite3.connect(self.db_path)
if not self.conn:
raise DbConnectionError(self.db_path)
# Return rows as Row instances instead of tuples
self.conn.row_factory = sqlite3.Row
if need_create_table:
self.create_tables()
def create_tables(self):
""" Create tables used for per-datastore authorization.
            This function should only be called once per datastore.
            It returns an error string if the tables cannot be created
            (for example, if they already exist).
"""
try:
self.conn.execute(
'''
            PRAGMA foreign_keys = ON;
'''
)
self.conn.execute(
'''
CREATE TABLE tenants(
-- uuid for the tenant, which is generated by create_tenant() API
id TEXT PRIMARY KEY NOT NULL,
-- name of the tenant, which is specified by user when creating the tenant
-- this field can be changed later by using set_name() API
name TEXT,
-- brief description of the tenant, which is specified by user when creating the tenant
                -- this field can be changed later by using set_description API
description TEXT,
                -- not used currently
default_datastore TEXT
)
'''
)
self.conn.execute(
'''
CREATE TABLE vms(
-- uuid for the VM, which is generated when VM is created
-- this uuid will be passed in to executeRequest()
-- this field need to be specified when adding a VM to a tenant
vm_id TEXT PRIMARY KEY NOT NULL,
-- name of the VM, which is generated when VM is created
-- this field need to be specified when adding a VM to a tenant
vm_name TEXT,
-- id in tenants table
tenant_id TEXT NOT NULL,
FOREIGN KEY(tenant_id) REFERENCES tenants(id)
);
'''
)
self.conn.execute(
'''
CREATE TABLE privileges(
-- id in tenants table
tenant_id TEXT NOT NULL,
-- datastore name
datastore TEXT NOT NULL,
                -- not used currently, will drop this field later
global_visibility INTEGER,
create_volume INTEGER,
delete_volume INTEGER,
mount_volume INTEGER,
-- The unit of "max_volume_size" is "MB"
max_volume_size INTEGER,
-- The unit of usage_quota is "MB"
usage_quota INTEGER,
PRIMARY KEY (tenant_id, datastore),
FOREIGN KEY(tenant_id) REFERENCES tenants(id)
);
'''
)
self.conn.execute(
'''
CREATE TABLE volumes (
-- id in tenants table
tenant_id TEXT NOT NULL,
-- datastore name
datastore TEXT NOT NULL,
volume_name TEXT,
-- The unit of "volume_size" is "MB"
volume_size INTEGER,
PRIMARY KEY(tenant_id, datastore, volume_name),
FOREIGN KEY(tenant_id) REFERENCES tenants(id)
);
'''
)
self.conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when creating auth DB tables", e)
return str(e)
return None
def create_tenant(self, name, description, default_datastore, default_privileges,
vms, privileges):
""" Create a tenant in the database.
A tenant id will be auto-generated and returned.
vms are (vm_id, vm_name) pairs. Privileges are dictionaries
with keys matching the row names in the privileges table. Tenant id is
filled in for both the vm and privileges tables.
"""
logging.debug ("create_tenant name=%s", name)
if default_privileges:
if not all_columns_set(default_privileges):
error_info = "Not all columns are set in default_privileges"
return error_info, None
if privileges:
for p in privileges:
if not all_columns_set(p):
error_info = "Not all columns are set in privileges"
return error_info, None
# Create the entry in the tenants table
tenant = DockerVolumeTenant(name, description, default_datastore,
default_privileges, vms, privileges)
id = tenant.id
try:
self.conn.execute(
"INSERT INTO tenants(id, name, description, default_datastore) VALUES (?, ?, ?, ?)",
(id, name, description, default_datastore)
)
# Create the entries in the vms table
vms = [(vm_id, vm_name, id) for (vm_id, vm_name) in vms]
if vms:
self.conn.executemany(
"INSERT INTO vms(vm_id, vm_name, tenant_id) VALUES (?, ?, ?)",
vms
)
# Create the entries in the privileges table
if default_privileges:
default_privileges[auth_data_const.COL_TENANT_ID] = id
self.conn.execute(
"""
INSERT INTO privileges VALUES
(:tenant_id, :datastore, :global_visibility, :create_volume,
:delete_volume, :mount_volume, :max_volume_size, :usage_quota)
""",
default_privileges
)
if privileges:
for p in privileges:
p[auth_data_const.COL_TENANT_ID] = id
self.conn.executemany(
"""
INSERT INTO privileges VALUES
(:tenant_id, :datastore, :global_visibility, :create_volume,
:delete_volume, :mount_volume, :max_volume_size, :usage_quota)
""",
privileges
)
self.conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when setting datastore and privileges for tenant_id %s",
e, tenant.id)
return str(e), tenant
return None, tenant
    def get_tenant(self, tenant_name):
        """ Return a DockerVolumeTenant object which matches the given tenant_name. """
logging.debug("get_tenant: tenant_name=%s", tenant_name)
tenant = None
try:
cur = self.conn.execute(
"SELECT * FROM tenants WHERE name = ?",
(tenant_name,)
)
result = cur.fetchall()
for r in result:
# loop through each tenant
id = r[auth_data_const.COL_ID]
name = r[auth_data_const.COL_NAME]
description = r[auth_data_const.COL_DESCRIPTION]
default_datastore = r[auth_data_const.COL_DEFAULT_DATASTORE]
logging.debug("id=%s name=%s description=%s default_datastore=%s",
id, name, description, default_datastore)
# search vms for this tenant
vms = []
cur = self.conn.execute(
"SELECT * FROM vms WHERE tenant_id = ?",
(id,)
)
vms = cur.fetchall()
logging.debug("vms=%s", vms)
# search privileges and default_privileges for this tenant
privileges = []
cur = self.conn.execute(
"SELECT * FROM privileges WHERE tenant_id = ? AND datastore != ?",
(id,default_datastore)
)
privileges = cur.fetchall()
logging.debug("privileges=%s", privileges)
default_privileges = []
cur = self.conn.execute(
"SELECT * FROM privileges WHERE tenant_id = ? AND datastore = ?",
(id,default_datastore)
)
default_privileges = cur.fetchall()
logging.debug("default_privileges=%s", default_privileges)
tenant = DockerVolumeTenant(name, description, default_datastore,
default_privileges, vms, privileges, id)
except sqlite3.Error as e:
            logging.error("Error %s when getting tenant %s", e, tenant_name)
return str(e), tenant
return None, tenant
    def list_tenants(self):
        """ Return a list of DockerVolumeTenant objects. """
tenant_list = []
try:
cur = self.conn.execute(
"SELECT * FROM tenants"
)
result = cur.fetchall()
for r in result:
# loop through each tenant
id = r[auth_data_const.COL_ID]
name = r[auth_data_const.COL_NAME]
description = r[auth_data_const.COL_DESCRIPTION]
default_datastore = r[auth_data_const.COL_DEFAULT_DATASTORE]
# search vms for this tenant
vms = []
cur = self.conn.execute(
"SELECT * FROM vms WHERE tenant_id = ?",
(id,)
)
vms = cur.fetchall()
# search privileges and default_privileges for this tenant
privileges = []
cur = self.conn.execute(
"SELECT * FROM privileges WHERE tenant_id = ? AND datastore != ?",
(id,default_datastore)
)
privileges = cur.fetchall()
default_privileges = []
cur = self.conn.execute(
"SELECT * FROM privileges WHERE tenant_id = ? AND datastore = ?",
(id,default_datastore)
)
default_privileges = cur.fetchall()
tenant = DockerVolumeTenant(name, description, default_datastore,
default_privileges, vms, privileges, id)
tenant_list.append(tenant)
except sqlite3.Error as e:
logging.error("Error %s when listing all tenants", e)
return str(e), tenant_list
return None, tenant_list
    def remove_volumes_from_volume_table(self, tenant_id):
        """ Remove all volumes for this tenant from the volumes table. """
try:
self.conn.execute(
"DELETE FROM volumes WHERE tenant_id = ?",
[tenant_id]
)
self.conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when removing volumes from volumes table for tenant_id %s",
e, tenant_id)
return str(e)
return None
    def _remove_volumes_for_tenant(self, tenant_id):
        """ Delete all volumes belonging to this tenant.
Do not use it outside of removing a tenant.
"""
try:
cur = self.conn.execute(
"SELECT name FROM tenants WHERE id = ?",
(tenant_id,)
)
result = cur.fetchone()
except sqlite3.Error as e:
logging.error("Error %s when querying from tenants table", e)
return str(e)
error_info = ""
if result:
logging.debug("remove_volumes_for_tenant: %s %s", tenant_id, result)
tenant_name = result[0]
vmdks = vmdk_utils.get_volumes(tenant_name)
# Delete all volumes for this tenant.
dir_paths = set()
for vmdk in vmdks:
vmdk_path = os.path.join(vmdk['path'], "{0}".format(vmdk['filename']))
dir_paths.add(vmdk['path'])
logging.debug("path=%s filename=%s", vmdk['path'], vmdk['filename'])
                logging.debug("Deleting volume path %s", vmdk_path)
err = vmdk_ops.removeVMDK(vmdk_path)
if err:
logging.error("remove vmdk %s failed with error %s", vmdk_path, err)
error_info += err
# Delete path /vmfs/volumes/datastore_name/tenant_name
logging.debug("Deleting dir paths %s", dir_paths)
for path in list(dir_paths):
try:
os.rmdir(path)
except os.error as e:
msg = "remove dir {0} failed with error {1}".format(path, e)
logging.error(msg)
error_info += msg
err = self.remove_volumes_from_volume_table(tenant_id)
if err:
logging.error("Failed to remove volumes from database %s", err)
error_info += err
if error_info:
return error_info
return None
def remove_tenant(self, tenant_id, remove_volumes):
""" Remove a tenant with given id.
A row with given tenant_id will be removed from table tenants, vms,
and privileges.
All the volumes created by this tenant will be removed if remove_volumes
is set to True.
"""
        logging.debug("remove_tenant: tenant_id=%s, remove_volumes=%d", tenant_id, remove_volumes)
if remove_volumes:
error_info = self._remove_volumes_for_tenant(tenant_id)
if error_info:
return error_info
try:
self.conn.execute(
"DELETE FROM vms WHERE tenant_id = ?",
[tenant_id]
)
self.conn.execute(
"DELETE FROM privileges WHERE tenant_id = ?",
[tenant_id]
)
self.conn.execute(
"DELETE FROM tenants WHERE id = ?",
[tenant_id]
)
self.conn.commit()
except sqlite3.Error as e:
logging.error("Error %s when removing tables", e)
return str(e)
return None
| BaluDontu/docker-volume-vsphere | esx_service/utils/auth_data.py | Python | apache-2.0 | 26,778 |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = ""
services_str = ""
pkg_name = "tsim"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "std_msgs;/opt/ros/groovy/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/groovy/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
| rafafigueroa/cws | build/tsim/cmake/tsim-genmsg-context.py | Python | apache-2.0 | 404 |
"""mischia URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^scrum/', include('scrum.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| l-dfa/Mischia | mischia/urls.py | Python | gpl-2.0 | 803 |
import numpy as np
from scipy import optimize
from matplotlib import pyplot as plt, cm, colors
from math import *
def calc_R(x,y, xc, yc):
""" calculate the distance of each 2D points from the center (xc, yc) """
return np.sqrt((x-xc)**2 + (y-yc)**2)
def f(c, x, y):
""" calculate the algebraic distance between the data points and the mean circle centered at c=(xc, yc) """
Ri = calc_R(x, y, *c)
return Ri - Ri.mean()
def leastsq_circle(x,y):
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, ier = optimize.leastsq(f, center_estimate, args=(x,y))
xc, yc = center
Ri = calc_R(x, y, *center)
R = Ri.mean()
residu = np.sum((Ri - R)**2)
return xc, yc, R, residu
def plot_data_circle(x,y, xc, yc, R):
f = plt.figure( facecolor='white') #figsize=(7, 5.4), dpi=72,
plt.axis('equal')
theta_fit = np.linspace(-pi, pi, 180)
x_fit = xc + R*np.cos(theta_fit)
y_fit = yc + R*np.sin(theta_fit)
plt.plot(x_fit, y_fit, 'b-' , label="fitted circle", lw=2)
plt.plot([xc], [yc], 'bD', mec='y', mew=1)
plt.xlabel('x')
plt.ylabel('y')
# plot data
plt.plot(x, y, 'r-.', label='data', mew=1)
plt.legend(loc='best',labelspacing=0.1 )
plt.grid()
plt.title('Least Squares Circle') | thomasvdv/flightbit | analysis/least_square_circle.py | Python | gpl-2.0 | 1,345 |
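# --- Illustrative usage sketch (not part of the file above) ---
# Minimal demonstration of the fit on synthetic data: points on a circle of
# radius 3 centred at (1, -2), perturbed with small Gaussian noise. Assumes
# the module above is importable as least_square_circle (its file name here).
import numpy as np
from least_square_circle import leastsq_circle, plot_data_circle

rng = np.random.default_rng(0)
theta = np.linspace(0.0, 2.0 * np.pi, 50, endpoint=False)
x = 1.0 + 3.0 * np.cos(theta) + rng.normal(scale=0.05, size=theta.size)
y = -2.0 + 3.0 * np.sin(theta) + rng.normal(scale=0.05, size=theta.size)

xc, yc, R, residu = leastsq_circle(x, y)
print("centre=(%.3f, %.3f)  radius=%.3f  residual=%.4f" % (xc, yc, R, residu))
# plot_data_circle(x, y, xc, yc, R)   # uncomment to visualise the fit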
# Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Exception definitions.
"""
class BitcoinException(Exception):
"""
Base class for exceptions received from Bitcoin server.
- *code* -- Error code from ``bitcoind``.
"""
# Standard JSON-RPC 2.0 errors
    INVALID_REQUEST = -32600
    METHOD_NOT_FOUND = -32601
    INVALID_PARAMS = -32602
    INTERNAL_ERROR = -32603
    PARSE_ERROR = -32700
# General application defined errors
MISC_ERROR = -1 # std::exception thrown in command handling
FORBIDDEN_BY_SAFE_MODE = -2 # Server is in safe mode, and command is not allowed in safe mode
TYPE_ERROR = -3 # Unexpected type was passed as parameter
INVALID_ADDRESS_OR_KEY = -5 # Invalid address or key
OUT_OF_MEMORY = -7 # Ran out of memory during operation
INVALID_PARAMETER = -8 # Invalid, missing or duplicate parameter
DATABASE_ERROR = -20 # Database error
DESERIALIZATION_ERROR = -22 # Error parsing or validating structure in raw format
# P2P client errors
CLIENT_NOT_CONNECTED = -9 # Bitcoin is not connected
CLIENT_IN_INITIAL_DOWNLOAD = -10 # Still downloading initial blocks
# Wallet errors
WALLET_ERROR = -4 # Unspecified problem with wallet (key not found etc.)
WALLET_INSUFFICIENT_FUNDS = -6 # Not enough funds in wallet or account
WALLET_INVALID_ACCOUNT_NAME = -11 # Invalid account name
WALLET_KEYPOOL_RAN_OUT = -12 # Keypool ran out, call keypoolrefill first
WALLET_UNLOCK_NEEDED = -13 # Enter the wallet passphrase with walletpassphrase first
WALLET_PASSPHRASE_INCORRECT = -14 # The wallet passphrase entered was incorrect
WALLET_WRONG_ENC_STATE = -15 # Command given in wrong wallet encryption state (encrypting an encrypted wallet etc.)
WALLET_ENCRYPTION_FAILED = -16 # Failed to encrypt the wallet
WALLET_ALREADY_UNLOCKED = -17 # Wallet is already unlocked
def __init__(self, error):
Exception.__init__(self, error['message'])
self.code = error['code']
class TransportException(Exception):
"""
Class to define transport-level failures.
"""
def __init__(self, msg, code=None, protocol=None, raw_detail=None):
self.msg = msg
self.code = code
self.protocol = protocol
self.raw_detail = raw_detail
self.s = """
Transport-level failure: {msg}
Code: {code}
Protocol: {protocol}
""".format(msg=msg, code=code, protocol=protocol)
def __str__(self):
return self.s
##### General application defined errors
class SafeMode(BitcoinException):
"""
Operation denied in safe mode (run ``bitcoind`` with ``-disablesafemode``).
"""
class JSONTypeError(BitcoinException):
"""
Unexpected type was passed as parameter
"""
InvalidAmount = JSONTypeError # Backwards compatibility
class InvalidAddressOrKey(BitcoinException):
"""
Invalid address or key.
"""
InvalidTransactionID = InvalidAddressOrKey # Backwards compatibility
class OutOfMemory(BitcoinException):
"""
Out of memory during operation.
"""
class InvalidParameter(BitcoinException):
"""
Invalid parameter provided to RPC call.
"""
##### Client errors
class ClientException(BitcoinException):
"""
P2P network error.
This exception is never raised but functions as a superclass
for other P2P client exceptions.
"""
class NotConnected(ClientException):
"""
Not connected to any peers.
"""
class DownloadingBlocks(ClientException):
"""
Client is still downloading blocks.
"""
##### Wallet errors
class WalletError(BitcoinException):
"""
Unspecified problem with wallet (key not found etc.)
"""
SendError = WalletError # Backwards compatibility
class InsufficientFunds(WalletError):
"""
Insufficient funds to complete transaction in wallet or account
"""
class InvalidAccountName(WalletError):
"""
Invalid account name
"""
class KeypoolRanOut(WalletError):
"""
Keypool ran out, call keypoolrefill first
"""
class WalletUnlockNeeded(WalletError):
"""
Enter the wallet passphrase with walletpassphrase first
"""
class WalletPassphraseIncorrect(WalletError):
"""
The wallet passphrase entered was incorrect
"""
class WalletWrongEncState(WalletError):
"""
Command given in wrong wallet encryption state (encrypting an encrypted wallet etc.)
"""
class WalletEncryptionFailed(WalletError):
"""
Failed to encrypt the wallet
"""
class WalletAlreadyUnlocked(WalletError):
"""
Wallet is already unlocked
"""
# For convenience, we define more specific exception classes
# for the more common errors.
_exception_map = {
BitcoinException.FORBIDDEN_BY_SAFE_MODE: SafeMode,
BitcoinException.TYPE_ERROR: JSONTypeError,
BitcoinException.WALLET_ERROR: WalletError,
BitcoinException.INVALID_ADDRESS_OR_KEY: InvalidAddressOrKey,
BitcoinException.WALLET_INSUFFICIENT_FUNDS: InsufficientFunds,
BitcoinException.OUT_OF_MEMORY: OutOfMemory,
BitcoinException.INVALID_PARAMETER: InvalidParameter,
BitcoinException.CLIENT_NOT_CONNECTED: NotConnected,
BitcoinException.CLIENT_IN_INITIAL_DOWNLOAD: DownloadingBlocks,
BitcoinException.WALLET_INSUFFICIENT_FUNDS: InsufficientFunds,
BitcoinException.WALLET_INVALID_ACCOUNT_NAME: InvalidAccountName,
BitcoinException.WALLET_KEYPOOL_RAN_OUT: KeypoolRanOut,
BitcoinException.WALLET_UNLOCK_NEEDED: WalletUnlockNeeded,
BitcoinException.WALLET_PASSPHRASE_INCORRECT: WalletPassphraseIncorrect,
BitcoinException.WALLET_WRONG_ENC_STATE: WalletWrongEncState,
BitcoinException.WALLET_ENCRYPTION_FAILED: WalletEncryptionFailed,
BitcoinException.WALLET_ALREADY_UNLOCKED: WalletAlreadyUnlocked,
}
def wrap_exception(error):
"""
Convert a JSON error object to a more specific Bitcoin exception.
"""
# work around to temporarily fix https://github.com/bitcoin/bitcoin/issues/3007
if error['code'] == BitcoinException.WALLET_ERROR and error['message'] == 'Insufficient funds':
error['code'] = BitcoinException.WALLET_INSUFFICIENT_FUNDS
return _exception_map.get(error['code'], BitcoinException)(error)
| XertroV/bitcoin-python3 | src/bitcoinrpc/exceptions.py | Python | mit | 7,464 |
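# --- Illustrative usage sketch (not part of the file above) ---
# wrap_exception() maps a JSON-RPC error object to the most specific class
# registered in _exception_map, falling back to BitcoinException for unknown
# codes. Assumes the package above is importable as bitcoinrpc.
from bitcoinrpc.exceptions import InsufficientFunds, WalletError, wrap_exception

error = {'code': -6, 'message': 'Account has insufficient funds'}
exc = wrap_exception(error)
assert isinstance(exc, InsufficientFunds)   # code -6 -> InsufficientFunds
assert exc.code == -6
try:
    raise exc
except WalletError as caught:               # InsufficientFunds subclasses WalletError
    print('caught: %s' % caught)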
#!/usr/local/django-oscar/oscar/bin/python2.7
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import getopt
import string
import sys
from PIL import Image
def usage():
print("PIL Convert 0.5/1998-12-30 -- convert image files")
print("Usage: pilconvert [option] infile outfile")
print()
print("Options:")
print()
print(" -c <format> convert to format (default is given by extension)")
print()
print(" -g convert to greyscale")
print(" -p convert to palette image (using standard palette)")
print(" -r convert to rgb")
print()
print(" -o optimize output (trade speed for size)")
print(" -q <value> set compression quality (0-100, JPEG only)")
print()
print(" -f list supported file formats")
sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
print(v)
sys.exit(1)
output_format = None
convert = None
options = {}
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats (* indicates output format):")
for i in id:
if i in Image.SAVE:
print(i+"*", end=' ')
else:
print(i, end=' ')
sys.exit(1)
elif o == "-c":
output_format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if output_format:
im.save(argv[1], output_format, **options)
else:
im.save(argv[1], **options)
except:
print("cannot convert image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| vicky2135/lucious | oscar/bin/pilconvert.py | Python | bsd-3-clause | 2,385 |
"""
------------------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) 2016 Newcastle University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.import time
------------------------------------------------------------------------------
Author
Nick Cook, School of Computing Science, Newcastle University
------------------------------------------------------------------------------
"""
import array
import doctest
import random
import unittest
from microbit_stub import *
from microbit_stub import Button, Pin, STATE_FILE_DEFAULT, State
def init(full_init):
print()
if full_init:
state.power_off()
reset()
""" ---------------------------------------------------------------------- """
""" state tests -----------------===-------------------------------------- """
class TestState(unittest.TestCase):
keys = [key for key in state._State__data.keys()
if key != State._State__STATE_FILE_KEY]
def assert_state(self, value, power):
for key in self.keys:
if key != 'power':
self.assertEqual(state._State__data[key], value)
else:
self.assertEqual(state._State__data[key], power)
def setUp(self):
init(True)
def test_get_set(self):
state.power_off()
self.assert_state(0, 0)
for key in self.keys:
state.set(key, 1)
self.assertEqual(state.get(key), 1)
self.assertEqual(state.get('unknown'), 0)
state.set('unknown', 1)
self.assertEqual(state.get('unknown'), 0)
limited_keys = [ key for key in self.keys
if key not in State._State__ACCELEROMETER_KEYS
and key not in State._State__PRESSES_KEYS]
for key in limited_keys:
with self.assertRaises(ValueError):
state.set(key, -1)
with self.assertRaises(ValueError):
state.set(key, 1024)
state.set(key, 1023)
self.assertEqual(state.get(key), 1023)
for key in State._State__PRESSES_KEYS:
with self.assertRaises(ValueError):
state.set(key, -1)
state.set(key, 1023)
self.assertEqual(state.get(key), 1023)
state.set(key, 1024)
self.assertEqual(state.get(key), 1024)
state.set(key, 10240)
self.assertEqual(state.get(key), 10240)
for key in State._State__ACCELEROMETER_KEYS:
state.set(key, -1)
self.assertEqual(state.get(key), -1)
state.set(key, -1023)
self.assertEqual(state.get(key), -1023)
state.set(key, -1024)
self.assertEqual(state.get(key), -1024)
state.set(key, -10240)
self.assertEqual(state.get(key), -10240)
state.set(key, 1023)
self.assertEqual(state.get(key), 1023)
state.set(key, 1024)
self.assertEqual(state.get(key), 1024)
state.set(key, 10240)
self.assertEqual(state.get(key), 10240)
state.power_off()
state.reset()
def test_state_file(self):
# test with file and without ('')
self.assert_state(0, 1)
for key in self.keys:
state.set(key, 1)
self.assert_state(1, 1)
state.set(State._State__STATE_FILE_KEY, '')
self.assertEqual(state.get(State._State__STATE_FILE_KEY), '')
state.reset()
self.assert_state(0, 1)
state._State__data[State._State__STATE_FILE_KEY] = STATE_FILE_DEFAULT
state.load()
self.assert_state(1, 1)
state.reset()
self.assert_state(0, 1)
def test_press(self):
for button in [button_a, button_b]:
state.press(button.name)
self.assertTrue(button.is_pressed())
self.assertEqual(button.get_presses(), 1)
button_c = Button('button_c')
state.press('button_c')
self.assertFalse(button_c.is_pressed())
self.assertEqual(button_c.get_presses(), 0)
def test_release(self):
for button in [button_a, button_b]:
state.press(button.name)
self.assertTrue(button.is_pressed())
self.assertEqual(button_a.get_presses(), 1)
state.release(button.name.upper())
self.assertFalse(button.is_pressed())
self.assertEqual(button_a.get_presses(), 1)
def test_press_and_release(self):
for button in [button_a, button_b]:
state.press_and_release(button.name)
self.assertFalse(button.is_pressed())
self.assertEqual(button_a.get_presses(), 1)
def test_power(self):
self.assertTrue(state.is_on())
state.power_off()
self.assertFalse(state.is_on())
state.power_on()
self.assertTrue(state.is_on())
def test_load(self):
state.load()
self.assert_state(0, 1)
for key in self.keys:
state._State__data[key] = state._State__data[key] + 1
self.assert_state(1, 2)
state.load()
self.assert_state(0, 1)
def test_dump(self):
self.assert_state(0, 1)
for key in self.keys:
state._State__data[key] = state._State__data[key] + 1
self.assert_state(1, 2)
state.dump()
state.load()
self.assert_state(1, 2)
def test_reset(self):
for key in self.keys:
state.set(key, 1)
self.assertEqual(state.get(key), 1)
state.reset()
self.assert_state(0, 1)
self.assertEqual(state.get('state_file'), STATE_FILE_DEFAULT)
""" ---------------------------------------------------------------------- """
""" reset test ----------------------------------------------------------- """
class TestReset(unittest.TestCase):
def setUp(self):
init(True)
def test_reset(self):
# power off statements below are to prevent display output
# which is tested elsewhere
state_check = TestState()
state_check.assert_state(0, 1)
for key in state_check.keys:
state.set(key, 1)
state_check.assert_state(1, 1)
state.power_off()
state_check.assert_state(1, 0)
reset()
state_check.assert_state(0, 1)
self.assertEqual(button_a.get_presses(), 0)
self.assertEqual(button_b.get_presses(), 0)
for button in [button_a, button_b]:
for i in range(4):
state.press(button.name)
self.assertEqual(button.get_presses(), i + 1)
state.power_off()
reset()
self.assertEqual(button_a.get_presses(), 0)
self.assertEqual(button_b.get_presses(), 0)
state.power_off()
display.show(Image.HAPPY)
self.assertEqual(display.image, Image.HAPPY)
reset()
self.assertEqual(display.image, Image())
""" ---------------------------------------------------------------------- """
""" running_time test ---------------------------------------------------- """
class TestRunningTime(unittest.TestCase):
def setUp(self):
init(True)
def test_running_time(self):
print('... running time test - be patient! ...')
state.power_off()
prev = state._State__get_runtime()
self.assertEqual(prev, 0)
delay = 10
for i in range(1, 1000):
sleep(delay)
curr = running_time()
diff = curr - prev
self.assertTrue(diff > delay)
self.assertTrue(diff <= State._State__RUNTIME_MAX_INCR + delay)
prev = curr
state.power_off()
prev = state._State__get_runtime()
self.assertEqual(prev, 0)
curr = running_time()
diff = curr - prev
self.assertTrue(diff > delay)
self.assertTrue(diff <= State._State__RUNTIME_MAX_INCR + delay)
""" ---------------------------------------------------------------------- """
""" panic test ----------------------------------------------------------- """
class TestPanic(unittest.TestCase):
def setUp(self):
init(True)
def test_panic(self):
state.power_off()
panic(0)
self.assertEqual(display.image, Image.SAD)
panic(100)
self.assertEqual(display.image, Image.SAD)
panic(-100)
self.assertEqual(display.image, Image.SAD)
with self.assertRaises(TypeError):
panic('a')
""" ---------------------------------------------------------------------- """
""" button tests --------------------------------------------------------- """
class TestButton(unittest.TestCase):
def setUp(self):
init(True)
def test_is_pressed(self):
        for button in [button_a, button_b]:
self.assertFalse(button.is_pressed())
state.press(button.name)
self.assertTrue(button.is_pressed())
state.release(button.name)
self.assertFalse(button.is_pressed())
def test_was_pressed(self):
        for button in [button_a, button_b]:
self.assertFalse(button.was_pressed())
state.press(button.name)
self.assertTrue(button.was_pressed())
self.assertFalse(button.was_pressed())
state.release(button.name)
self.assertFalse(button.was_pressed())
button.reset_presses()
self.assertFalse(button.was_pressed())
def test_get_presses(self):
for button in [button_a, button_b]:
self.assertEqual(button.get_presses(), 0)
for i in range(4):
state.press(button.name)
self.assertEqual(button.get_presses(), 4)
def test_reset_presses(self):
for button in [button_a, button_b]:
for i in range(4):
state.press(button.name)
self.assertTrue(button.was_pressed())
self.assertFalse(button.was_pressed())
self.assertEqual(button.get_presses(), 4)
button.reset_presses()
self.assertEqual(button.get_presses(), 0)
self.assertFalse(button.was_pressed())
""" ---------------------------------------------------------------------- """
""" image tests --------------------------------------------------------- """
class TestImage(unittest.TestCase):
def setUp(self):
init(False)
def checksum(self, image, **kwargs):
sum = 0
if 'pixels' in kwargs:
pixels = kwargs['pixels']
for pix in pixels:
self.assertEqual(image.get_pixel(pix[0], pix[1]), pix[2])
sum = sum + pix[2]
else:
sum = kwargs['sum']
for y in range(image.height()):
for x in range(image.width()):
sum = sum - image.get_pixel(x, y)
self.assertEqual(sum, 0)
def test_init_default(self):
image = Image()
self.assertEqual(image, Image('00000:00000:00000:00000:00000:'), 0)
self.checksum(image, sum=0)
def test_init_fromstring(self):
image = Image('90009:09090:00900:09090:90009')
self.checksum(image,
pixels=[(0,0,9),(4,0,9),(1,1,9),(3,1,9),(2,2,9),(1,3,9),(3,3,9),(0,4,9),(4,4,9)])
image = Image('')
self.assertEqual(image.width(), 0)
self.assertEqual(image.height(), 0)
self.checksum(image, sum=0)
image = Image('123')
self.checksum(image, pixels=[(0,0,1),(1,0,2),(2,0,3)])
image = Image(':')
self.assertEqual(image, Image())
image = Image(':123:')
self.assertEqual(image, Image('000:123:'))
self.checksum(image, pixels=[(0,1,1),(1,1,2),(2,1,3)])
image = Image(':123')
self.assertEqual(image, Image('000:123:'))
self.checksum(image, pixels=[(0,1,1),(1,1,2),(2,1,3)])
with self.assertRaises(TypeError):
image = Image(None)
with self.assertRaises(ValueError):
image = Image('rubbish')
def test_init_frombuffer(self):
image = Image(3, 2, array.array('B', [0,0,0,1,1,1]))
self.assertEqual(image, Image('000:111:'))
self.checksum(image, pixels=[(0,1,1),(1,1,1),(2,1,1)])
image = Image(0, 0, array.array('B', []))
self.assertEqual(image, Image(''))
self.checksum(image, sum=0)
with self.assertRaises(TypeError):
image = Image(6, 6, None)
with self.assertRaises(ValueError):
image = Image(2, 2, array.array('B', [0, 0]))
with self.assertRaises(ValueError):
image = Image(2, 2, array.array('B', [0, 0, 0, 0, 0, 0]))
image = Image(2, 2, array.array('B', [0, 0, 0, 0]))
self.assertEqual(image, Image('00:00:'))
self.checksum(image, sum=0)
def test_init_fromsize(self):
image = Image(4, 5)
self.assertEqual(image, Image('0000:0000:0000:0000:0000:'))
self.checksum(image, sum=0)
image = Image(0, 0)
self.assertEqual(image, Image(''))
self.checksum(image, sum=0)
with self.assertRaises(ValueError):
image = Image(-1, -1)
def test_width(self):
image = Image(3, 2)
self.assertEqual(image.width(), 3)
image = Image('000:000:0000:00000:')
self.assertEqual(image.width(), 5)
image = Image('')
self.assertEqual(image.width(), 0)
def test_height(self):
image = Image(3, 2)
self.assertEqual(image.height(), 2)
image = Image('000:000:0000:00000:')
self.assertEqual(image.height(), 4)
image = Image('')
self.assertEqual(image.height(), 0)
def test_set_pixel(self):
image = Image()
for y in range(5):
for x in range(5):
image.set_pixel(x, y, 1)
self.assertEqual(image.get_pixel(x, y), 1)
self.assertEqual(image, Image('11111:11111:11111:11111:11111:'))
self.checksum(image, sum=25)
with self.assertRaises(ValueError):
image.set_pixel(0, 0, -1)
with self.assertRaises(ValueError):
image.set_pixel(0, 0, 10)
with self.assertRaises(IndexError):
image.set_pixel(5, 5, 1)
with self.assertRaises(IndexError):
image = Image('')
image.set_pixel(0, 0, 1)
def test_get_pixel(self):
image = Image()
for y in range(5):
for x in range(5):
self.assertEqual(image.get_pixel(x, y), 0)
image.set_pixel(x, y, 1)
self.assertEqual(image.get_pixel(x, y), 1)
def checkshift(self, shift, pos_images, neg_images):
for i in range(len(pos_images)):
self.assertEqual(shift(i), pos_images[i])
for i in range(len(neg_images)):
self.assertEqual(shift(-i), neg_images[i])
def test_shifts(self):
image = Image()
for shift in [image.shift_left, image.shift_right,
image.shift_up, image.shift_down]:
self.assertEqual(shift(1), Image('00000:00000:00000:00000:00000:'))
image = Image('')
for shift in [image.shift_left, image.shift_right,
image.shift_up, image.shift_down]:
self.assertEqual(shift(1), Image(''))
image = Image('10000:01000:00100:00010:00001:')
left_down_images = [image,
Image('00000:10000:01000:00100:00010:'),
Image('00000:00000:10000:01000:00100:'),
Image('00000:00000:00000:10000:01000:'),
Image('00000:00000:00000:00000:10000:'),
Image('00000:00000:00000:00000:00000:'),
Image('00000:00000:00000:00000:00000:'),
]
right_up_images = [image,
Image('01000:00100:00010:00001:00000:'),
Image('00100:00010:00001:00000:00000:'),
Image('00010:00001:00000:00000:00000:'),
Image('00001:00000:00000:00000:00000:'),
Image('00000:00000:00000:00000:00000:'),
Image('00000:00000:00000:00000:00000:'),
]
self.checkshift(image.shift_left, left_down_images, right_up_images)
self.checkshift(image.shift_right, right_up_images, left_down_images)
self.checkshift(image.shift_up, right_up_images, left_down_images)
self.checkshift(image.shift_down, left_down_images, right_up_images)
def test_repr(self):
for image in [Image(), Image(''), Image('10000:01000:00100:00010:00001:')]:
self.assertEqual(image, eval(repr(image)))
def test_str(self):
image_str = '11111\n22222\n33333\n44444\n55555'
image = Image(image_str.replace('\n', ':'))
image_str = image_str.replace('\n', '|\n|')
image_str = '-------\n|' + image_str + '|\n-------'
self.assertEqual(image_str, str(image))
for i in [1, 2, 3, 4, 5]:
image_str = image_str.replace(str(i), ' ')
self.assertEqual(image_str, str(Image()))
image_str = '-------\n' + '| |\n'* 5 + '-------'
self.assertEqual(image_str, str(Image('')))
def test_add(self):
image1 = Image('11111:00000:11111:00000:11111:')
image2 = image1 + Image('00000:11111:00000:11111:00000:')
self.assertEqual(image1, Image('11111:00000:11111:00000:11111:'))
self.assertEqual(image2, Image('11111:11111:11111:11111:11111:'))
self.assertEqual(image2 + image2, Image('22222:22222:22222:22222:22222:'))
self.assertEqual(image2, Image('11111:11111:11111:11111:11111:'))
self.assertEqual(Image('000:111:222:') + Image('999:888:777'),
Image('999:999:999'))
self.assertEqual(Image('999:999:999:') + Image('111:111:111'),
Image('999:999:999'))
with self.assertRaises(ValueError):
image = Image('111:222:') + Image('22:33:44:')
def test_mul(self):
image = Image()
self.assertEqual(image * 0, image)
self.assertEqual(image * 1, image)
self.assertEqual(image * 2, image)
image = Image('')
self.assertEqual(image * 0, image)
self.assertEqual(image * 1, image)
self.assertEqual(image * 2, image)
image = Image('11111:22222:33333:44444:55555:')
self.assertEqual(image * 0, Image())
self.assertEqual(image * 1, image)
self.assertEqual(image * 2, Image('22222:44444:66666:88888:99999:'))
self.assertEqual(image * 3, Image('33333:66666:99999:99999:99999:'))
self.assertEqual(image * 4, Image('44444:88888:99999:99999:99999:'))
self.assertEqual(image * 5, Image('55555:99999:99999:99999:99999:'))
self.assertEqual(image * 6, Image('66666:99999:99999:99999:99999:'))
self.assertEqual(image * 7, Image('77777:99999:99999:99999:99999:'))
self.assertEqual(image * 8, Image('88888:99999:99999:99999:99999:'))
self.assertEqual(image * 9, Image('99999:99999:99999:99999:99999:'))
self.assertEqual(image * 10, Image('99999:99999:99999:99999:99999:'))
with self.assertRaises(ValueError):
image = image * -1
""" ---------------------------------------------------------------------- """
""" display tests -------------------------------------------------------- """
class TestDisplay(unittest.TestCase):
__DOCTEST_FILE = 'test_microbit_stub_display.txt'
def setUp(self):
init(True)
def test__init__(self):
self.assertEqual(display.image, Image())
def test_get_pixel(self):
for y in range(display.image.height()):
for x in range(display.image.width()):
self.assertEqual(display.image.get_pixel(x, y), 0)
display.image.set_pixel(x, y, x)
self.assertEqual(display.image.get_pixel(x, y), x)
def test_set_pixel(self):
for y in range(display.image.height()):
for x in range(display.image.width()):
display.image.set_pixel(x, y, x+y)
self.assertEqual(display.image.get_pixel(x, y), x+y)
with self.assertRaises(ValueError):
display.image.set_pixel(0, 0, -1)
with self.assertRaises(ValueError):
display.image.set_pixel(0, 0, 10)
def test_clear(self):
state.power_off()
self.assertEqual(display.image, Image())
display.clear()
self.assertEqual(display.image, Image())
for y in range(display.image.height()):
for x in range(display.image.width()):
display.image.set_pixel(x, y, x+y)
self.assertEqual(display.image, Image('01234:12345:23456:34567:45678:'))
display.clear()
self.assertEqual(display.image, Image())
def test_show(self):
state.power_off()
display.show(Image('11111:11111:11111:11111:11111:'), clear=False)
self.assertEqual(display.image, Image('11111:11111:11111:11111:11111:'))
display.show(Image('11111:11111:11111:11111:11111:'), clear=True)
self.assertEqual(display.image, Image())
display.show([])
self.assertEqual(display.image, Image())
display.show('')
self.assertEqual(display.image, Image())
with self.assertRaises(TypeError):
display.show(None)
def test_doctest_show_scroll(self):
print('... display show and scroll doctests - be patient! ...')
doctest.testfile(TestDisplay.__DOCTEST_FILE)
def test_scroll(self):
state.power_off()
display.show('notcleared')
self.assertEqual(display.image, Image.CHARACTER_MAP['d'])
display.scroll('cleared')
self.assertEqual(display.image, Image())
""" ---------------------------------------------------------------------- """
""" pin tests ------------------------------------------------------------ """
class TestPin(unittest.TestCase):
__pins = [pin0, pin1, pin2, pin3, pin4, pin5, pin6, pin7, pin8, pin9, pin10,
pin11, pin12, pin13, pin14, pin15, pin16, pin19, pin20]
def setUp(self):
init(True)
def test_read_write_digital(self):
for pin in TestPin.__pins:
self.assertEqual(pin.read_digital(), 0)
pin.write_digital(1)
self.assertEqual(pin.read_digital(), 1)
pin.write_digital(0)
self.assertEqual(pin.read_digital(), 0)
pin.write_digital(True)
self.assertEqual(pin.read_digital(), 1)
pin.write_digital(False)
self.assertEqual(pin.read_digital(), 0)
with self.assertRaises(ValueError):
pin.write_digital(-1)
with self.assertRaises(ValueError):
pin.write_digital(2)
with self.assertRaises(TypeError):
pin.write_digital('a')
# unknown pin
pin17 = Pin('pin17')
pin17.write_digital(1)
self.assertEqual(pin17.read_digital(), 0)
def test_read_write_analog(self):
for pin in TestPin.__pins:
self.assertEqual(pin.read_analog(), 0)
pin.write_analog(1)
self.assertEqual(pin.read_analog(), 1)
pin.write_analog(0)
self.assertEqual(pin.read_analog(), 0)
pin.write_analog(1023)
self.assertEqual(pin.read_analog(), 1023)
pin.write_analog(1.5)
self.assertEqual(pin.read_analog(), 1)
pin.write_analog(0.5)
self.assertEqual(pin.read_analog(), 0)
with self.assertRaises(ValueError):
pin.write_analog(-1)
with self.assertRaises(ValueError):
pin.write_analog(1024)
with self.assertRaises(TypeError):
pin.write_analog('a')
# unknown pin
pin17 = Pin('pin17')
pin17.write_analog(1)
self.assertEqual(pin17.read_analog(), 0)
def test_is_touched(self):
for pin in TestPin.__pins:
self.assertFalse(pin.is_touched())
pin.write_digital(1)
self.assertTrue(pin.is_touched())
pin.write_digital(0)
self.assertFalse(pin.is_touched())
def test_set_analog_period(self):
pin0.set_analog_period(0) # nothing to do
def test_set_analog_period_microseconds(self):
pin0.set_analog_period_microseconds(0) # nothing to do
""" ---------------------------------------------------------------------- """
""" accelerometer tests -------------------------------------------------- """
class TestAccelerometer(unittest.TestCase):
__axes = { 'accelerometer_x':accelerometer.get_x,
'accelerometer_y':accelerometer.get_y,
'accelerometer_z':accelerometer.get_z,
}
def setUp(self):
init(True)
def axisTest(self, axis):
self.assertEqual(TestAccelerometer.__axes[axis](), 0)
val = random.randint(-1000, 1000)
state.set(axis, val)
self.assertEqual(TestAccelerometer.__axes[axis](), val)
state.set(axis, 0)
self.assertEqual(TestAccelerometer.__axes[axis](), 0)
def test_get_x(self):
self.axisTest('accelerometer_x')
def test_get_y(self):
self.axisTest('accelerometer_y')
def test_get_z(self):
self.axisTest('accelerometer_z')
def test_current_gesture(self):
self.assertIn(accelerometer.current_gesture(), accelerometer.gestures)
def test_get_gestures(self):
for g in accelerometer.get_gestures():
self.assertIn(g, Accelerometer.gestures)
self.assertNotIn('unknown', Accelerometer.gestures)
def test_is_gesture(self):
self.assertFalse(accelerometer.is_gesture('unknown'))
def test_was_gesture(self):
self.assertFalse(accelerometer.was_gesture('unknown'))
def test_reset_gestures(self):
accelerometer.reset_gestures()
""" ---------------------------------------------------------------------- """
""" compass tests -------------------------------------------------------- """
class TestCompass(unittest.TestCase):
def setUp(self):
init(False)
def test_calibration(self):
self.assertFalse(compass.is_calibrated())
compass.calibrate()
self.assertTrue(compass.is_calibrated())
compass.clear_calibration()
self.assertFalse(compass.is_calibrated())
def test_heading(self):
for i in range(1000):
h = compass.heading()
self.assertTrue(h > -1 and h < 361)
def test_get_field_strength(self):
for i in range(1000):
fs = compass.get_field_strength()
self.assertTrue(fs > -1001 and fs < 1001)
""" ---------------------------------------------------------------------- """
""" I2C tests ------------------------------------------------------------ """
class TestI2C(unittest.TestCase):
"""I2C is not emulated - tests just pass.
"""
def setUp(self):
init(False)
def test_read(self):
pass
def test_write(self):
pass
""" ---------------------------------------------------------------------- """
""" UART tests ----------------------------------------------------------- """
class TestUART(unittest.TestCase):
"""UART is not emulated tests just pass.
"""
def setUp(self):
init(False)
def test_init(self):
pass
def test_any(self):
pass
def test_read(self):
pass
def test_readall(self):
pass
def test_readline(self):
pass
def test_readinto(self):
pass
def test_write(self):
pass
""" ---------------------------------------------------------------------- """
if __name__ == '__main__':
unittest.main()
| casnortheast/microbit_stub | test_microbit_stub.py | Python | mit | 30,460 |
from __future__ import unicode_literals
from collections import OrderedDict
import hashlib
import os
import posixpath
import re
import json
from django.conf import settings
from django.core.cache import (caches, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.six.moves.urllib.parse import unquote, urlsplit, urlunsplit, urldefrag
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
# FileSystemStorage fallbacks to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class HashedFilesMixin(object):
default_template = """url("%s")"""
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super(HashedFilesMixin, self).__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Returns a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
opened = False
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
hashed_name = self.stored_name(clean_name)
final_url = super(HashedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name, template=None):
"""
Returns the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return template % unquote(relative_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read().decode(settings.FILE_CHARSET)
for patterns in self._patterns.values():
for pattern, template in patterns:
converter = self.url_converter(name, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_text(self.clean_name(saved_name))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_text(self.clean_name(saved_name))
# and then set the cache accordingly
hashed_files[self.hash_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally store the processed paths
self.hashed_files.update(hashed_files)
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def stored_name(self, name):
hash_key = self.hash_key(name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
# store the hashed name if there was a miss, e.g.
# when the files are still processed
self.hashed_files[hash_key] = cache_name
return cache_name
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
def __init__(self, *args, **kwargs):
super(ManifestFilesMixin, self).__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode('utf-8')
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version', None)
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
all_post_processed = super(ManifestFilesMixin,
self).post_process(*args, **kwargs)
for post_processed in all_post_processed:
yield post_processed
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode('utf-8')
self._save(self.manifest_name, ContentFile(contents))
class _MappingCache(object):
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key, None)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
| simbha/mAngE-Gin | lib/django/contrib/staticfiles/storage.py | Python | mit | 14,803 |
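# --- Illustrative sketch (not part of the file above) ---
# The concrete backend is selected lazily by ConfiguredStorage from the
# STATICFILES_STORAGE setting (for example
# 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage').
# file_hash() above keeps the first 12 hex characters of the MD5 of the file
# content, and hashed_name() splices that digest into the filename before the
# extension. A standalone sketch of the same naming scheme:
import hashlib
import os

def demo_hashed_name(name, content):
    """Mirror the hashed-name scheme used above: <root>.<12-hex-md5><ext>."""
    digest = hashlib.md5(content).hexdigest()[:12]
    root, ext = os.path.splitext(name)
    return "%s.%s%s" % (root, digest, ext)

print(demo_hashed_name("css/site.css", b"body { color: #333; }"))
# e.g. css/site.<12-hex-digest>.css (the digest depends on the content)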
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action for running GooGet commands with arguments."""
from glazier.lib import googet
from glazier.lib.actions.base import ActionError
from glazier.lib.actions.base import BaseAction
from glazier.lib.actions.base import ValidationError
class GooGetInstall(BaseAction):
"""Execute a GooGet install command."""
  # TODO(b/132083921): Add support for path transforms
def Run(self):
for args in self._args:
# Default to just the package being required
if len(args) > 1:
flags = args[1]
else:
flags = None
if len(args) > 2:
path = args[2]
else:
path = None
if len(args) > 3:
retries = int(args[3])
else:
retries = 5
if len(args) > 4:
sleep = int(args[4])
else:
sleep = 30
try:
install = googet.GooGetInstall()
install.LaunchGooGet(pkg=args[0],
retries=retries,
sleep=sleep,
build_info=self._build_info,
path=path,
flags=flags)
except googet.Error as e:
raise ActionError("Failure executing GooGet command: '%s'" % e) from e
except IndexError as e:
raise ActionError("Unable to access all required arguments in command "
"'%s'" % str(args)) from e
def Validate(self):
self._TypeValidator(self._args, list)
for args in self._args:
if not 1 <= len(args) <= 5:
raise ValidationError("Invalid GooGet args '%s' with length of "
"'%d'" % (args, len(args)))
self._TypeValidator(args[0], str) # Package
if len(args) > 1:
self._TypeValidator(args[1], list) # Flags
if len(args) > 2:
self._TypeValidator(args[2], str) # Path
if len(args) > 3:
self._TypeValidator(args[3], int) # Retries
if len(args) > 4:
self._TypeValidator(args[4], int) # Sleep interval
| google/glazier | glazier/lib/actions/googet.py | Python | apache-2.0 | 2,611 |
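# --- Illustrative sketch (not part of the file above) ---
# Validate() above accepts between one and five positional items per entry:
# [package, flags, path, retries, sleep]. A hypothetical args value that would
# pass validation (the package name, flags, and path below are made up):
example_args = [
    [
        'example_package',                      # package (str) - hypothetical
        ['-reinstall'],                         # flags (list) - hypothetical
        r'C:\ProgramData\GooGet\googet.exe',    # path (str) - hypothetical
        3,                                      # retries (int)
        10,                                     # sleep seconds (int)
    ],
]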
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Package that supports address symbolization of device firmware."""
from pw_symbolizer.symbolizer import Symbolizer, Symbol, FakeSymbolizer
from pw_symbolizer.llvm_symbolizer import LlvmSymbolizer
| google/pigweed | pw_symbolizer/py/pw_symbolizer/__init__.py | Python | apache-2.0 | 784 |
'''function Criptografia(Senha: string): string;
var x, y: Integer;
NovaSenha: string;
begin
for x := 1 to Length(Chave) do
begin
NovaSenha := '';
for y := 1 to Length(Senha) do
NovaSenha := NovaSenha + chr((Ord(Chave[x]) xor Ord(Senha[y])));
Senha := NovaSenha;
end;
result := Senha;
end;
'''
import base64
chave = "COISABOA"
# 6,7,8
num_swap = {
'0':'6', # 678
#'1':'1',
'2':'3',
'3':'4',
'4':'7', # 678
'5':'8', # 678
'6':'9',
'7':'0',
'8':'2',
'9':'5',
'-':'+'
}
def criptografia(senha):
nova_senha = ''
for i in range(len(chave)):
nova_senha = ''
for j in range(len(senha)):
nova_senha = nova_senha + chr( ord(chave[i]) ^ ord(senha[j]) )
senha = nova_senha
result = senha
return result
def translate(encoded):
senha = encoded.swapcase()
senha2 = ''
for i in range(len(senha)):
if senha[i] in num_swap.keys():
#print(num_swap[senha[i]])
senha2 += num_swap[senha[i]]
else:
senha2 += senha[i]
return senha2
def decrypt(encoded):
encoded = translate(encoded)
b64_ok = False
count = 0
while (not b64_ok):
try:
senha = base64.b64decode(encoded)
b64_ok = True
except Exception as e:
encoded += "="
count += 1
if count > 5:
quit()
nova_senha = ''
for i in range(len(chave)-1, -1, -1):
nova_senha = ''
for j in range(len(senha)):
if isinstance(senha[j], int):
nova_senha = nova_senha + chr( ord(chave[i]) ^ senha[j] )
elif isinstance(senha[j], str):
nova_senha = nova_senha + chr( ord(chave[i]) ^ ord(senha[j]) )
else:
print("Error")
senha = nova_senha
result = senha
print(result)
return result
def encrypt(text):
coded = base64.b64encode(criptografia(text).encode('ascii'))
print(coded)
return coded
def encrypt2(text):
coded = base64.urlsafe_b64encode(criptografia(text).encode('ascii'))
print(coded)
return coded
texto1 = "[TXTCONFIGS]"
texto2 = "[NOMEDB]trabuc_dotcom"
texto3 = "qeHEsu1sx1rjx1LgBgXSnw6PENLUEhPPnxH7DJv9Aq"
#a = encrypt(texto1)
#b = encrypt(texto2)
#c = decrypt(texto3)
decoded_content = ''
with open('logs.txt', 'r') as log:
encoded = log.readline()
while encoded:
decoded = decrypt(encoded)
decoded_content += decoded + '\n'
encoded = log.readline()
with open('log_decoded.txt', 'w') as dlog:
dlog.write(decoded_content)
| gomesar/sec-stuff | extras/cripto.py | Python | mit | 2,321 |
from werkzeug import secure_filename
from flask_wtf import Form
from flask_wtf.file import FileField
from flask import Flask,render_template,request,flash
from forms import ClientForm
from comm_client import send_file
app=Flask(__name__)
app.secret_key='wehajfhsdgf3876845FRtff%6@#4'
@app.route('/upload/',methods=('GET','POST'))
def upload():
    form = ClientForm()
    if request.method == 'GET':
        return render_template('index.html', form=form)
    if request.method == 'POST':
        file = request.files['filename']
        if file:
            filename = secure_filename(form.filename.data.filename)
            print form.filename.data.filename
            file.save('uploads/' + filename)
            send_file(filename)
        return render_template('index.html', form=form)
if __name__=='__main__':
app.run(host='0.0.0.0',debug=True,port=12346,use_reloader=True)
| denisshockwave/image_processing_ocr_server | app/client.py | Python | gpl-3.0 | 909 |
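# --- Illustrative sketch (not part of the file above) ---
# forms.py is imported by client.py but not shown; the only requirement the
# view places on ClientForm is a `filename` FileField. A minimal, hypothetical
# definition consistent with that usage:
from flask_wtf import Form
from flask_wtf.file import FileField, FileRequired

class ClientForm(Form):
    filename = FileField('filename', validators=[FileRequired()])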
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields, validate
import polyaxon_sdk
from polyaxon.contexts import refs as contexts_refs
from polyaxon.lifecycle import V1Statuses
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class V1EventKind(polyaxon_sdk.V1EventKind):
events_statuses_mapping = {
polyaxon_sdk.V1EventKind.RUN_STATUS_CREATED: V1Statuses.CREATED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RESUMING: V1Statuses.RESUMING,
polyaxon_sdk.V1EventKind.RUN_STATUS_ON_SCHEDULE: V1Statuses.ON_SCHEDULE,
polyaxon_sdk.V1EventKind.RUN_STATUS_COMPILED: V1Statuses.COMPILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_QUEUED: V1Statuses.QUEUED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SCHEDULED: V1Statuses.SCHEDULED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STARTING: V1Statuses.STARTING,
polyaxon_sdk.V1EventKind.RUN_STATUS_RUNNING: V1Statuses.RUNNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_PROCESSING: V1Statuses.PROCESSING,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPING: V1Statuses.STOPPING,
polyaxon_sdk.V1EventKind.RUN_STATUS_FAILED: V1Statuses.FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPED: V1Statuses.STOPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SUCCEEDED: V1Statuses.SUCCEEDED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SKIPPED: V1Statuses.SKIPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_WARNING: V1Statuses.WARNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNSCHEDULABLE: V1Statuses.UNSCHEDULABLE,
polyaxon_sdk.V1EventKind.RUN_STATUS_UPSTREAM_FAILED: V1Statuses.UPSTREAM_FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RETRYING: V1Statuses.RETRYING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNKNOWN: V1Statuses.UNKNOWN,
polyaxon_sdk.V1EventKind.RUN_STATUS_DONE: V1Statuses.DONE,
}
class EventTriggerSchema(BaseCamelSchema):
kinds = fields.List(
fields.Str(validate=validate.OneOf(V1EventKind.allowable_values)),
required=True,
)
ref = fields.Str(required=True)
@staticmethod
def schema_config():
return V1EventTrigger
class V1EventTrigger(BaseConfig, contexts_refs.RefMixin, polyaxon_sdk.V1EventTrigger):
"""Events are an advanced triggering logic that users can take advantage of in addition to:
* Manual triggers via API/CLI/UI.
* Time-based triggers with schedules and crons.
* Upstream triggers with upstream runs or upstream ops in DAGs.
    Events can be attached to an operation in the context of a DAG
    to extend the simple trigger process.
    This is generally important when the user defines a dependency between two operations
    and needs a run to start as soon as
    the upstream run generates an event instead of waiting until it reaches a final state.
For instance, a usual use-case is to start a tensorboard as soon as training starts.
In that case the downstream operation will watch for the `running` status.
    Events can also be attached to a single operation
    to wait for an internal alert or external events.
    For instance, if a user integrates Polyaxon with Github,
    they can trigger training as soon as Polyaxon is notified that a new git commit was created.
Polyaxon provides several internal and external events that users
can leverage to fully automate their usage of the platform:
* "run_status_created"
* "run_status_resuming"
* "run_status_compiled"
* "run_status_queued"
* "run_status_scheduled"
* "run_status_starting"
* "run_status_initializing"
* "run_status_running"
* "run_status_processing"
* "run_status_stopping"
* "run_status_failed"
* "run_status_stopped"
* "run_status_succeeded"
* "run_status_skipped"
* "run_status_warning"
* "run_status_unschedulable"
* "run_status_upstream_failed"
* "run_status_retrying"
* "run_status_unknown"
* "run_status_done"
* "run_approved_actor"
* "run_invalidated_actor"
* "run_new_artifacts"
* "connection_git_commit"
* "connection_dataset_version"
* "connection_registry_image"
* "alert_info"
* "alert_warning"
* "alert_critical"
* "model_version_new_metric"
* "project_custom_event"
* "org_custom_event"
Args:
kinds: List[str]
ref: str
> **Important**: Currently only events with prefix `run_status_*` are supported.
## YAML usage
```yaml
>>> events:
>>> ref: {{ ops.upstream-operation }}
>>> kinds: [run_status_running]
```
```yaml
>>> event:
>>> ref: {{ connections.git-repo-connection-name }}
>>> kinds: [connection_git_commit]
```
## Python usage
```python
>>> from polyaxon.polyflow import V1EventKind, V1EventTrigger
>>> event1 = V1EventTrigger(
>>> ref="{{ ops.upstream-operation }}",
>>> kinds=[V1EventTrigger.RUN_STATUS_RUNNING],
>>> )
>>> event2 = V1EventTrigger(
>>> ref="{{ connections.git-repo-connection-name }}",
>>> kinds=[V1EventTrigger.CONNECTION_GIT_COMMIT],
>>> )
```
## Fields
### kinds
    The trigger event kinds to watch; if any of these events is detected, the operation
    defining the `events` section will be initiated.
```yaml
>>> event:
>>> kinds: [run_status_running, run_status_done]
```
    > **Note**: Similar to triggers in DAGs, after an operation is initiated,
> it will still have to validate the rest of the Polyaxonfile,
> i.e. conditions, contexts, connections, ...
### ref
    A valid reference that Polyaxon can resolve to the object(s) that will emit the events to watch for.
All supported events are prefixed with the object reference that can send such events.
The `run_*` events can be referenced both by `runs.UUID` or
`ops.OPERATION_NAME` if defined in the context of a DAG.
```yaml
>>> event:
>>> ref: ops.upstream_operation_name
```
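    The `run_*` events can also reference a run directly by its UUID; a minimal sketch
    (the UUID below is only a placeholder, not a real run):
    ```yaml
    >>> event:
    >>>   ref: runs.8aac02e3a62a4f0aaa257c59da5eab80
    >>>   kinds: [run_status_succeeded]
    ```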
"""
IDENTIFIER = "event_trigger"
SCHEMA = EventTriggerSchema
REDUCED_ATTRIBUTES = [
"ref",
]
| polyaxon/polyaxon | core/polyaxon/polyflow/events/__init__.py | Python | apache-2.0 | 6,731 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##This file is part of pySequence
#############################################################################
#############################################################################
## ##
## grilles ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2012 Cédrick FAURY - Jean-Claude FRICOU
##
## pySéquence : aide à la construction
## de Séquences et Progressions pédagogiques
## et à la validation de Projets
# pySequence is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# pySequence is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pySequence; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
Module grilles
**************
Generation of the project assessment grids.
"""
# Folder containing the grid templates
from util_path import TABLE_PATH, toFileEncoding
# Character used to tick the boxes:
COCHE = "X"
# Module used to access the Excel workbook
# (Windows only)
import sys
if sys.platform == "win32":
import win32com.client.dynamic#win32com.client,
# Alternative module (experiment in progress)
# import xlwt
# from xlwt import Workbook
import os
from widgets import messageErreur
def getFullNameGrille(fichier):
    """ Returns the path of the grid file named <fichier>
"""
return os.path.join(TABLE_PATH, toFileEncoding(fichier))
def ouvrirXLS(fichier):
    """ Opens the XLS grid named <fichier> and returns the opened workbook.
        :param fichier: name of the grid file, relative to TABLE_PATH
        :return: a tuple (tableau, err, fichierPB) where
            - tableau is the opened PyExcel workbook, or None on failure
            - err is an error code (0 = OK, 1 = Excel error, 2 = file not found)
            - fichierPB is the list of files that could not be opened
        :rtype: tuple
    """
    fichierPB = []  # List of files that could not be opened
fichier = getFullNameGrille(fichier)
tableau = None
err = 0
# print "ouvrirXLS", fichier
if os.path.isfile(fichier):
# tableau = PyExcel(fichier)
        try:
            tableau = PyExcel(fichier)
        except Exception as exc:
            # Bind the exception to a new name: reusing "err" would be unbound again
            # at the end of the except clause in Python 3.
            # No parent window is available in this helper, so pass None to messageErreur.
            messageErreur(None, "Erreur !", exc.args[0] if exc.args else str(exc))
            err = 1
        except:
            err = 1
else:
err = 2
fichierPB.append(fichier)
return tableau, err, fichierPB
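# Example usage (sketch): "grille_revue.xlsx" is a placeholder name; the file is
# expected to exist in TABLE_PATH.
#     tableau, err, fichiersPB = ouvrirXLS("grille_revue.xlsx")
#     if err == 0:
#         tableau.show()    # make the workbook visible in Excel
#         tableau.close()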
def getTableau(win, nomFichier):
    """ Opens and returns the workbook containing
        the assessment grids: reviews + oral defense.
        :param win: parent window for any wx.Dialog shown during the process
        :type win: wx.Window
        :param nomFichier: name of the grid file to open
        :return: the opened PyExcel workbook, or None if an error occurred
        :rtype: PyExcel
    """
# print("getTableau", nomFichier)
tableau, err, fichierPB = ouvrirXLS(nomFichier)
#
    # Handling of possible errors
#
if err == 0:
return tableau
elif err&1 != 0:
        messageErreur(win, "Lancement d'Excel impossible !",
                  "L'erreur peut avoir une des causes suivantes :\n" \
                  " - L'application Excel n'est pas installée.\n" \
                  " - Excel n'est pas prêt (opération en cours)\n"
                  " - Le fichier original de la grille n'a pas la bonne extension.\n"
                  )
# elif err&2 != 0:
# messageErreur(parent, u"Fichier non trouvé !",
# u"Le fichier original de la grille,\n " + fichierPB[0] + u"\n" \
# u"n'a pas été trouvé ! \n")
else:
print("Erreur", err)
import threading
def getXlApp():
# xlApp = win32com.client.dynamic.Dispatch('Excel.Application')
try:
xlApp = win32com.client.GetActiveObject('Excel.Application') # voir aussi avec DispatchEx ? ou bien win32com.client.gencache.EnsureDispatch("Excel.Application")
except:
try:
xlApp = win32com.client.dynamic.Dispatch('Excel.Application')
except:
try:
xlApp = win32com.client.gencache.EnsureDispatch('Excel.Application')
except:
try:
xlApp = win32com.client.DispatchEx('Excel.Application')
except:
raise Exception("Impossible d'ouvrir Excel")
return xlApp
def getExentionExcel():
global EXT_EXCEL
import pythoncom
pythoncom.CoInitialize()
try:
xlApp = getXlApp()
# xlApp = win32com.client.Dispatch('Excel.Application')
except :
print("pas Excel")
return
try:
if float(xlApp.Version) < 12:
EXT_EXCEL = ".xls"
else:
EXT_EXCEL = ".xlsx"
except: # Excel est installé mais pas disponible
EXT_EXCEL = ""
del xlApp # Parfois très lent, d'où le thread ...
print(EXT_EXCEL)
EXT_EXCEL = None
def get_th_xls():
try:
th_xls = threading.Thread(None, getExentionExcel, None)
th_xls.start()
return th_xls
except:
pass # ya pas Excel !
# ######################################################################################################
# def getTableaux(win, doc):
# u""" Ouvre et renvoie les classeurs
# contenant les grilles d'évaluation : revues + soutenance
#
# :param win: Fenêtre parente des éventuels wx.Dialog à afficher pendant le processus
# :type win: wx.Window
#
# :return: la liste des codes d'erreur
# :rtype: list
#
#
# """
# # print "getTableaux", doc
# # typ = doc.GetTypeEnseignement()
# # ref = doc.GetReferentiel()
# prj = doc.GetProjetRef()
# fichiers = prj.grilles
# # print " toutes les grilles :", fichiers
# fichierPB = []
#
# def ouvrir(fichier):
# fichier = os.path.join(TABLE_PATH, toFileEncoding(fichier))
# tableau = None
# err = 0
#
# if os.path.isfile(fichier):
# try:
# tableau = PyExcel(fichier)
# except:
# err = 1
# else:
# err = 2
# fichierPB.append(fichier)
#
# return err, tableau
#
# tableaux = {}
# ff = r""
# for k, f in fichiers.items():
# nomFichier = f[0]
# if nomFichier != ff:
# if EXT_EXCEL != os.path.splitext(nomFichier)[1]:
# nomFichier = os.path.splitext(nomFichier)[0] + EXT_EXCEL
# err, tableaux[k] = [ouvrir(nomFichier), f[1]]
#
# # if typ == 'SSI':
# # err, tableau = ouvrir(fichiers[0])
# # if err != 0:
# # err, tableau = ouvrir(fichiers[1])
# # else:
# # errR, tableauR = ouvrir(fichiersR[0])
# # if errR != 0:
# # errR, tableauR = ouvrir(fichiersR[1])
# # errS, tableauS = ouvrir(fichiersS[0])
# # if errS != 0:
# # errS, tableauS = ouvrir(fichiersS[1])
# # err = errR + errR
# # tableau = [tableauR, tableauS]
#
# if err == 0:
# return tableaux
# elif err&1 != 0:
# messageErreur(win, u"Lancement d'Excel impossible !",
# u"L'application Excel ne semble pas installée !")
# elif err&2 != 0:
# messageErreur(win, u"Fichier non trouvé !",
# u"Le fichier original de la grille,\n " + fichierPB[0] + u"\n" \
# u"n'a pas été trouvé ! \n")
# else:
# print "Erreur", err
###################################################################################################
def modifierGrille(doc, tableaux, eleve):
    """ Automatically fills in the assessment grids.
    :param tableaux: dictionary of the form:
        {path_of_the_grid_Excel_file: (list_of_parts_concerned, open_Excel_workbook_object)}
    :type tableaux: dict
    :param eleve: student concerned by the grid to fill in
    :type eleve: pysequence.Eleve
    """
# print("modifierGrille", eleve, tableaux)
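    # Expected shape of <tableaux> (illustrative sketch; the file names and part
    # codes below are placeholders, not values from this project):
    #     {"grille_revue.xlsx": (["A", "B"], <PyExcel instance>),
    #      "grille_soutenance.xlsx": (["C"], <PyExcel instance>)}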
log = []
ref = doc.GetReferentiel()
prj = doc.GetProjetRef()
#
    # Tick the "non" cells (only for the STI2D "Revues" grids)
#
for part, grille in list(prj.grilles.items()):
# print ">>>", part
dicInfo = prj.cellulesInfo[part]
# print " ", dicInfo
# print " ", ref.aColNon
if part in list(ref.aColNon.keys()) and ref.aColNon[part]:
if not "NON" in list(dicInfo.keys()):
log.append("Manque info \"NON\" pour %s" %prj.parties[part])
continue
feuilNON = dicInfo["NON"][0][0]
dicIndic = eleve.GetDicIndicateurs()
dicNon = doc.GetProjetRef()._dicoIndicateurs_simple['S']
# print dicInfo["NON"]
colNON = dicInfo["NON"][0][1][1]
        # Add the candidate's sheet if it does not exist yet
if grille[1] == 'C': # fichier "Collectif"
feuille = feuilNON+str(eleve.id+1)
else:
feuille = feuilNON
for i, indics in list(dicNon.items()):
# print " indic:", indics
lignes = [ind.ligne[part] for ind in indics if part in list(ind.ligne.keys()) and ind.ligne[part] != 0]
# print "keys", part, dicIndic.keys()
for j, ligne in enumerate(lignes):
# print " ", i
# indic = l'indicateur "i" doit être évalué
if 'S'+i in list(dicIndic.keys()):
indic = dicIndic['S'+i][j]
else:
indic = False
# print " ", i, indic
for parts, t in list(tableaux.values()):
if part in parts and t != None:
# print "!!!!!!"
if feuille in t.getSheets():
nf = t.getSheetNum(feuille)
if not indic: # indicateur pas évalué --> on coche NON !
t.setCell(nf, ligne, colNON, COCHE)
else:
t.setCell(nf, ligne, colNON, '')
else:
log.append("Feuille \"" + feuille + "\" non trouvée")
#
    # Add some additional information (title, student name, school, session)
#
schem = {"Tit" : doc.intitule,
"Des" : doc.intitule + "\n" + doc.problematique,
"Nom" : eleve.GetNom(),
"Pre" : eleve.GetPrenom(),
"Etab": doc.classe.etablissement,
"N-P" : eleve.GetNomPrenom(),
"Sess": str(doc.annee+1)
}
# print schem
for parts, t in tableaux.values():
for part in parts:
# print ">>>", part
dicInfo = prj.cellulesInfo[part]
# print " ", dicInfo
# print " ", t
for k, v in list(schem.items()):
if k in list(dicInfo.keys()) and t != None:
# print " ", k
for d in dicInfo[k]:
f = d[0] # Feuille
if f in t.getSheets():
nf = t.getSheetNum(f)
l, c, p = d[1] # ligne , colonne
pre = d[2]
if p > 0: # Période - pour classeurs collectifs
l += eleve.id * p
t.setCell(nf, l, c, pre+v)
else:
log.append("Feuille \"" + f + "\" non trouvée")
#
    # Add the teachers' names
#
for part, grille in list(prj.grilles.items()):
dicInfo = prj.cellulesInfo[part]
ts = [t for parts, t in list(tableaux.values()) if part in parts]
if "Prof" in dicInfo:
for t in ts:
f, lcp , pre = dicInfo["Prof"][0]
l, c, p = lcp # ligne, colonne, période
if grille[1] == 'C': # fichier "Collectif"
f = f+str(eleve.id+1)
if f in t.getSheets():
nf = t.getSheetNum(f)
profs = [pr.GetNomPrenom() for pr in doc.equipe]
if p == 0:
profs = "\n".join(profs)
try:
t.setCell(nf, l, c, profs)
except:
log.append(u"Impossible d'écrire dans la cellule "\
+ part + str(nf) + " " + str(l) + " " + str(c))
else:
for i in range(len(profs)):
try:
if i < len(profs):
t.setCell(nf, l, c, profs[i])
else:
t.setCell(nf, l, c, '')
except:
log.append(u"Impossible d'écrire dans la cellule "\
+ part + str(nf) + " " + str(l) + " " + str(c))
l += p
else:
log.append("Feuille \"" + f + "\" non trouvée")
if "EtabPrf" in dicInfo:
for t in ts:
f, lcp , pre = dicInfo["EtabPrf"][0]
l, c, p = lcp # ligne, colonne, période
if grille[1] == 'C': # fichier "Collectif"
f += str(eleve.id+1)
if f in t.getSheets():
nf = t.getSheetNum(f)
profs = [doc.classe.etablissement for pr in doc.equipe]
if p == 0:
profs = "\n".join(profs)
try:
t.setCell(nf, l, c, profs)
except:
log.append(u"Impossible d'écrire dans la cellule "\
+ part + str(nf) + " " + str(l) + " " + str(c))
else:
for i in range(5):
try:
if i < len(profs):
t.setCell(nf, l, c, profs[i])
else:
t.setCell(nf, l, c, '')
except:
log.append(u"Impossible d'écrire dans la cellule "\
+ part + str(nf) + " " + str(l) + " " + str(c))
l += p
else:
log.append("Feuille \"" + f + "\" non trouvée")
# print("log",log)
return list(set(log))
# ###################################################################################################
# def modifierGrille(doc, tableaux, eleve):
# u""" Remplissage automatique des grilles d'évaluation
#
# :param tableaux: dictionnaire de la forme :
# {chemin_du_fichier_Excel_de_la_grille : (liste_des_parties_concernées, objet_tableau_Excel_ouvert)}
# :type tableaux: dict
#
# :param eleve: Elève concerné par la grille à modifier
# :type eleve: pysequence.Eleve
# """
# # print "modifierGrille", eleve
#
# log = []
# ref = doc.GetReferentiel()
# prj = doc.GetProjetRef()
#
# #
# # On coche les cellules "non" (uniquement grilles "Revues" STI2D)
# #
# for part, grille in prj.grilles.items():
# dicInfo = prj.cellulesInfo[part]
# if not "NON" in dicInfo.keys():
# log.append(u"Manque info \"NON\" pour %s" %prj.parties[part])
# continue
# if part in ref.aColNon.keys() and ref.aColNon[part]:
# feuilNON = dicInfo["NON"][0][0]
# dicIndic = eleve.GetDicIndicateurs()
# dicNon = doc.GetProjetRef()._dicoIndicateurs_simple['S']
# # print dicInfo["NON"]
# colNON = dicInfo["NON"][0][1][1]
#
# # On rajoute la feuille du cadidat si elle n'existe pas encore
# if grille[1] == 'C': # fichier "Collectif"
# feuille = feuilNON+str(eleve.id+1)
# else:
# feuille = feuilNON
#
# for i, indics in dicNon.items():
# # print " ", indics
# lignes = [ind.ligne[part] for ind in indics if part in ind.ligne.keys() and ind.ligne[part] != 0]
# # print "keys", part, dicIndic.keys()
# for j, ligne in enumerate(lignes):
# # print " ", i
# # indic = l'indicateur "i" doit être évalué
# if 'S'+i in dicIndic.keys():
# indic = dicIndic['S'+i][j]
# else:
# indic = False
# if part in tableaux.keys() and tableaux[part] != None:
# if feuille in tableaux[part].getSheets():
# nf = tableaux[part].getSheetNum(feuille)
# if not indic: # indicateur pas évalué --> on coche NON !
# tableaux[part].setCell(nf, ligne, colNON, COCHE)
# else:
# tableaux[part].setCell(nf, ligne, colNON, '')
# else:
# log.append(u"Feuille \"" + feuille + u"\" non trouvée")
#
#
#
# #
# # On rajoute quelques informations
# #
# schem = {"Tit" : doc.intitule,
# "Des" : doc.intitule + "\n" + doc.problematique,
# "Nom" : eleve.GetNom(),
# "Pre" : eleve.GetPrenom(),
# "Etab": doc.classe.etablissement,
# "N-P" : eleve.GetNomPrenom(),
# "Sess": str(doc.annee+1)
# }
#
#
# for ct, t in tableaux.items():
# dicInfo = prj.cellulesInfo[ct]
# # print " ", dicInfo
# # print " ", ct, t
# for k, v in schem.items():
# if k in dicInfo.keys() and t != None:
# # print " ", k
# for d in dicInfo[k]:
# f = d[0] # Feuille
# if f in t.getSheets():
# nf = t.getSheetNum(f)
# l, c, p = d[1] # ligne , colonne
# pre = d[2]
# if p > 0: # Période - pour classeurs collectifs
# l += eleve.id * p
# t.setCell(nf, l, c, pre+v)
# else:
# log.append(u"Feuille \"" + f + u"\" non trouvée")
#
# #
# # On rajoute les noms des professeurs
# #
# for part, grille in prj.grilles.items():
# dicInfo = prj.cellulesInfo[part]
# if "Prof" in dicInfo.keys() and part in tableaux.keys() and tableaux[part] != None:
# f, lcp , pre = dicInfo["Prof"][0]
# l, c, p = lcp # ligne, colonne, période
# if grille[1] == 'C': # fichier "Collectif"
# f = f+str(eleve.id+1)
# if f in tableaux[part].getSheets():
# nf = tableaux[part].getSheetNum(f)
# profs = [pr.GetNomPrenom() for pr in doc.equipe]
# for i in range(5):
# try:
# if i < len(profs):
# tableaux[part].setCell(nf, l, c, profs[i])
# else:
# tableaux[part].setCell(nf, l, c, '')
# except:
# pass
# # log.append(u"Impossible d'écrire dans la cellule "\
# # + part + str(nf) + " " + str(l) + " " + str(c))
# l += p
# else:
# log.append(u"Feuille \"" + f + u"\" non trouvée")
#
# if "EtabPrf" in dicInfo.keys() and part in tableaux.keys() and tableaux[part] != None:
# f, lcp , pre = dicInfo["EtabPrf"][0]
# l, c, p = lcp # ligne, colonne, période
# if grille[1] == 'C': # fichier "Collectif"
# f = f+str(eleve.id+1)
# if f in tableaux[part].getSheets():
# nf = tableaux[part].getSheetNum(f)
# profs = [doc.classe.etablissement for pr in doc.equipe]
# for i in range(5):
# try:
# if i < len(profs):
# tableaux[part].setCell(nf, l, c, profs[i])
# else:
# tableaux[part].setCell(nf, l, c, '')
# except:
# pass
# # log.append(u"Impossible d'écrire dans la cellule "\
# # + part + str(nf) + " " + str(l) + " " + str(c))
# l += p
# else:
# log.append(u"Feuille \"" + f + u"\" non trouvée")
#
# # print "log",log
# return list(set(log))
#
###############################################################################################################################
import shutil
def copierClasseurs(doc, nomFichiers):
# typ = doc.GetTypeEnseignement()
# ref = doc.GetReferentiel()
prj = doc.GetProjetRef()
fichiers = prj.grilles
# fichierPB = []
for k, f in list(fichiers.items()):
shutil.copyfile(os.path.join(TABLE_PATH, toFileEncoding(f[0])), toFileEncoding(nomFichiers[k]))
# err = 0
# if err == 0:
# return
# elif err&1 != 0:
# messageErreur(None, u"Ouverture d'Excel impossible !",
# u"L'application Excel ne semble pas installée !")
# elif err&2 != 0:
# messageErreur(None, u"Fichier non trouvé !",
# u"Le fichier original de la grille,\n " + fichierPB[0] + u"\n" \
# u"n'a pas été trouvé ! \n")
# else:
# print "Erreur", err
#from xlrd import open_workbook
#def modifierGrille2(doc, nomFichiers, eleve):
# """
# """
# #
# # On renseigne quelques informations
# #
# dicInfo = doc.GetReferentiel().cellulesInfo_prj
#
# schem = {"Tit" : doc.intitule,
# "Des" : doc.intitule + "\n" + doc.problematique,
# "Nom" : eleve.GetNom(),
# "Pre" : eleve.GetPrenom(),
# "Etab": doc.classe.etablissement,
# "N-P" : eleve.GetNomPrenom()
# }
#
# for c, nf in nomFichiers.items():
# wb = open_workbook(nf)
# sh = wb.sheet_by_index(0)
# for k, v in schem.items():
# if k in dicInfo.keys():
# l,c = dicInfo[k][1]
# sh.write(l-1, c-1, v)
#
# wb.save()
xlTypePDF = 0
xlQualityStandard = 0
xlLandscape = 1
class PyExcel:
def __init__(self,filename=None):
        # Open Excel
        self.xlApp = getXlApp()
        # Raises an error if Excel is busy:
        print("Excel :", self.xlApp)
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename=''
self.xlBook.Application.DisplayAlerts = False
def save(self, newfilename=None, ConflictResolution = 1):
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename, ConflictResolution = ConflictResolution)
else:
self.xlBook.Save()
def save_pdf(self, nomFichier, orientation = xlLandscape):
ws = self.xlBook.ActiveSheet
# ws.PageSetup.Orientation = orientation
ws.ExportAsFixedFormat(Type = xlTypePDF,
Filename=nomFichier,
Quality=xlQualityStandard,
IncludeDocProperties=True,
IgnorePrintAreas= False,
OpenAfterPublish=False)
def close(self):
self.xlBook.Close(SaveChanges=0)
del self.xlApp
def show(self):
self.xlApp.Visible=1
def hide(self):
self.xlApp.Visible=0
def getCell(self, sheet, row, col):
sht = self.xlBook.Worksheets(sheet)
return sht.Cells(row, col).Value
def setCell(self, sheet, row, col, value):
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Value = value
def setLink(self, sheet, row, col, value):
sht = self.xlBook.Worksheets(sheet)
hl = sht.Cells(row, col).Hyperlinks
hl.Add(sht.Cells(row, col), value)
def getRange(self, sheet, row1, col1, row2, col2):
sht = self.xlBook.Worksheets(sheet)
return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value
def setRange(self, sheet, leftCol, topRow, data):
bottomRow = topRow + len(data) - 1
rightCol = leftCol + len(data[0]) - 1
sht = self.xlBook.Worksheets(sheet)
sht.Range(
sht.Cells(topRow, leftCol),
sht.Cells(bottomRow, rightCol)
).Value = data
def getContiguousRange(self, sheet, row, col):
sht = self.xlBook.Worksheets(sheet)
        # find the bottom row
bottom = row
while sht.Cells(bottom + 1, col).Value not in [None, '']:
bottom = bottom + 1
        # find the rightmost column
right = col
while sht.Cells(row, right + 1).Value not in [None, '']:
right = right + 1
return sht.Range(sht.Cells(row, col), sht.Cells(bottom, right)).Value
def getActiveCell(self):
r=self.xlApp.ActiveCell
return r
def mergeCells(self,sheet,row1,col1,row2,col2):
sht = self.xlBook.Worksheets(sheet)
sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Merge()
def addSheet(self,Before='',After=''):
if Before :
pos=self.xlBook.Worksheets(Before)
print(self.xlBook.Worksheets.Add(Before=pos))
elif After:
pos=self.xlBook.Worksheets(After)
print(self.xlBook.Worksheets.Add(After=pos))
else:
print(self.xlBook.Worksheets.Add())
def addSheetName(self, name, Before='', After=''):
self.renameSheet(self.addSheet(Before = Before, After = After), name)
def getActiveSheet(self):
sheet=self.xlBook.ActiveSheet.Name
return sheet
def setActiveSheet(self,sheet):
sht = self.xlBook.Worksheets(sheet)
sht.Activate()
def delSheet(self,sheet):
sht = self.xlBook.Worksheets(sheet)
self.xlApp.DisplayAlerts = False
sht.Delete()
self.xlApp.DisplayAlerts = True
def renameSheet(self,sheet,newName):
sht = self.xlBook.Worksheets(sheet)
sht.Name=newName
def moveSheet(self,sheet,Before='',After=''):
sht = self.xlBook.Worksheets(sheet)
if Before :
pos=self.xlBook.Worksheets(Before)
sht.Move(Before=pos)
else:
pos=self.xlBook.Worksheets(After)
sht.Move(After=pos)
def getSheets(self):
lstSheets=[sheet.Name for sheet in self.xlBook.Worksheets]
return lstSheets
def getSheetNum(self, nom):
return self.getSheets().index(nom)+1
def copySheet(self,sheet,Before='',After=''):
sht = self.xlBook.Worksheets(sheet)
if Before :
pos=self.xlBook.Worksheets(Before)
newSht=sht.Copy(pos, None)
elif After:
pos=self.xlBook.Worksheets(After)
newSht=sht.Copy(None, pos)
else:
newSht=sht.Copy(None, sht)
def setBorder(self,sheet,row1,col1,row2,col2,weight):
sht = self.xlBook.Worksheets(sheet)
ran=sht.Range(sht.Cells(row1,col1),sht.Cells(row2,col2))
ran.Borders.Weight=weight
def setColor(self,sheet,row,col,color):
sht = self.xlBook.Worksheets(sheet)
print(sht.Cells(row, col).Interior.ColorIndex)
# Feuille vérrouillé : modification impossible !
#sht.Cells(row, col).Interior.ColorIndex = color
def insertPasteCol(self, sheet, col):
sht = self.xlBook.Worksheets(sheet)
c = sht.Columns(col).EntireColumn
c.Copy()
c = sht.Columns(col+1).EntireColumn
c.Insert()
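# Example usage of PyExcel (sketch; assumes Excel is installed and that the
# workbook path below, which is only illustrative, exists):
#     classeur = PyExcel(r"C:\chemin\vers\grille.xlsx")
#     classeur.setCell("Feuil1", 2, 3, COCHE)    # tick row 2, column 3
#     valeur = classeur.getCell("Feuil1", 2, 3)
#     classeur.save()
#     classeur.close()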
# https://mail.python.org/pipermail/python-win32/2008-January/006676.html
#class PyOOo(object):
#
# wdReplaceNone=0
# wdReplaceOne=1
# wdReplaceAll=2
# wdFindContinue=1
#
# #pour close/save :
# wdDoNotSaveChanges=0
# wdSaveChanges=-1
#
# wdCharacter=1
# wdCell=12
# wdLine=5
#
# wdAlignLeft=0
# wdAlignCenter=1
# wdAlignRight=2
#
#
#
# def __init__(self, fichier=None, visible=True):
# self.objServiceManager = win32com.client.Dispatch("com.sun.star.ServiceManager")
#
# #self.propert=self.objServiceManager.Bridge_GetStruct("com.sun.star.beans.PropertyValue")
#
# self.w = self.objServiceManager.CreateInstance("com.sun.star.frame.Desktop")
#
# if fichier!=None:
# time.sleep(1)
# self.open(fichier, visible)
#
#
# def u1252(self, chu):
# try:
# if type(chu) is unicode:
# return chu.encode('cp1252','replace')
# else:
# return chu
# except:
# return repr(chu)
#
#
# def open(self, fichier, visible=True):
# """Ouvre un document word
# """
# self.doc=self.w.loadComponentFromURL("file:///"+fichier, "_blank", 0, [])
# #self.visible(visible)
#
#
# def wnew(self, visible=True):
# """Nouveau document writer
# """
# self.doc=self.w.loadComponentFromURL("private:factory/swriter", "_blank", 0, [])
# self.visible(True)
#
#
# def close(self):
# """ferme le document, en sauvegardant, sans demander
# """
# #---
# print"close"
# self.w.store()
# self.w.Terminate(1)
#
#
# def docclose(self):
# """ferme le document, en sauvegardant, sans demander
# """
# self.doc.Close(True) #True ? False ?
#
#
# def saveas(self,fichier, typ=0):
# """Appel de 'Enregistrer sous', avec le nom du fichier
# """#---
# self.doc.storeAsURL("file:///"+fichier, [])
#
#
# def savepdf(self):
#
# def createStruct(nom):
# objCoreReflection= self.objServiceManager.createInstance("com.sun.star.reflection.CoreReflection")
# classSize = objCoreReflection.forName(nom)
# aStruct=[1,2]
# classSize.createObject(aStruct)
# return aStruct
#
# par=createStruct("com.sun.star.beans.PropertyValue")
# par.append([])
# par[0].Name = "URL"
# par[0].Value = "file:///C:/let01.odt"
#
# par=["FilterName", "writer_pdf_Export"]
# self.prop = self.objServiceManager.CreateInstance("com.sun.star.beans.PropertyValue")
# self.prop[0].Name = "URL"
# self.prop[0].Value = "file:///C:/let01.odt"
# self.prop[1].Name = "FilterName"
# self.prop[1].Value = "writer_pdf_Export"
# self.doc.storeAsURL("file:///C:/let01.pdf", self.prop)
#
#
# def saveas2(self,fichier, typ=0):
#
# def createStruct(nom):
# objCoreReflection= self.objServiceManager.createInstance("com.sun.star.reflection.CoreReflection")
# classSize = objCoreReflection.forName(nom)
# aStruct=[]
# classSize.createObject(aStruct)
# return aStruct
#
# #args1= self.objServiceManager.createInstance("com.sun.star.beans.PropertyValue")
# #args1 = createStruct("com.sun.star.beans.NamedValue")
# #print args1
#
# print "Titre :",self.doc.getDocumentInfo()
#
# args1=["file:///c:/titi.rtf"]
#
# self.doc.storeAsURL("",0,args1)
#
#
# """
# #args1=
#self.objServiceManager.createInstance("com.sun.star.beans.PropertyValue")
# #dispatcher =
#self.objServiceManager.createInstance('com.sun.star.frame.DispatchHelper')
# args1=createStruct("com.sun.star.beans.PropertyValue")
# print len(args1)
# prop.Name='Pages'
# prop.Value='3-5'
# args[0]=prop
#
# args1[0].Name = "URL"
# args1[0].Value = "file:///c:/titi.rtf"
# args1[1].Name = "FilterName"
# args1[1].Value = "Rich Text Format"
# args1[4].Name = "SelectionOnly"
# args1[4].Value = true
# """
# #sel=self.doc.SaveAs("",0,args1)
#
# def quit(self):
# """Ferme OOoW
# """
# self.w.Terminate()
#
#
# def quitSaveChange(self):
# """Ferme OooW, en sauvant les changements
# """
# self.w.store()
# self.w.Terminate()
#
#
# def quitCancel(self):
# """Ferme word, SANS sauver les changements
# """
# self.doc.storeAsURL("file:///C:/null__.odt", [])
# self.w.Terminate()
# os.remove("C:/null__.odt")
#
#
# def visible(self, par=True):
# """Rend Word visible (True), ou invisible (False) ; True par défaut
# Note : c'est plus rapide en invisible
# """
# """
# if par:
# self.objServiceManager.Visible(True)
# else:
# self.objServiceManager.Visible=False
# """
# win = self.doc.CurrentController.Frame.ContainerWindow
# if par:
# win.Visible = True
# else:
# win.Visible = False
#
#
# def hide(self):
# """Cache Word
# """
# win = self.doc.CurrentController.Frame.ContainerWindow
# win.Visible = False
#
#
# def show(self):
# """Montre la fenêtre
# """
# win = self.doc.CurrentController.Frame.ContainerWindow
# win.Visible = True
#
#
# def wprint(self):
# """Imprime le document
# """
# warg=[]
# self.doc.Print(warg)
#
#
# def wprint2(self,printer='PDFCreator'):
# """Imprime le document
# """
# warg=['Name','PDFCreator']
# self.doc.Print(warg)
#
## prop.Name='Name'
## prop.Value='PDFCreator'
## args[2]=prop
#
#
# def preview(self):
# """Pré-visualise le document
# """
# self.doc.PrintPreview()
#
#
# def previewclose(self):
# """Ferme la prévisdualisation du document
# """
# self.doc.ClosePrintPreview()
#
#
# def text(self, txt):
# """Remplace le texte sélectionné, par le paramètre
# """
# newchaine=txt.replace('\n','\r')
# self.position.Text = newchaine
#
#
# def TypeText(self, chaine):
# """ 'Tape' le texte à la position courante
# """
# self.position.TypeText(chaine)
#
#
# def chExist(self, chaine):
# """Cherche l'existence d'une chaine dans le document.
# Retourne True ou False, selon le résultat.
# """
# och=self.doc.createSearchDescriptor()
# och.SearchString=chaine
# och.SearchWords = False #mots entiers seulement ?
# position=self.doc.findFirst(och)
# if position:
# return True
# else:
# return False
#
#
# def macroRun(self, name):
# """Lance la macro-word (VBA) 'name'
# """
# print "Non supporté _ àcf"
# print "Non supporté _ àcf"
# print "Non supporté _ àcf"
#
#
# def language(self):
# """Retourne la langue de Writer
# """
# print "Non supporté _ àcf"
# print "Non supporté _ àcf"
# print "Non supporté _ àcf"
#
#
# def filterTxt(self):
# """Interne - Convertit une sélection en texte
# """
# ss=self.u1252(self.doc.GetText().String)
# ss=ss.replace(chr(7)+chr(13),' ')
# ss=ss.replace(chr(13),'\r\n')
# ss=ss.replace(chr(7),' ')
# ss=ss.replace(chr(9),'')
# ss=ss.replace(chr(26),'')
# return ss
#
#
# def eSelAll(self):
# """sélectionne, et retourne, tout le document
# """
# sel=self.doc.GetText()
# return self.filterTxt()
#
#
# def eSelWord(self, nb=1):
# """étend la sélection aux nb mots à droite, et retourne la sélection
# """
# self.w.Selection.WordRightSel(self.wdWord, nb, self.wdExtend)
# return self.filterTxt()
#
#
# def eSelLine(self, nb=1):
# """étend la sélection aux nb lignes en-dessous, et retourne la
# sélection
# """
# args2= self.doc.createInstance("com.sun.star.beans.PropertyValue")
# args2[0].Name= "Count"
# args2[0].Value= 1
# args2[1].Name= "Select"
# args2[1].Value= False
#
# self.doc.GoDown("", 0, args2)
# return self.filterTxt()
#
#
# def eSelEndLine(self):
# """étend la sélection jusqu'à la fin de la ligne, et retourne la
# sélection
# """
# self.w.Selection.EndKey(self.wdLine, self.wdExtend)
# return self.filterTxt()
#
#
# def chRemplAll(self, oldchaine, newchaine=''):
# """
# oldchaine = chaine a remplacer / string to replace
# newchaine = chaine de remplacement / string for replace
# """
# orempl=self.doc.createReplaceDescriptor()
# orempl.SearchString=oldchaine
# orempl.ReplaceString=newchaine
# orempl.SearchWords = False #mots entiers seulement ?
# orempl.SearchCaseSensitive = True #sensible à la casse ?
# nb = self.doc.replaceAll(orempl)
#
#
# def chRemplLstAll(self, lst=[[]]):
# """
# oldchaine = chaine a remplacer / string to replace
# newchaine = chaine de remplacement / string for replace
# """
# nb=0
# for oldchaine, newchaine in lst:
# orempl=self.doc.createReplaceDescriptor()
# orempl.SearchString=oldchaine
# orempl.ReplaceString=newchaine
# orempl.SearchWords = False #mots entiers seulement ?
# orempl.SearchCaseSensitive = True #sensible à la casse ?
# nb += self.doc.replaceAll(orempl)
#
#
# def chRemplOne(self, oldchaine, newchaine=''):
# """
# oldchaine = chaine a remplacer / string to replace
# newchaine = chaine de remplacement / string for replace
# """
# sel = self.w.Selection
# #sel.ClearFormatting()
# sel.Find.Text = oldchaine
# sel.Find.Forward = True
# newchaine=newchaine.replace('\n','\r')
# sel.Find.Execute(oldchaine,False,False,False,False,False,True,self.wdFindContinue,False,newchaine,self.wdReplaceOne)
# self.position=sel
#
#
# def chRemplClipboard(self, oldchaine):
# """
# oldchaine = chaine a remplacer / string to replace
# """
# sel = self.w.Selection
# #sel.ClearFormatting()
# sel.Find.Text = oldchaine
# sel.Find.Forward = True
#
# sel.Find.Execute(oldchaine,False,False,False,False,False,True,self.wdFindContinue,False,'XXX',self.wdReplaceOne)
# sel.Paste()
# self.position=sel
#
#
# def chRemplGraf(self, oldchaine, fichier):
# """
# oldchaine = chaine a remplacer / string to replace
# """
# sel = self.w.Selection
# #sel.ClearFormatting()
# sel.Find.Text = oldchaine
# sel.Find.Forward = True
#
# sel.Find.Execute(oldchaine,False,False,False,False,False,True,self.wdFindContinue,False,'',self.wdReplaceOne)
# sel.InlineShapes.AddPicture(fichier, False, True)
# self.position=sel
#
#
# def TableauInsLigApres(self, oldchaine, nblig=1):
# """
# oldchaine = chaine a remplacer / string to replace
# """
# sel = self.w.Selection
# #sel.ClearFormatting()
# sel.Find.Text = oldchaine
# sel.Find.Forward = True
#
# sel.Find.Execute(oldchaine,False,False,False,False,False,True,self.wdFindContinue,False,'',self.wdReplaceOne)
# sel.InsertRowsBelow(nblig)
#
#
# def TableauDelLig(self, oldchaine):
# """
# oldchaine = chaine a remplacer / string to replace
# """
# sel = self.w.Selection
# #sel.ClearFormatting()
# sel.Find.Text = oldchaine
# sel.Find.Forward = True
#
# sel.Find.Execute(oldchaine,False,False,False,False,False,True,self.wdFindContinue,False,'',self.wdReplaceOne)
# sel.Rows.Delete()
#
#
# def MoveRight(self, nb=1):
# self.position.MoveRight(self.wdCharacter, nb)
#
#
# def MoveLeft(self, nb=1):
# self.position.MoveLeft(self.wdCharacter, nb)
#
#
# def TableauMoveRight(self, nb=1):
# sel = self.w.Selection
# sel.MoveRight(self.wdCell, nb)
#
#
# def TableauMoveLeft(self, nb=1):
# sel = self.w.Selection
# sel.MoveLeft(self.wdCell, nb)
#
#
# def TableauMoveLine(self, nb=1):
# sel = self.w.Selection
# if nb>0:
# sel.MoveDown(self.wdLine, nb)
# else:
# sel.MoveUp(self.wdLine, -nb)
#
#
# def TableauCellule(self, lig=1, col=1, txt='', align=0):
# tbl = self.doc.Tables[0]
# cellule = tbl.Cell(lig, col)
# cellule.Range.Text = txt
# cellule.Range.ParagraphFormat.Alignment = align #0,1,2, left, center, right
#
#
# def landscape(self):
# """Met le document en mode paysage
# """
# self.wdOrientLandscape=1
# self.wdOrientPortrait=0
# self.w.ActiveDocument.PageSetup.Orientation = self.wdOrientLandscape
#
#
# def portrait(self):
# """Met le document en mode portrait
# """
# self.wdOrientLandscape=1
# self.wdOrientPortrait=0
# self.w.ActiveDocument.PageSetup.Orientation = self.wdOrientPortrait
#
#
# def changePrinter(self, printerName):
# """Change l'imprimante active de Word
# """
# self.w.ActivePrinter = printerName
#def exporterGrille(typeDoc):
# rb = xlrd.open_workbook(GRILLE[typeDoc])
# wb = copy(rb)
# l,c = Cellules_NON_SSI["B3"][0]
# wb.get_sheet(1).write(l,c,'x')
#
# wb.save('output.xls')
#
#exporterGrille('SSI')
#xlApp = win32com.client.dynamic.Dispatch('Excel.Application')
#print dir(xlApp)
#print xlApp.Version
| cedrick-f/pySequence | src/grilles.py | Python | gpl-3.0 | 43,902 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
np.set_printoptions(precision=4)
train_data_name = './dataset/winequality-white.csv'
feature_num = 11
enable_test = True
quadratic = True
def read_data():
train_set = np.genfromtxt(train_data_name, comments='#', delimiter=';')
return (train_set, train_set)
def linear_regression(t, x, y, test_set, ma):
    a = 0.1          # learning rate
    l = 10           # L2 regularization strength (lambda)
    m = len(x)       # number of training samples
    iter_num = 200
    for _ in xrange(iter_num):
        # Batch gradient descent step on the ridge-regularized squared loss:
        # theta <- theta * (1 - a*l/m) - (a/m) * X^T (X theta - y)
        t = (1. - a * l / m) * t - a / m * np.dot(x.transpose(), np.dot(x, t) - y)
        if enable_test and _ % 5 == 0:
            print "round %s" % _
            test_linear_regression(t, test_set[:, :-1], test_set[:, -1], ma)
def test_linear_regression(t, x, y, ma):
    # Apply the same per-feature scaling (training maxima) used during training.
    x = x / ma
    if quadratic:
        x = np.append(x, x * x, axis=1)
    p = np.dot(x, t)
    # Show the first 10 predictions next to the true quality scores.
    for _ in xrange(10):
        print p[_], y[_]
    # Print the squared sum of residuals as a rough error measure.
    p = np.sum(p - y)
    print p * p
def train(x, y, test_set):
ma = np.max(x, axis=0)
x = x / ma
t = np.random.randn(feature_num)
if quadratic:
x = np.append(x, x * x, axis=1)
t = np.random.randn(feature_num * 2)
linear_regression(t, x, y, test_set, ma)
def main():
train_set, test_set = read_data()
train(train_set[:, :-1], train_set[:, -1], test_set)
if __name__ == '__main__':
main()
| legendlee1314/ooni | ml/linear_regression.py | Python | mit | 1,332 |
import datetime
import logging
from typing import Dict, Optional, Tuple
from ...RateLimiter import RateLimiter
from . import (
ApplicationRateLimiter,
MethodRateLimiter,
OopsRateLimiter,
InternalLimiter,
)
LOG = logging.getLogger(__name__)
class BasicRateLimiter(RateLimiter):
__application_rate_limiter = ApplicationRateLimiter()
def __init__(self):
super().__init__()
self._limiters: Tuple[InternalLimiter, InternalLimiter, InternalLimiter] = (
BasicRateLimiter.__application_rate_limiter,
MethodRateLimiter(),
OopsRateLimiter(),
)
def wait_until(
self, region: str, endpoint_name: str, method_name: str,
) -> Optional[datetime.datetime]:
wait_until = max(
[
(
limiter.wait_until(region, endpoint_name, method_name),
limiter.friendly_name,
)
for limiter in self._limiters
],
key=lambda lim_pair: lim_pair[0]
if lim_pair[0]
else datetime.datetime(datetime.MINYEAR, 1, 1),
)
if wait_until[0] is not None and wait_until[0] > datetime.datetime.now():
to_wait = wait_until[0] - datetime.datetime.now()
LOG.debug(
"waiting for %s seconds due to %s limit...",
to_wait.total_seconds(),
wait_until[1],
)
return wait_until[0]
return None
def record_response(
self,
region: str,
endpoint_name: str,
method_name: str,
status: int,
headers: Dict[str, str],
):
for limiter in self._limiters:
limiter.update_limiter(
region, endpoint_name, method_name, status, headers,
)
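# Minimal usage sketch (not part of the library): the region, endpoint and method
# names, and the `response` object below are illustrative placeholders.
#
#     import time
#
#     limiter = BasicRateLimiter()
#     ready_at = limiter.wait_until("na1", "SummonerApi", "by_name")
#     if ready_at is not None:
#         time.sleep(max(0.0, (ready_at - datetime.datetime.now()).total_seconds()))
#     # ... perform the HTTP request here ...
#     limiter.record_response(
#         "na1", "SummonerApi", "by_name", response.status_code, dict(response.headers)
#     )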
| pseudonym117/Riot-Watcher | src/riotwatcher/Handlers/RateLimit/BasicRateLimiter.py | Python | mit | 1,851 |
import numpy as np
a = np.array([0, 2, 3, 6])
b = np.array([3, 4, 5, 15])
print(np.gcd(a, b))
# [3 2 1 3]
print(type(np.gcd(a, b)))
# <class 'numpy.ndarray'>
l_a = [0, 2, 3, 6]
l_b = [3, 4, 5, 14]
print(np.gcd(l_a, l_b))
# [3 2 1 2]
print(type(np.gcd(l_a, l_b)))
# <class 'numpy.ndarray'>
print(np.gcd(6, 15))
# 3
print(type(np.gcd(6, 15)))
# <class 'numpy.int64'>
a_2d = np.array([[0, 2, 3, 6], [0, 2, 3, 6]])
print(a_2d)
# [[0 2 3 6]
# [0 2 3 6]]
print(b)
# [ 3 4 5 15]
print(a_2d + b)
# [[ 3 6 8 21]
# [ 3 6 8 21]]
print(np.gcd(a_2d, b))
# [[3 2 1 3]
# [3 2 1 3]]
a_mismatch = np.array([0, 1, 2])
# print(np.gcd(a_mismatch, b))
# ValueError: operands could not be broadcast together with shapes (3,) (4,)
print(np.gcd(a, 15))
# [15 1 3 3]
print(np.gcd(15, a))
# [15 1 3 3]
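# Not in the original snippet: since np.gcd is a ufunc, its reduce method can
# collapse a whole array to a single GCD.
print(np.gcd.reduce(a))
# 1
print(np.gcd.reduce(np.array([12, 20, 32])))
# 4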
| nkmk/python-snippets | notebook/numpy_gcd.py | Python | mit | 807 |
### BEGIN LICENSE
# Copyright (C) 2011 Guillaume Hain <zedtux@zedroot.org>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>
### END LICENSE
import gtk
import appindicator
from naturalscrolling_lib import naturalscrollingconfig
from naturalscrolling_lib.gconfsettings import GConfSettings
from naturalscrolling_lib.udevobservator import UDevObservator
from naturalscrolling.indicatormenu import IndicatorMenu
class Indicator(object):
# Singleton
_instance = None
_init_done = False
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Indicator, cls).__new__(cls, *args,
**kwargs)
return cls._instance
def __init__(self):
# Initialize a new AppIndicator
self.indicator = appindicator.Indicator(
"natural-scrolling-indicator",
"natural-scrolling-status-not-activated",
appindicator.CATEGORY_APPLICATION_STATUS)
media_path = "%s/media/" % naturalscrollingconfig.get_data_path()
self.indicator.set_icon_theme_path(media_path)
self.indicator.set_attention_icon(
"natural-scrolling-status-activated")
menu = IndicatorMenu()
self.indicator.set_menu(menu)
# Initialize the UDev client
udev_observator = UDevObservator()
udev_observator.on_update_execute(menu.refresh)
udev_observator.start()
# Force the first refresh of the menu in order to populate it.
menu.refresh(udev_observator.gather_devices())
# When something change in GConf, push it to the Indicator menu
# in order to update the status of the device as checked or unchecked
GConfSettings().server().on_update_fire(menu.update_check_menu_item)
# Initialize GConf in order to be up-to-date with existing devices
GConfSettings().initialize(udev_observator.gather_devices())
def status_attention(self):
self.set_status(appindicator.STATUS_ATTENTION)
def status_active(self):
self.set_status(appindicator.STATUS_ACTIVE)
def isreversed(self):
return True
def check_scrolling(self):
if self.isreversed():
self.indicator.set_status(appindicator.STATUS_ATTENTION)
else:
self.indicator.set_status(appindicator.STATUS_ACTIVE)
return True
def start(self):
self.check_scrolling()
try:
gtk.main()
except KeyboardInterrupt:
pass
| cemmanouilidis/naturalscrolling | naturalscrolling/indicator.py | Python | gpl-3.0 | 3,094 |
# Using the standard library's xml.etree.ElementTree module.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import sys
import xml.etree.ElementTree as ET
count = 0
for event, elem in ET.iterparse(sys.argv[1], events=("end",)):
if event == "end":
if elem.tag == 'location' and elem.text and 'Africa' in elem.text:
count += 1
elem.clear()
print('count =', count)
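# Usage sketch (the XML file name below is an example argument, not shipped with
# this snippet): python etree-count.py large-export.xml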
| eliben/code-for-blog | 2019/xml-stream/etree-count.py | Python | unlicense | 434 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import re
from asv.results import iter_results_for_machine
from . import tools
from .tools import dummy_packages, get_default_environment_type
from .test_workflow import basic_conf
def test_continuous(capfd, basic_conf):
tmpdir, local, conf, machine_file = basic_conf
python = "{0[0]}.{0[1]}".format(sys.version_info)
env_type = get_default_environment_type(conf, python)
env_spec = ("-E", env_type + ":" + python)
# Check that asv continuous runs
tools.run_asv_with_conf(conf, 'continuous', "master^", '--show-stderr',
'--bench=params_examples.track_find_test',
'--bench=params_examples.track_param',
'--bench=time_examples.TimeSuite.time_example_benchmark_1',
'--attribute=repeat=1', '--attribute=number=1',
'--attribute=warmup_time=0',
*env_spec, _machine_file=machine_file)
text, err = capfd.readouterr()
assert "SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY" in text
assert "PERFORMANCE INCREASED" in text or "PERFORMANCE DECREASED" in text
assert "+ 1 6 6.00 params_examples.track_find_test(2)" in text
assert "params_examples.ClassOne" in text
# Check rounds were interleaved (timing benchmark was run twice)
assert re.search(r"For.*commit [a-f0-9]+ (<[a-z0-9~^]+> )?\(round 1/2\)", text, re.M), text
result_found = False
for results in iter_results_for_machine(conf.results_dir, "orangutan"):
result_found = True
stats = results.get_result_stats('time_examples.TimeSuite.time_example_benchmark_1', [])
assert stats[0]['repeat'] == 2
assert result_found
| spacetelescope/asv | test/test_continuous.py | Python | bsd-3-clause | 1,967 |
#!/usr/bin/env python
# -*- noplot -*-
"""
This example demonstrates how to set a hyperlinks on various kinds of elements.
This currently only works with the SVG backend.
"""
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
f = plt.figure()
s = plt.scatter([1, 2, 3], [4, 5, 6])
s.set_urls(['http://www.bbc.co.uk/news', 'http://www.google.com', None])
f.canvas.print_figure('scatter.svg')
f = plt.figure()
delta = 0.025
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1 # difference of Gaussians
im = plt.imshow(Z, interpolation='bilinear', cmap=cm.gray,
origin='lower', extent=[-3, 3, -3, 3])
im.set_url('http://www.google.com')
f.canvas.print_figure('image.svg')
| bundgus/python-playground | matplotlib-playground/examples/pylab_examples/hyperlinks.py | Python | mit | 874 |
#
# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
""" Test functions for sparse matrices. Each class in the "Matrix class
based tests" section becomes a subclass of the classes in the "Generic
tests" section. This is done by the functions in the "Tailored base
class for generic tests" section.
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build sparse:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.sparse.test()'
Run tests if sparse is not installed:
python tests/test_base.py
"""
import warnings
import operator
import contextlib
import numpy as np
from scipy._lib.six import xrange, zip as izip
from numpy import (arange, zeros, array, dot, matrix, asmatrix, asarray,
vstack, ndarray, transpose, diag, kron, inf, conjugate,
int8, ComplexWarning, power)
import random
from numpy.testing import (assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_almost_equal, assert_,
dec, run_module_suite, assert_allclose)
import scipy.linalg
import scipy.sparse as sparse
from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix,
coo_matrix, lil_matrix, dia_matrix, bsr_matrix,
eye, isspmatrix, SparseEfficiencyWarning, issparse)
from scipy.sparse.sputils import supported_dtypes, isscalarlike, get_index_dtype
from scipy.sparse.linalg import splu, expm, inv
from scipy._lib._version import NumpyVersion
from scipy._lib.decorator import decorator
import nose
# Check for __numpy_ufunc__
class _UFuncCheck(object):
def __array__(self):
return np.array([1])
def __numpy_ufunc__(self, *a, **kwargs):
global HAS_NUMPY_UFUNC
HAS_NUMPY_UFUNC = True
HAS_NUMPY_UFUNC = False
np.add(_UFuncCheck(), np.array([1]))
warnings.simplefilter('ignore', SparseEfficiencyWarning)
warnings.simplefilter('ignore', ComplexWarning)
def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None,
downcast_maxval=None, assert_32bit=False):
"""
Monkeypatch the maxval threshold at which scipy.sparse switches to
64-bit index arrays, or make it (pseudo-)random.
"""
if maxval_limit is None:
maxval_limit = 10
if assert_32bit:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
tp = get_index_dtype(arrays, maxval, check_contents)
assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max)
assert_(tp == np.int32 or tp == np.intc)
return tp
elif fixed_dtype is not None:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
return fixed_dtype
elif random:
counter = np.random.RandomState(seed=1234)
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
return (np.int32, np.int64)[counter.randint(2)]
else:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
dtype = np.int32
if maxval is not None:
if maxval > maxval_limit:
dtype = np.int64
for arr in arrays:
arr = np.asarray(arr)
if arr.dtype > np.int32:
if check_contents:
if arr.size == 0:
# a bigger type not needed
continue
elif np.issubdtype(arr.dtype, np.integer):
maxval = arr.max()
minval = arr.min()
if minval >= -maxval_limit and maxval <= maxval_limit:
# a bigger type not needed
continue
dtype = np.int64
return dtype
if downcast_maxval is not None:
def new_downcast_intp_index(arr):
if arr.max() > downcast_maxval:
raise AssertionError("downcast limited")
return arr.astype(np.intp)
@decorator
def deco(func, *a, **kw):
backup = []
modules = [scipy.sparse.bsr, scipy.sparse.coo, scipy.sparse.csc,
scipy.sparse.csr, scipy.sparse.dia, scipy.sparse.dok,
scipy.sparse.lil, scipy.sparse.sputils,
scipy.sparse.compressed, scipy.sparse.construct]
try:
for mod in modules:
backup.append((mod, 'get_index_dtype',
getattr(mod, 'get_index_dtype', None)))
setattr(mod, 'get_index_dtype', new_get_index_dtype)
if downcast_maxval is not None:
backup.append((mod, 'downcast_intp_index',
getattr(mod, 'downcast_intp_index', None)))
setattr(mod, 'downcast_intp_index', new_downcast_intp_index)
return func(*a, **kw)
finally:
for mod, name, oldfunc in backup:
if oldfunc is not None:
setattr(mod, name, oldfunc)
return deco
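# A usage sketch (the test function below is hypothetical, not from this file):
#
#     @with_64bit_maxval_limit(fixed_dtype=np.int64)
#     def test_indices_use_int64():
#         ...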
def todense(a):
if isinstance(a, np.ndarray) or isscalarlike(a):
return a
return a.todense()
class BinopTester(object):
# Custom type to test binary operations on sparse matrices.
def __add__(self, mat):
return "matrix on the right"
def __mul__(self, mat):
return "matrix on the right"
def __sub__(self, mat):
return "matrix on the right"
def __radd__(self, mat):
return "matrix on the left"
def __rmul__(self, mat):
return "matrix on the left"
def __rsub__(self, mat):
return "matrix on the left"
#------------------------------------------------------------------------------
# Generic tests
#------------------------------------------------------------------------------
# TODO check that spmatrix( ... , copy=X ) is respected
# TODO test prune
# TODO test has_sorted_indices
class _TestCommon:
"""test common functionality shared by all sparse formats"""
checked_dtypes = supported_dtypes
def __init__(self):
# Canonical data.
self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
self.datsp = self.spmatrix(self.dat)
# Some sparse and dense matrices with data for every supported
# dtype.
self.dat_dtypes = {}
self.datsp_dtypes = {}
for dtype in self.checked_dtypes:
self.dat_dtypes[dtype] = self.dat.astype(dtype)
self.datsp_dtypes[dtype] = self.spmatrix(self.dat.astype(dtype))
# Check that the original data is equivalent to the
# corresponding dat_dtypes & datsp_dtypes.
assert_equal(self.dat, self.dat_dtypes[np.float64])
assert_equal(self.datsp.todense(),
self.datsp_dtypes[np.float64].todense())
def test_bool(self):
def check(dtype):
datsp = self.datsp_dtypes[dtype]
assert_raises(ValueError, bool, datsp)
assert_(self.spmatrix([1]))
assert_(not self.spmatrix([0]))
for dtype in self.checked_dtypes:
fails = isinstance(self, TestDOK)
msg = "Cannot create a rank <= 2 DOK matrix."
yield dec.skipif(fails, msg)(check), dtype
def test_bool_rollover(self):
# bool's underlying dtype is 1 byte, check that it does not
# rollover True -> False at 256.
dat = np.matrix([[True, False]])
datsp = self.spmatrix(dat)
for _ in range(10):
datsp = datsp + datsp
dat = dat + dat
assert_array_equal(dat, datsp.todense())
def test_eq(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datbsr = bsr_matrix(dat)
datcsr = csr_matrix(dat)
datcsc = csc_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat == dat2, (datsp == datsp2).todense())
# mix sparse types
assert_array_equal(dat == dat2, (datbsr == datsp2).todense())
assert_array_equal(dat == dat2, (datcsr == datsp2).todense())
assert_array_equal(dat == dat2, (datcsc == datsp2).todense())
assert_array_equal(dat == dat2, (datlil == datsp2).todense())
# sparse/dense
assert_array_equal(dat == datsp2, datsp2 == dat)
# sparse/scalar
assert_array_equal(dat == 0, (datsp == 0).todense())
assert_array_equal(dat == 1, (datsp == 1).todense())
assert_array_equal(dat == np.nan, (datsp == np.nan).todense())
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_ne(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat != dat2, (datsp != datsp2).todense())
# mix sparse types
assert_array_equal(dat != dat2, (datbsr != datsp2).todense())
assert_array_equal(dat != dat2, (datcsc != datsp2).todense())
assert_array_equal(dat != dat2, (datcsr != datsp2).todense())
assert_array_equal(dat != dat2, (datlil != datsp2).todense())
# sparse/dense
assert_array_equal(dat != datsp2, datsp2 != dat)
# sparse/scalar
assert_array_equal(dat != 0, (datsp != 0).todense())
assert_array_equal(dat != 1, (datsp != 1).todense())
assert_array_equal(0 != dat, (0 != datsp).todense())
assert_array_equal(1 != dat, (1 != datsp).todense())
assert_array_equal(dat != np.nan, (datsp != np.nan).todense())
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_lt(self):
def check(dtype):
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(np.complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat < dat2, (datsp < datsp2).todense())
assert_array_equal(datcomplex < dat2, (datspcomplex < datsp2).todense())
# mix sparse types
assert_array_equal(dat < dat2, (datbsr < datsp2).todense())
assert_array_equal(dat < dat2, (datcsc < datsp2).todense())
assert_array_equal(dat < dat2, (datcsr < datsp2).todense())
assert_array_equal(dat < dat2, (datlil < datsp2).todense())
assert_array_equal(dat2 < dat, (datsp2 < datbsr).todense())
assert_array_equal(dat2 < dat, (datsp2 < datcsc).todense())
assert_array_equal(dat2 < dat, (datsp2 < datcsr).todense())
assert_array_equal(dat2 < dat, (datsp2 < datlil).todense())
# sparse/dense
assert_array_equal(dat < dat2, datsp < dat2)
assert_array_equal(datcomplex < dat2, datspcomplex < dat2)
# sparse/scalar
assert_array_equal((datsp < 2).todense(), dat < 2)
assert_array_equal((datsp < 1).todense(), dat < 1)
assert_array_equal((datsp < 0).todense(), dat < 0)
assert_array_equal((datsp < -1).todense(), dat < -1)
assert_array_equal((datsp < -2).todense(), dat < -2)
with np.errstate(invalid='ignore'):
assert_array_equal((datsp < np.nan).todense(), dat < np.nan)
assert_array_equal((2 < datsp).todense(), 2 < dat)
assert_array_equal((1 < datsp).todense(), 1 < dat)
assert_array_equal((0 < datsp).todense(), 0 < dat)
assert_array_equal((-1 < datsp).todense(), -1 < dat)
assert_array_equal((-2 < datsp).todense(), -2 < dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat < datsp2, datsp < dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
with np.errstate(invalid='ignore'):
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_gt(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(np.complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat > dat2, (datsp > datsp2).todense())
assert_array_equal(datcomplex > dat2, (datspcomplex > datsp2).todense())
# mix sparse types
assert_array_equal(dat > dat2, (datbsr > datsp2).todense())
assert_array_equal(dat > dat2, (datcsc > datsp2).todense())
assert_array_equal(dat > dat2, (datcsr > datsp2).todense())
assert_array_equal(dat > dat2, (datlil > datsp2).todense())
assert_array_equal(dat2 > dat, (datsp2 > datbsr).todense())
assert_array_equal(dat2 > dat, (datsp2 > datcsc).todense())
assert_array_equal(dat2 > dat, (datsp2 > datcsr).todense())
assert_array_equal(dat2 > dat, (datsp2 > datlil).todense())
# sparse/dense
assert_array_equal(dat > dat2, datsp > dat2)
assert_array_equal(datcomplex > dat2, datspcomplex > dat2)
# sparse/scalar
assert_array_equal((datsp > 2).todense(), dat > 2)
assert_array_equal((datsp > 1).todense(), dat > 1)
assert_array_equal((datsp > 0).todense(), dat > 0)
assert_array_equal((datsp > -1).todense(), dat > -1)
assert_array_equal((datsp > -2).todense(), dat > -2)
with np.errstate(invalid='ignore'):
assert_array_equal((datsp > np.nan).todense(), dat > np.nan)
assert_array_equal((2 > datsp).todense(), 2 > dat)
assert_array_equal((1 > datsp).todense(), 1 > dat)
assert_array_equal((0 > datsp).todense(), 0 > dat)
assert_array_equal((-1 > datsp).todense(), -1 > dat)
assert_array_equal((-2 > datsp).todense(), -2 > dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat > datsp2, datsp > dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_le(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(np.complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat <= dat2, (datsp <= datsp2).todense())
assert_array_equal(datcomplex <= dat2, (datspcomplex <= datsp2).todense())
# mix sparse types
assert_array_equal((datbsr <= datsp2).todense(), dat <= dat2)
assert_array_equal((datcsc <= datsp2).todense(), dat <= dat2)
assert_array_equal((datcsr <= datsp2).todense(), dat <= dat2)
assert_array_equal((datlil <= datsp2).todense(), dat <= dat2)
assert_array_equal((datsp2 <= datbsr).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datcsc).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datcsr).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datlil).todense(), dat2 <= dat)
# sparse/dense
assert_array_equal(datsp <= dat2, dat <= dat2)
assert_array_equal(datspcomplex <= dat2, datcomplex <= dat2)
# sparse/scalar
assert_array_equal((datsp <= 2).todense(), dat <= 2)
assert_array_equal((datsp <= 1).todense(), dat <= 1)
assert_array_equal((datsp <= -1).todense(), dat <= -1)
assert_array_equal((datsp <= -2).todense(), dat <= -2)
assert_array_equal((2 <= datsp).todense(), 2 <= dat)
assert_array_equal((1 <= datsp).todense(), 1 <= dat)
assert_array_equal((-1 <= datsp).todense(), -1 <= dat)
assert_array_equal((-2 <= datsp).todense(), -2 <= dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat <= datsp2, datsp <= dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_ge(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(np.complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat >= dat2, (datsp >= datsp2).todense())
assert_array_equal(datcomplex >= dat2, (datspcomplex >= datsp2).todense())
# mix sparse types
assert_array_equal((datbsr >= datsp2).todense(), dat >= dat2)
assert_array_equal((datcsc >= datsp2).todense(), dat >= dat2)
assert_array_equal((datcsr >= datsp2).todense(), dat >= dat2)
assert_array_equal((datlil >= datsp2).todense(), dat >= dat2)
assert_array_equal((datsp2 >= datbsr).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datcsc).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datcsr).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datlil).todense(), dat2 >= dat)
# sparse/dense
assert_array_equal(datsp >= dat2, dat >= dat2)
assert_array_equal(datspcomplex >= dat2, datcomplex >= dat2)
# sparse/scalar
assert_array_equal((datsp >= 2).todense(), dat >= 2)
assert_array_equal((datsp >= 1).todense(), dat >= 1)
assert_array_equal((datsp >= -1).todense(), dat >= -1)
assert_array_equal((datsp >= -2).todense(), dat >= -2)
assert_array_equal((2 >= datsp).todense(), 2 >= dat)
assert_array_equal((1 >= datsp).todense(), 1 >= dat)
assert_array_equal((-1 >= datsp).todense(), -1 >= dat)
assert_array_equal((-2 >= datsp).todense(), -2 >= dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# dense data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat >= datsp2, datsp >= dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_empty(self):
# create empty matrices
assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3)))
assert_equal(self.spmatrix((3,3)).nnz, 0)
def test_invalid_shapes(self):
assert_raises(ValueError, self.spmatrix, (-1,3))
assert_raises(ValueError, self.spmatrix, (3,-1))
assert_raises(ValueError, self.spmatrix, (-1,-1))
def test_repr(self):
repr(self.datsp)
def test_str(self):
str(self.datsp)
def test_empty_arithmetic(self):
# Test manipulating empty matrices. Fails in SciPy SVN <= r1768
shape = (5, 5)
for mytype in [np.dtype('int32'), np.dtype('float32'),
np.dtype('float64'), np.dtype('complex64'),
np.dtype('complex128')]:
a = self.spmatrix(shape, dtype=mytype)
b = a + a
c = 2 * a
d = a * a.tocsc()
e = a * a.tocsr()
f = a * a.tocoo()
for m in [a,b,c,d,e,f]:
assert_equal(m.A, a.A*a.A)
# These fail in all revisions <= r1768:
assert_equal(m.dtype,mytype)
assert_equal(m.A.dtype,mytype)
def test_abs(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(abs(A),abs(self.spmatrix(A)).todense())
def test_elementwise_power(self):
A = matrix([[-4, -3, -2],[-1, 0, 1],[2, 3, 4]], 'd')
assert_equal(np.power(A, 2), self.spmatrix(A).power(2).todense())
# element-wise power: the exponent must be a scalar
assert_raises(NotImplementedError, self.spmatrix(A).power, A)
def test_neg(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(-A,(-self.spmatrix(A)).todense())
def test_real(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.real.todense(),D.real)
def test_imag(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.imag.todense(),D.imag)
def test_diagonal(self):
# Does the matrix's .diagonal() method work?
mats = []
mats.append([[1,0,2]])
mats.append([[1],[0],[2]])
mats.append([[0,1],[0,2],[0,3]])
mats.append([[0,0,1],[0,0,2],[0,3,0]])
mats.append(kron(mats[0],[[1,2]]))
mats.append(kron(mats[0],[[1],[2]]))
mats.append(kron(mats[1],[[1,2],[3,4]]))
mats.append(kron(mats[2],[[1,2],[3,4]]))
mats.append(kron(mats[3],[[1,2],[3,4]]))
mats.append(kron(mats[3],[[1,2,3,4]]))
for m in mats:
assert_equal(self.spmatrix(m).diagonal(),diag(m))
@dec.slow
def test_setdiag(self):
def dense_setdiag(a, v, k):
v = np.asarray(v)
if k >= 0:
n = min(a.shape[0], a.shape[1] - k)
if v.ndim != 0:
n = min(n, len(v))
v = v[:n]
i = np.arange(0, n)
j = np.arange(k, k + n)
a[i,j] = v
elif k < 0:
dense_setdiag(a.T, v, -k)
return
def check_setdiag(a, b, k):
# Check setting diagonal using a scalar, a vector of
# correct length, and too short or too long vectors
for r in [-1, len(np.diag(a, k)), 2, 30]:
if r < 0:
v = int(np.random.randint(1, 20, size=1))
else:
v = np.random.randint(1, 20, size=r)
dense_setdiag(a, v, k)
b.setdiag(v, k)
# check that dense_setdiag worked
d = np.diag(a, k)
if np.asarray(v).ndim == 0:
assert_array_equal(d, v, err_msg=msg + " %d" % (r,))
else:
n = min(len(d), len(v))
assert_array_equal(d[:n], v[:n], err_msg=msg + " %d" % (r,))
# check that sparse setdiag worked
assert_array_equal(b.A, a, err_msg=msg + " %d" % (r,))
# comprehensive test
np.random.seed(1234)
for dtype in [np.int8, np.float64]:
for m in [0, 1, 3, 10]:
for n in [0, 1, 3, 10]:
for k in range(-m+1, n-1):
msg = repr((dtype, m, n, k))
a = np.zeros((m, n), dtype=dtype)
b = self.spmatrix((m, n), dtype=dtype)
check_setdiag(a, b, k)
# check overwriting etc
for k2 in np.random.randint(-m+1, n-1, size=12):
check_setdiag(a, b, k2)
# simpler test case
m = self.spmatrix(np.eye(3))
values = [3, 2, 1]
assert_raises(ValueError, m.setdiag, values, k=4)
m.setdiag(values)
assert_array_equal(m.diagonal(), values)
m.setdiag(values, k=1)
assert_array_equal(m.A, np.array([[3, 3, 0],
[0, 2, 2],
[0, 0, 1]]))
m.setdiag(values, k=-2)
assert_array_equal(m.A, np.array([[3, 3, 0],
[0, 2, 2],
[3, 0, 1]]))
m.setdiag((9,), k=2)
assert_array_equal(m.A[0,2], 9)
m.setdiag((9,), k=-2)
assert_array_equal(m.A[2,0], 9)
def test_nonzero(self):
A = array([[1, 0, 1],[0, 1, 1],[0, 0, 1]])
Asp = self.spmatrix(A)
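# Compare nonzero() results as sets of (row, col) pairs so entry ordering does not matter.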
A_nz = set([tuple(ij) for ij in transpose(A.nonzero())])
Asp_nz = set([tuple(ij) for ij in transpose(Asp.nonzero())])
assert_equal(A_nz, Asp_nz)
def test_getrow(self):
assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:])
assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:])
def test_getcol(self):
assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1])
assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1])
def test_sum(self):
np.random.seed(1234)
dat_1 = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
dat_2 = np.random.rand(40, 40)
dat_3 = np.array([[]])
dat_4 = np.zeros((40, 40))
dat_5 = sparse.rand(40, 40, density=1e-2).A
matrices = [dat_1, dat_2, dat_3, dat_4, dat_5]
def check(dtype, j):
dat = np.matrix(matrices[j], dtype=dtype)
datsp = self.spmatrix(dat, dtype=dtype)
assert_array_almost_equal(dat.sum(), datsp.sum())
assert_equal(dat.sum().dtype, datsp.sum().dtype)
assert_array_almost_equal(dat.sum(axis=None), datsp.sum(axis=None))
assert_equal(dat.sum(axis=None).dtype, datsp.sum(axis=None).dtype)
assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0))
assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype)
assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1))
assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype)
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.sum with negative axis arg doesn't work for < 1.7
assert_array_almost_equal(dat.sum(axis=-2), datsp.sum(axis=-2))
assert_equal(dat.sum(axis=-2).dtype, datsp.sum(axis=-2).dtype)
assert_array_almost_equal(dat.sum(axis=-1), datsp.sum(axis=-1))
assert_equal(dat.sum(axis=-1).dtype, datsp.sum(axis=-1).dtype)
for dtype in self.checked_dtypes:
for j in range(len(matrices)):
yield check, dtype, j
def test_mean(self):
def check(dtype):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]], dtype=dtype)
datsp = self.spmatrix(dat, dtype=dtype)
assert_array_almost_equal(dat.mean(), datsp.mean())
assert_equal(dat.mean().dtype, datsp.mean().dtype)
assert_array_almost_equal(dat.mean(axis=None), datsp.mean(axis=None))
assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype)
assert_array_almost_equal(dat.mean(axis=0), datsp.mean(axis=0))
assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype)
assert_array_almost_equal(dat.mean(axis=1), datsp.mean(axis=1))
assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype)
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.mean with negative axis arg doesn't work for < 1.7
assert_array_almost_equal(dat.mean(axis=-2), datsp.mean(axis=-2))
assert_equal(dat.mean(axis=-2).dtype, datsp.mean(axis=-2).dtype)
assert_array_almost_equal(dat.mean(axis=-1), datsp.mean(axis=-1))
assert_equal(dat.mean(axis=-1).dtype, datsp.mean(axis=-1).dtype)
for dtype in self.checked_dtypes:
yield check, dtype
def test_expm(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
Mexp = scipy.linalg.expm(M)
sMexp = expm(sM).todense()
assert_array_almost_equal((sMexp - Mexp), zeros((3, 3)))
N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]])
sN = self.spmatrix(N, shape=(3,3), dtype=float)
Nexp = scipy.linalg.expm(N)
sNexp = expm(sN).todense()
assert_array_almost_equal((sNexp - Nexp), zeros((3, 3)))
def test_inv(self):
def check(dtype):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype)
sM = self.spmatrix(M, shape=(3,3), dtype=dtype)
sMinv = inv(sM)
assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3))
for dtype in [float]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield check, dtype
def test_from_array(self):
A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
A = array([[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_matrix(self):
A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).todense(), A)
A = matrix([[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_list(self):
A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
assert_array_equal(self.spmatrix(A).todense(), A)
A = [[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]]
assert_array_equal(self.spmatrix(A).toarray(), array(A))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16'))
def test_from_sparse(self):
D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
D = array([[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
# def test_array(self):
# """test array(A) where A is in sparse format"""
# assert_equal( array(self.datsp), self.dat )
def test_todense(self):
# Check C-contiguous (default).
chk = self.datsp.todense()
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.todense(order='C')
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.todense(order='F')
assert_array_equal(chk, self.dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with out argument (array).
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
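# With an ndarray out, the returned matrix is a view over the supplied buffer.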
assert_(chk.base is out)
# Check with out array (matrix).
out = np.asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype))
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk is out)
a = matrix([1.,2.,3.])
dense_dot_dense = a * self.dat
check = a * self.datsp.todense()
assert_array_equal(dense_dot_dense, check)
b = matrix([1.,2.,3.,4.]).T
dense_dot_dense = self.dat * b
check2 = self.datsp.todense() * b
assert_array_equal(dense_dot_dense, check2)
# Check bool data works.
spbool = self.spmatrix(self.dat, dtype=bool)
matbool = self.dat.astype(bool)
assert_array_equal(spbool.todense(), matbool)
def test_toarray(self):
# Check C-contiguous (default).
dat = asarray(self.dat)
chk = self.datsp.toarray()
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.toarray(order='C')
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.toarray(order='F')
assert_array_equal(chk, dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with output arg.
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
a = array([1.,2.,3.])
dense_dot_dense = dot(a, dat)
check = dot(a, self.datsp.toarray())
assert_array_equal(dense_dot_dense, check)
b = array([1.,2.,3.,4.])
dense_dot_dense = dot(dat, b)
check2 = dot(self.datsp.toarray(), b)
assert_array_equal(dense_dot_dense, check2)
# Check bool data works.
spbool = self.spmatrix(self.dat, dtype=bool)
arrbool = dat.astype(bool)
assert_array_equal(spbool.toarray(), arrbool)
def test_astype(self):
D = array([[2.0 + 3j, 0, 0],
[0, 4.0 + 5j, 0],
[0, 0, 0]])
S = self.spmatrix(D)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
for x in supported_dtypes:
assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type
assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values
assert_equal(S.astype(x).format, S.format) # format preserved
def test_asfptype(self):
A = self.spmatrix(arange(6,dtype='int32').reshape(2,3))
assert_equal(A.dtype, np.dtype('int32'))
assert_equal(A.asfptype().dtype, np.dtype('float64'))
assert_equal(A.asfptype().format, A.format)
assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32'))
assert_equal(A.astype('complex128').asfptype().dtype, np.dtype('complex128'))
B = A.asfptype()
C = B.asfptype()
assert_(B is C)
def test_mul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal(dat*2,(datsp*2).todense())
assert_array_equal(dat*17.3,(datsp*17.3).todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_rmul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal(2*dat,(2*datsp).todense())
assert_array_equal(17.3*dat,(17.3*datsp).todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_add(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
a = dat.copy()
a[0,2] = 2.0
b = datsp
c = b + a
assert_array_equal(c, b.todense() + a)
c = b + b.tocsr()
assert_array_equal(c.todense(),
b.todense() + b.todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_radd(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
a = dat.copy()
a[0,2] = 2.0
b = datsp
c = a + b
assert_array_equal(c, a + b.todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_sub(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal((datsp - datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((datsp - A).todense(),dat - A.todense())
assert_array_equal((A - datsp).todense(),A.todense() - dat)
for dtype in self.checked_dtypes:
yield check, dtype
def test_rsub(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((dat - A),dat - A.todense())
assert_array_equal((A - dat),A.todense() - dat)
assert_array_equal(A.todense() - datsp,A.todense() - dat)
assert_array_equal(datsp - A.todense(),dat - A.todense())
for dtype in self.checked_dtypes:
if (dtype == np.dtype('bool')) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
yield check, dtype
def test_add0(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Adding 0 to a sparse matrix
assert_array_equal((datsp + 0).todense(), dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * datsp for k in range(1, 3)])
sumD = sum([k * dat for k in range(1, 3)])
assert_almost_equal(sumS.todense(), sumD)
for dtype in self.checked_dtypes:
yield check, dtype
def test_elementwise_multiply(self):
# real/real
A = array([[4,0,9],[2,-3,5]])
B = array([[0,7,0],[0,-4,0]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal(Asp.multiply(Bsp).todense(), A*B) # sparse/sparse
assert_almost_equal(Asp.multiply(B), A*B) # sparse/dense
# complex/complex
C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Csp = self.spmatrix(C)
Dsp = self.spmatrix(D)
assert_almost_equal(Csp.multiply(Dsp).todense(), C*D) # sparse/sparse
assert_almost_equal(Csp.multiply(D), C*D) # sparse/dense
# real/complex
assert_almost_equal(Asp.multiply(Dsp).todense(), A*D) # sparse/sparse
assert_almost_equal(Asp.multiply(D), A*D) # sparse/dense
def test_elementwise_multiply_broadcast(self):
A = array([4])
B = array([[-9]])
C = array([1,-1,0])
D = array([[7,9,-9]])
E = array([[3],[2],[1]])
F = array([[8,6,3],[-4,3,2],[6,6,6]])
G = [1, 2, 3]
H = np.ones((3, 4))
J = H.T
# Rank 1 arrays can't be cast as spmatrices (A and C) so leave
# them out.
Bsp = self.spmatrix(B)
Dsp = self.spmatrix(D)
Esp = self.spmatrix(E)
Fsp = self.spmatrix(F)
Hsp = self.spmatrix(H)
Hspp = self.spmatrix(H[0,None])
Jsp = self.spmatrix(J)
Jspp = self.spmatrix(J[:,0,None])
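# Hspp and Jspp are single-row / single-column matrices used to exercise broadcasting.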
matrices = [A, B, C, D, E, F, G, H, J]
spmatrices = [Bsp, Dsp, Esp, Fsp, Hsp, Hspp, Jsp, Jspp]
# sparse/sparse
for i in spmatrices:
for j in spmatrices:
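# Shapes that fail to broadcast densely should make the sparse multiply raise too.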
try:
dense_mult = np.multiply(i.todense(), j.todense())
except ValueError:
assert_raises(ValueError, i.multiply, j)
continue
sp_mult = i.multiply(j)
if isspmatrix(sp_mult):
assert_almost_equal(sp_mult.todense(), dense_mult)
else:
assert_almost_equal(sp_mult, dense_mult)
# sparse/dense
for i in spmatrices:
for j in matrices:
try:
dense_mult = np.multiply(i.todense(), j)
except ValueError:
assert_raises(ValueError, i.multiply, j)
continue
sp_mult = i.multiply(j)
if isspmatrix(sp_mult):
assert_almost_equal(sp_mult.todense(), dense_mult)
else:
assert_almost_equal(sp_mult, dense_mult)
def test_elementwise_divide(self):
expected = [[1,np.nan,np.nan,1],[1,np.nan,1,np.nan],[np.nan,1,np.nan,np.nan]]
assert_array_equal(todense(self.datsp / self.datsp),expected)
denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
res = matrix([[1,np.nan,np.nan,0.5],[-3,np.nan,inf,np.nan],[np.nan,0.25,np.nan,np.nan]],'d')
assert_array_equal(todense(self.datsp / denom),res)
# complex
A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal(todense(Asp / Bsp), A/B)
def test_pow(self):
A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]])
B = self.spmatrix(A)
for exponent in [0,1,2,3]:
assert_array_equal((B**exponent).todense(),A**exponent)
# invalid exponents
for exponent in [-1, 2.2, 1 + 3j]:
assert_raises(Exception, B.__pow__, exponent)
# nonsquare matrix
B = self.spmatrix(A[:3,:])
assert_raises(Exception, B.__pow__, 1)
def test_rmatvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray()))
row = matrix([[1,2,3,4]])
assert_array_almost_equal(row*M, row*M.todense())
def test_small_multiplication(self):
# test that A*x works for x with shape (), (1,), and (1,1)
A = self.spmatrix([[1],[2],[3]])
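# A 0-d operand keeps the result sparse; 1-d and 2-d operands behave like the dense product.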
assert_(isspmatrix(A * array(1)))
assert_equal((A * array(1)).todense(), [[1],[2],[3]])
assert_equal(A * array([1]), array([1,2,3]))
assert_equal(A * array([[1]]), array([[1],[2],[3]]))
def test_binop_custom_type(self):
# Non-regression test: previously, binary operations would raise
# NotImplementedError instead of returning NotImplemented
# (https://docs.python.org/library/constants.html#NotImplemented)
# so overloading Custom + matrix etc. didn't work.
A = self.spmatrix([[1], [2], [3]])
B = BinopTester()
assert_equal(A + B, "matrix on the left")
assert_equal(A - B, "matrix on the left")
assert_equal(A * B, "matrix on the left")
assert_equal(B + A, "matrix on the right")
assert_equal(B - A, "matrix on the right")
assert_equal(B * A, "matrix on the right")
def test_matvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
col = matrix([1,2,3]).T
assert_array_almost_equal(M * col, M.todense() * col)
# check result dimensions (ticket #514)
assert_equal((M * array([1,2,3])).shape,(4,))
assert_equal((M * array([[1],[2],[3]])).shape,(4,1))
assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1))
# check result type
assert_(isinstance(M * array([1,2,3]), ndarray))
assert_(isinstance(M * matrix([1,2,3]).T, matrix))
# ensure exception is raised for improper dimensions
bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
matrix([1,2,3]), matrix([[1],[2]])]
for x in bad_vecs:
assert_raises(ValueError, M.__mul__, x)
# Should this be supported or not?!
# flat = array([1,2,3])
# assert_array_almost_equal(M*flat, M.todense()*flat)
# Currently numpy dense matrices promote the result to a 1x3 matrix,
# whereas sparse matrices leave the result as a rank-1 array. Which
# is preferable?
# Note: the following command does not work. Both NumPy matrices
# and spmatrices should raise exceptions!
# assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3])
# The current relationship between sparse matrix products and array
# products is as follows:
assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3]))
assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T)
# Note that the result of M * x is dense if x has a singleton dimension.
# Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
# is rank-2. Is this desirable?
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal(asp*b, a*b)
assert_array_almost_equal(a*bsp, a*b)
assert_array_almost_equal(a2*bsp, a*b)
# Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
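# Randomly fill about 30% of the entries with values in [0, 1).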
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix([[1,2],[3,4]])
B = self.spmatrix([[1,2],[3,4],[5,6]])
assert_raises(ValueError, A.__mul__, B)
def test_matmat_dense(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
asp = self.spmatrix(a)
# check both array and matrix types
bs = [array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]])]
for b in bs:
result = asp*b
assert_(isinstance(result, type(b)))
assert_equal(result.shape, (4,2))
assert_equal(result, dot(a,b))
def test_sparse_format_conversions(self):
A = sparse.kron([[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]])
D = A.todense()
A = self.spmatrix(A)
for format in ['bsr','coo','csc','csr','dia','dok','lil']:
a = A.asformat(format)
assert_equal(a.format,format)
assert_array_equal(a.todense(), D)
b = self.spmatrix(D+3j).asformat(format)
assert_equal(b.format,format)
assert_array_equal(b.todense(), D+3j)
c = eval(format + '_matrix')(A)
assert_equal(c.format,format)
assert_array_equal(c.todense(), D)
def test_tobsr(self):
x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
y = array([[0,1,2],[3,0,5]])
A = kron(x,y)
Asp = self.spmatrix(A)
for format in ['bsr']:
fn = getattr(Asp, 'to' + format)
for X in [1, 2, 3, 6]:
for Y in [1, 2, 3, 4, 6, 12]:
assert_equal(fn(blocksize=(X,Y)).todense(), A)
def test_transpose(self):
dat_1 = self.dat
dat_2 = np.array([[]])
matrices = [dat_1, dat_2]
def check(dtype, j):
dat = np.matrix(matrices[j], dtype=dtype)
datsp = self.spmatrix(dat)
a = datsp.transpose()
b = dat.transpose()
assert_array_equal(a.todense(), b)
assert_array_equal(a.transpose().todense(), dat)
assert_equal(a.dtype, b.dtype)
assert_array_equal(self.spmatrix((3,4)).T.todense(), zeros((4,3)))
for dtype in self.checked_dtypes:
for j in range(len(matrices)):
yield check, dtype, j
def test_add_dense(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# adding a dense matrix to a sparse matrix
sum1 = dat + datsp
assert_array_equal(sum1, dat + dat)
sum2 = datsp + dat
assert_array_equal(sum2, dat + dat)
for dtype in self.checked_dtypes:
yield check, dtype
def test_sub_dense(self):
# subtracting a dense matrix from a sparse matrix and vice versa
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Behavior is different for bool.
if dat.dtype == bool:
sum1 = dat - datsp
assert_array_equal(sum1, dat - dat)
sum2 = datsp - dat
assert_array_equal(sum2, dat - dat)
else:
# Manually add to avoid upcasting from scalar
# multiplication.
sum1 = (dat + dat + dat) - datsp
assert_array_equal(sum1, dat + dat)
sum2 = (datsp + datsp + datsp) - dat
assert_array_equal(sum2, dat + dat)
for dtype in self.checked_dtypes:
if (dtype == np.dtype('bool')) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
yield check, dtype
def test_maximum_minimum(self):
A_dense = np.array([[1, 0, 3], [0, 4, 5], [0, 0, 0]])
B_dense = np.array([[1, 1, 2], [0, 3, 6], [1, -1, 0]])
A_dense_cpx = np.array([[1, 0, 3], [0, 4+2j, 5], [0, 1j, -1j]])
def check(dtype, dtype2, btype):
if np.issubdtype(dtype, np.complexfloating):
A = self.spmatrix(A_dense_cpx.astype(dtype))
else:
A = self.spmatrix(A_dense.astype(dtype))
if btype == 'scalar':
B = dtype2.type(1)
elif btype == 'scalar2':
B = dtype2.type(-1)
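# With a negative scalar the implicit zeros win under maximum and lose under minimum.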
elif btype == 'dense':
B = B_dense.astype(dtype2)
elif btype == 'sparse':
B = self.spmatrix(B_dense.astype(dtype2))
else:
raise ValueError()
max_s = A.maximum(B)
max_d = np.maximum(todense(A), todense(B))
assert_array_equal(todense(max_s), max_d)
assert_equal(max_s.dtype, max_d.dtype)
min_s = A.minimum(B)
min_d = np.minimum(todense(A), todense(B))
assert_array_equal(todense(min_s), min_d)
assert_equal(min_s.dtype, min_d.dtype)
for dtype in self.checked_dtypes:
for dtype2 in [np.int8, np.float_, np.complex_]:
for btype in ['scalar', 'scalar2', 'dense', 'sparse']:
yield check, np.dtype(dtype), np.dtype(dtype2), btype
def test_copy(self):
# Check whether the copy=True and copy=False keywords work
A = self.datsp
# check that copy preserves format
assert_equal(A.copy().format, A.format)
assert_equal(A.__class__(A,copy=True).format, A.format)
assert_equal(A.__class__(A,copy=False).format, A.format)
assert_equal(A.copy().todense(), A.todense())
assert_equal(A.__class__(A,copy=True).todense(), A.todense())
assert_equal(A.__class__(A,copy=False).todense(), A.todense())
# check that XXX_matrix.toXXX() works
toself = getattr(A,'to' + A.format)
assert_equal(toself().format, A.format)
assert_equal(toself(copy=True).format, A.format)
assert_equal(toself(copy=False).format, A.format)
assert_equal(toself().todense(), A.todense())
assert_equal(toself(copy=True).todense(), A.todense())
assert_equal(toself(copy=False).todense(), A.todense())
# check whether the data is copied?
# TODO: deal with non-indexable types somehow
B = A.copy()
try:
B[0,0] += 1
assert_(B[0,0] != A[0,0])
except NotImplementedError:
# not all sparse matrices can be indexed
pass
except TypeError:
# not all sparse matrices can be indexed
pass
# test that __iter__ is compatible with NumPy matrix
def test_iterator(self):
B = np.matrix(np.arange(50).reshape(5, 10))
A = self.spmatrix(B)
for x, y in zip(A, B):
assert_equal(x.todense(), y)
def test_size_zero_matrix_arithmetic(self):
# Test basic matrix arithmetic with shapes like (0,0), (10,0),
# (0, 3), etc.
mat = np.matrix([])
a = mat.reshape((0, 0))
b = mat.reshape((0, 1))
c = mat.reshape((0, 5))
d = mat.reshape((1, 0))
e = mat.reshape((5, 0))
f = np.matrix(np.ones([5, 5]))
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
csp = self.spmatrix(c)
dsp = self.spmatrix(d)
esp = self.spmatrix(e)
fsp = self.spmatrix(f)
# matrix product.
assert_array_equal(asp.dot(asp).A, np.dot(a, a).A)
assert_array_equal(bsp.dot(dsp).A, np.dot(b, d).A)
assert_array_equal(dsp.dot(bsp).A, np.dot(d, b).A)
assert_array_equal(csp.dot(esp).A, np.dot(c, e).A)
assert_array_equal(csp.dot(fsp).A, np.dot(c, f).A)
assert_array_equal(esp.dot(csp).A, np.dot(e, c).A)
assert_array_equal(dsp.dot(csp).A, np.dot(d, c).A)
assert_array_equal(fsp.dot(esp).A, np.dot(f, e).A)
# bad matrix products
assert_raises(ValueError, dsp.dot, e)
assert_raises(ValueError, asp.dot, d)
# element-wise multiplication
assert_array_equal(asp.multiply(asp).A, np.multiply(a, a).A)
assert_array_equal(bsp.multiply(bsp).A, np.multiply(b, b).A)
assert_array_equal(dsp.multiply(dsp).A, np.multiply(d, d).A)
assert_array_equal(asp.multiply(a).A, np.multiply(a, a).A)
assert_array_equal(bsp.multiply(b).A, np.multiply(b, b).A)
assert_array_equal(dsp.multiply(d).A, np.multiply(d, d).A)
assert_array_equal(asp.multiply(6).A, np.multiply(a, 6).A)
assert_array_equal(bsp.multiply(6).A, np.multiply(b, 6).A)
assert_array_equal(dsp.multiply(6).A, np.multiply(d, 6).A)
# bad element-wise multiplication
assert_raises(ValueError, asp.multiply, c)
assert_raises(ValueError, esp.multiply, c)
# Addition
assert_array_equal(asp.__add__(asp).A, a.__add__(a).A)
assert_array_equal(bsp.__add__(bsp).A, b.__add__(b).A)
assert_array_equal(dsp.__add__(dsp).A, d.__add__(d).A)
# bad addition
assert_raises(ValueError, asp.__add__, dsp)
assert_raises(ValueError, bsp.__add__, asp)
def test_size_zero_conversions(self):
mat = np.matrix([])
a = mat.reshape((0, 0))
b = mat.reshape((0, 5))
c = mat.reshape((5, 0))
for m in [a, b, c]:
spm = self.spmatrix(m)
assert_array_equal(spm.tocoo().A, m)
assert_array_equal(spm.tocsr().A, m)
assert_array_equal(spm.tocsc().A, m)
assert_array_equal(spm.tolil().A, m)
assert_array_equal(spm.todok().A, m)
assert_array_equal(spm.tobsr().A, m)
def test_unary_ufunc_overrides(self):
def check(name):
if not HAS_NUMPY_UFUNC:
if name == "sign":
raise nose.SkipTest("sign conflicts with comparison op "
"support on Numpy without __numpy_ufunc__")
if self.spmatrix in (dok_matrix, lil_matrix):
raise nose.SkipTest("Unary ops not implemented for dok/lil "
"with Numpy without __numpy_ufunc__")
ufunc = getattr(np, name)
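# Values in [0, 1) keep domain-restricted ufuncs such as arcsin and arctanh valid.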
X = self.spmatrix(np.arange(20).reshape(4, 5) / 20.)
X0 = ufunc(X.toarray())
X2 = ufunc(X)
assert_array_equal(X2.toarray(), X0)
if HAS_NUMPY_UFUNC:
# the out argument doesn't work on Numpy without __numpy_ufunc__
out = np.zeros_like(X0)
X3 = ufunc(X, out=out)
assert_(X3 is out)
assert_array_equal(todense(X3), ufunc(todense(X)))
out = csc_matrix(out.shape, dtype=out.dtype)
out[:,1] = 999
X4 = ufunc(X, out=out)
assert_(X4 is out)
assert_array_equal(todense(X4), ufunc(todense(X)))
for name in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt",
"abs"]:
yield check, name
def test_binary_ufunc_overrides(self):
# data
a = np.array([[1, 2, 3],
[4, 5, 0],
[7, 8, 9]])
b = np.array([[9, 8, 7],
[6, 0, 0],
[3, 2, 1]])
c = 1.0
d = 1 + 2j
e = 5
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
a_items = dict(dense=a, scalar=c, cplx_scalar=d, int_scalar=e, sparse=asp)
b_items = dict(dense=b, scalar=c, cplx_scalar=d, int_scalar=e, sparse=bsp)
@dec.skipif(not HAS_NUMPY_UFUNC, "feature requires Numpy with __numpy_ufunc__")
def check(i, j, dtype):
ax = a_items[i]
bx = b_items[j]
if issparse(ax):
ax = ax.astype(dtype)
if issparse(bx):
bx = bx.astype(dtype)
a = todense(ax)
b = todense(bx)
def check_one(ufunc, allclose=False):
# without out argument
expected = ufunc(a, b)
got = ufunc(ax, bx)
if allclose:
assert_allclose(todense(got), expected,
rtol=5e-15, atol=0)
else:
assert_array_equal(todense(got), expected)
# with out argument
out = np.zeros(got.shape, dtype=got.dtype)
out.fill(np.nan)
got = ufunc(ax, bx, out=out)
assert_(got is out)
if allclose:
assert_allclose(todense(got), expected,
rtol=5e-15, atol=0)
else:
assert_array_equal(todense(got), expected)
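# A sparse out argument that already holds data must be overwritten correctly as well.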
out = csr_matrix(got.shape, dtype=out.dtype)
out[0,:] = 999
got = ufunc(ax, bx, out=out)
assert_(got is out)
if allclose:
assert_allclose(todense(got), expected,
rtol=5e-15, atol=0)
else:
assert_array_equal(todense(got), expected)
# -- associative
# multiply
check_one(np.multiply)
# add
if isscalarlike(ax) or isscalarlike(bx):
try:
check_one(np.add)
except NotImplementedError:
# Not implemented for all spmatrix types
pass
else:
check_one(np.add)
# maximum
check_one(np.maximum)
# minimum
check_one(np.minimum)
# -- non-associative
# dot
check_one(np.dot)
# subtract
if isscalarlike(ax) or isscalarlike(bx):
try:
check_one(np.subtract)
except NotImplementedError:
# Not implemented for all spmatrix types
pass
else:
check_one(np.subtract)
# divide
with np.errstate(divide='ignore', invalid='ignore'):
if isscalarlike(bx):
# Rounding error may be different, as the sparse implementation
# computes a/b -> a * (1/b) if b is a scalar
check_one(np.divide, allclose=True)
else:
check_one(np.divide)
# true_divide
if isscalarlike(bx):
check_one(np.true_divide, allclose=True)
else:
check_one(np.true_divide)
for i in a_items.keys():
for j in b_items.keys():
for dtype in [np.int_, np.float_, np.complex_]:
if i == 'sparse' or j == 'sparse':
yield check, i, j, dtype
@dec.skipif(not HAS_NUMPY_UFUNC, "feature requires Numpy with __numpy_ufunc__")
def test_ufunc_object_array(self):
# This tests compatibility with previous Numpy object array
# ufunc behavior. See gh-3345.
a = self.spmatrix([[1, 2]])
b = self.spmatrix([[3], [4]])
c = self.spmatrix([[5], [6]])
# Should distribute the operation across the object array
d = np.multiply(a, np.array([[b], [c]]))
assert_(d.dtype == np.object_)
assert_(d.shape == (2, 1))
assert_allclose(d[0,0].A, (a*b).A)
assert_allclose(d[1,0].A, (a*c).A)
# Lists also get cast to object arrays
d = np.multiply(a, [[b], [c]])
assert_(d.dtype == np.object_)
assert_(d.shape == (2, 1))
assert_allclose(d[0,0].A, (a*b).A)
assert_allclose(d[1,0].A, (a*c).A)
# This returned NotImplemented in Numpy < 1.9; do it properly now
d = np.multiply(np.array([[b], [c]]), a)
assert_(d.dtype == np.object_)
assert_(d.shape == (2, 1))
assert_allclose(d[0,0].A, (b*a).A)
assert_allclose(d[1,0].A, (c*a).A)
d = np.subtract(np.array(b, dtype=object), c)
assert_(isinstance(d, sparse.spmatrix))
assert_allclose(d.A, (b - c).A)
class _TestInplaceArithmetic:
def test_inplace_dense(self):
a = np.ones((3, 4))
b = self.spmatrix(a)
with warnings.catch_warnings():
if not HAS_NUMPY_UFUNC:
warnings.simplefilter("ignore", DeprecationWarning)
x = a.copy()
y = a.copy()
x += a
y += b
assert_array_equal(x, y)
x = a.copy()
y = a.copy()
x -= a
y -= b
assert_array_equal(x, y)
# This is matrix product, from __rmul__
assert_raises(ValueError, operator.imul, x, b)
x = a.copy()
y = a.copy()
x = x.dot(a.T)
y *= b.T
assert_array_equal(x, y)
# Matrix (non-elementwise) floor division is not defined
assert_raises(TypeError, operator.ifloordiv, x, b)
def test_imul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Avoid implicit casting.
if np.can_cast(type(2), dtype, casting='same_kind'):
a = datsp.copy()
a *= 2
b = dat.copy()
b *= 2
assert_array_equal(b, a.todense())
if np.can_cast(type(17.3), dtype, casting='same_kind'):
a = datsp.copy()
a *= 17.3
b = dat.copy()
b *= 17.3
assert_array_equal(b, a.todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_idiv_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
if np.can_cast(type(2), dtype, casting='same_kind'):
a = datsp.copy()
a /= 2
b = dat.copy()
b /= 2
assert_array_equal(b, a.todense())
if np.can_cast(type(17.3), dtype, casting='same_kind'):
a = datsp.copy()
a /= 17.3
b = dat.copy()
b /= 17.3
assert_array_equal(b, a.todense())
for dtype in self.checked_dtypes:
# /= should only be used with float dtypes to avoid implicit
# casting.
if not np.can_cast(dtype, np.int_):
yield check, dtype
def test_inplace_success(self):
# Inplace ops should work even if a specialized version is not
# implemented, falling back to x = x <op> y
a = self.spmatrix(np.eye(5))
b = self.spmatrix(np.eye(5))
bp = self.spmatrix(np.eye(5))
b += a
bp = bp + a
assert_allclose(b.A, bp.A)
b *= a
bp = bp * a
assert_allclose(b.A, bp.A)
b -= a
bp = bp - a
assert_allclose(b.A, bp.A)
assert_raises(TypeError, operator.ifloordiv, a, b)
class _TestGetSet:
def test_getelement(self):
def check(dtype):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]], dtype=dtype)
A = self.spmatrix(D)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
assert_raises((IndexError, TypeError), A.__getitem__, ij)
for dtype in supported_dtypes:
yield check, np.dtype(dtype)
def test_setelement(self):
def check(dtype):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((3,4), dtype=dtype)
A[0, 0] = dtype.type(0) # bug 870
A[1, 2] = dtype.type(4.0)
A[0, 1] = dtype.type(3)
A[2, 0] = dtype.type(2.0)
A[0,-1] = dtype.type(8)
A[-1,-2] = dtype.type(7)
A[0, 1] = dtype.type(5)
if dtype != np.bool_:
assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
assert_raises(IndexError, A.__setitem__, ij, 123.0)
for v in [[1,2,3], array([1,2,3])]:
assert_raises(ValueError, A.__setitem__, (0,0), v)
if (not np.issubdtype(dtype, np.complexfloating) and
dtype != np.bool_):
for v in [3j]:
assert_raises(TypeError, A.__setitem__, (0,0), v)
for dtype in supported_dtypes:
yield check, np.dtype(dtype)
def test_negative_index_assignment(self):
# Regression test for github issue 4428.
def check(dtype):
A = self.spmatrix((3, 10), dtype=dtype)
A[0, -4] = 1
assert_equal(A[0, -4], 1)
for dtype in self.checked_dtypes:
yield check, np.dtype(dtype)
def test_scalar_assign_2(self):
n, m = (5, 10)
def _test_set(i, j, nitems):
msg = "%r ; %r ; %r" % (i, j, nitems)
A = self.spmatrix((n, m))
A[i, j] = 1
assert_almost_equal(A.sum(), nitems, err_msg=msg)
assert_almost_equal(A[i, j], 1, err_msg=msg)
# [i,j]
for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
(array(-1), array(-2))]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
_test_set(i, j, 1)
def test_index_scalar_assign(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((5, 5))
B = np.zeros((5, 5))
for C in [A, B]:
C[0,1] = 1
C[3,0] = 4
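# Reassigning the same entry must overwrite the previous value, not accumulate.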
C[3,0] = 9
assert_array_equal(A.toarray(), B)
class _TestSolve:
def test_solve(self):
# Test whether the lu_solve command segfaults, as reported by Nils
# Wagner for a 64-bit machine, 02 March 2005 (EJS)
n = 20
np.random.seed(0) # make tests repeatable
A = zeros((n,n), dtype=complex)
x = np.random.rand(n)
y = np.random.rand(n-1)+1j*np.random.rand(n-1)
r = np.random.rand(n)
for i in range(len(x)):
A[i,i] = x[i]
for i in range(len(y)):
A[i,i+1] = y[i]
A[i+1,i] = conjugate(y[i])
A = self.spmatrix(A)
x = splu(A).solve(r)
assert_almost_equal(A*x,r)
class _TestSlicing:
def test_dtype_preservation(self):
assert_equal(self.spmatrix((1,10), dtype=np.int16)[0,1:5].dtype, np.int16)
assert_equal(self.spmatrix((1,10), dtype=np.int32)[0,1:5].dtype, np.int32)
assert_equal(self.spmatrix((1,10), dtype=np.float32)[0,1:5].dtype, np.float32)
assert_equal(self.spmatrix((1,10), dtype=np.float64)[0,1:5].dtype, np.float64)
def test_get_horiz_slice(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[1,:], A[1,:].todense())
assert_array_equal(B[1,2:5], A[1,2:5].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1, 1:3], D[1, 1:3].todense())
# Now test slicing when a row contains only zeros
E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1, 1:3], F[1, 1:3].todense())
assert_array_equal(E[2, -2:], F[2, -2:].A)
# The following should raise exceptions:
assert_raises(IndexError, A.__getitem__, (slice(None), 11))
assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
def test_get_vert_slice(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[2:5,0], A[2:5,0].todense())
assert_array_equal(B[:,1], A[:,1].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1:3, 1], D[1:3, 1].todense())
assert_array_equal(C[:, 2], D[:, 2].todense())
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[:, 1], F[:, 1].todense())
assert_array_equal(E[-2:, 2], F[-2:, 2].todense())
# The following should raise exceptions:
assert_raises(IndexError, A.__getitem__, (slice(None), 11))
assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
def test_get_slices(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3])
assert_array_equal(A[1:,:-1].todense(), B[1:,:-1])
assert_array_equal(A[:-1,1:].todense(), B[:-1,1:])
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense())
assert_array_equal(E[:, 1:], F[:, 1:].todense())
def test_non_unit_stride_2d_indexing(self):
# Regression test -- used to silently ignore the stride.
v0 = np.random.rand(50, 50)
try:
v = self.spmatrix(v0)[0:25:2, 2:30:3]
except ValueError:
# if unsupported
raise nose.SkipTest("feature not implemented")
assert_array_equal(v.todense(),
v0[0:25:2, 2:30:3])
def test_slicing_2(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
# [i,j]
assert_equal(A[2,3], B[2,3])
assert_equal(A[-1,8], B[-1,8])
assert_equal(A[-1,-2],B[-1,-2])
assert_equal(A[array(-1),-2],B[-1,-2])
assert_equal(A[-1,array(-2)],B[-1,-2])
assert_equal(A[array(-1),array(-2)],B[-1,-2])
# [i,1:2]
assert_equal(A[2,:].todense(), B[2,:])
assert_equal(A[2,5:-2].todense(),B[2,5:-2])
assert_equal(A[array(2),5:-2].todense(),B[2,5:-2])
# [1:2,j]
assert_equal(A[:,2].todense(), B[:,2])
assert_equal(A[3:4,9].todense(), B[3:4,9])
assert_equal(A[1:4,-5].todense(),B[1:4,-5])
assert_equal(A[2:-1,3].todense(),B[2:-1,3])
assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3])
# [1:2,1:2]
assert_equal(A[1:2,1:2].todense(),B[1:2,1:2])
assert_equal(A[4:,3:].todense(), B[4:,3:])
assert_equal(A[:4,:5].todense(), B[:4,:5])
assert_equal(A[2:-1,:5].todense(),B[2:-1,:5])
# [i]
assert_equal(A[1,:].todense(), B[1,:])
assert_equal(A[-2,:].todense(),B[-2,:])
assert_equal(A[array(-2),:].todense(),B[-2,:])
# [1:2]
assert_equal(A[1:4].todense(), B[1:4])
assert_equal(A[1:-2].todense(),B[1:-2])
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_slicing_3(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
array(-1), np.int8(-3)]
def check_1(a):
x = A[a]
y = B[a]
if y.shape == ():
assert_equal(x, y, repr(a))
else:
if x.size == 0 and y.size == 0:
pass
else:
assert_array_equal(x.todense(), y, repr(a))
for j, a in enumerate(slices):
yield check_1, a
def check_2(a, b):
# Indexing np.matrix with 0-d arrays seems to be broken,
# as they seem not to be treated as scalars.
# https://github.com/numpy/numpy/issues/3110
if isinstance(a, np.ndarray):
ai = int(a)
else:
ai = a
if isinstance(b, np.ndarray):
bi = int(b)
else:
bi = b
x = A[a, b]
y = B[ai, bi]
if y.shape == ():
assert_equal(x, y, repr((a, b)))
else:
if x.size == 0 and y.size == 0:
pass
else:
assert_array_equal(x.todense(), y, repr((a, b)))
for i, a in enumerate(slices):
for j, b in enumerate(slices):
yield check_2, a, b
def test_ellipsis_slicing(self):
b = asmatrix(arange(50).reshape(5,10))
a = self.spmatrix(b)
assert_array_equal(a[...].A, b[...].A)
assert_array_equal(a[...,].A, b[...,].A)
assert_array_equal(a[1, ...].A, b[1, ...].A)
assert_array_equal(a[..., 1].A, b[..., 1].A)
assert_array_equal(a[1:, ...].A, b[1:, ...].A)
assert_array_equal(a[..., 1:].A, b[..., 1:].A)
assert_array_equal(a[1:, 1, ...].A, b[1:, 1, ...].A)
assert_array_equal(a[1, ..., 1:].A, b[1, ..., 1:].A)
# These return ints
assert_equal(a[1, 1, ...], b[1, 1, ...])
assert_equal(a[1, ..., 1], b[1, ..., 1])
@dec.skipif(NumpyVersion(np.__version__) >= '1.9.0.dev')
def test_multiple_ellipsis_slicing(self):
b = asmatrix(arange(50).reshape(5,10))
a = self.spmatrix(b)
assert_array_equal(a[..., ...].A, b[..., ...].A)
assert_array_equal(a[..., ..., ...].A, b[..., ..., ...].A)
assert_array_equal(a[1, ..., ...].A, b[1, ..., ...].A)
assert_array_equal(a[1:, ..., ...].A, b[1:, ..., ...].A)
assert_array_equal(a[..., ..., 1:].A, b[..., ..., 1:].A)
# Bug in NumPy's slicing
assert_array_equal(a[..., ..., 1].A, b[..., ..., 1].A.reshape((5,1)))
class _TestSlicingAssign:
def test_slice_scalar_assign(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((5, 5))
B = np.zeros((5, 5))
for C in [A, B]:
C[0:1,1] = 1
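# 3:0 is an empty slice, so the next assignment should be a no-op for both dense and sparse.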
C[3:0,0] = 4
C[3:4,0] = 9
C[0,4:] = 1
C[3::-1,4:] = 9
assert_array_equal(A.toarray(), B)
def test_slice_assign_2(self):
n, m = (5, 10)
def _test_set(i, j):
msg = "i=%r; j=%r" % (i, j)
A = self.spmatrix((n, m))
A[i, j] = 1
B = np.zeros((n, m))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B, err_msg=msg)
# [i,1:2]
for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)),
(array(2), slice(5, -2))]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
_test_set(i, j)
def test_self_self_assignment(self):
# Tests whether a row of one sparse matrix can be assigned to
# another.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
B = self.spmatrix((4,3))
B[0,0] = 2
B[1,2] = 7
B[2,1] = 3
B[3,0] = 10
A = B / 10
B[0,:] = A[0,:]
assert_array_equal(A[0,:].A, B[0,:].A)
A = B / 10
B[:,:] = A[:1,:1]
assert_equal(A[0,0], B[3,2])
A = B / 10
B[:-1,0] = A[0,:].T
assert_array_equal(A[0,:].A.T, B[:-1,0].A)
def test_slice_assignment(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
B = self.spmatrix((4,3))
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
expected = array([[10,0,0],
[0,0,6],
[0,14,0],
[0,0,0]])
B[:,:] = B+B
assert_array_equal(B.todense(),expected)
block = [[1,0],[0,4]]
B[:2,:2] = csc_matrix(array(block))
assert_array_equal(B.todense()[:2,:2],block)
def test_set_slice(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((5,10))
B = matrix(zeros((5,10), float))
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
array(-1), np.int8(-3)]
for j, a in enumerate(slices):
A[a] = j
B[a] = j
assert_array_equal(A.todense(), B, repr(a))
for i, a in enumerate(slices):
for j, b in enumerate(slices):
A[a,b] = 10*i + 1000*(j+1)
B[a,b] = 10*i + 1000*(j+1)
assert_array_equal(A.todense(), B, repr((a, b)))
A[0, 1:10:2] = xrange(1,10,2)
B[0, 1:10:2] = xrange(1,10,2)
assert_array_equal(A.todense(), B)
A[1:5:2,0] = np.array(range(1,5,2))[:,None]
B[1:5:2,0] = np.array(range(1,5,2))[:,None]
assert_array_equal(A.todense(), B)
# The next commands should raise exceptions
assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100)))
assert_raises(ValueError, A.__setitem__, (0, 0), arange(100))
assert_raises(ValueError, A.__setitem__, (0, slice(None)),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy())
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4])
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4], [4, 1, 3]],
[[1, 2, 4], [0, 1, 3]]), [2, 3, 4])
class _TestFancyIndexing:
"""Tests fancy indexing features. The tests for any matrix formats
that implement these features should derive from this class.
"""
def test_bad_index(self):
A = self.spmatrix(np.zeros([5, 5]))
assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo")
assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo"))
assert_raises((IndexError, ValueError), A.__getitem__,
([1, 2, 3], [1, 2, 3, 4]))
def test_fancy_indexing(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
# [i]
assert_equal(A[[1,3]].todense(), B[[1,3]])
# [i,[1,2]]
assert_equal(A[3,[1,3]].todense(), B[3,[1,3]])
assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]])
# [1:2,[1,2]]
assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]])
assert_equal(A[3:4,[9]].todense(), B[3:4,[9]])
assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]])
assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]])
# [[1,2],j]
assert_equal(A[[1,3],3].todense(), B[[1,3],3])
assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4])
assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4])
# [[1,2],1:2]
assert_equal(A[[1,3],:].todense(), B[[1,3],:])
assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1])
assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1])
# [[1,2],[1,2]]
assert_equal(todense(A[[1,3],[2,4]]), B[[1,3],[2,4]])
assert_equal(todense(A[[-1,-3],[2,-4]]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[array([-1,-3]),[2,-4]]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[[-1,-3],array([2,-4])]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[array([-1,-3]),array([2,-4])]), B[[-1,-3],[2,-4]])
# [[[1],[2]],[1,2]]
assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]])
assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
# [[1,2]]
assert_equal(A[[1,3]].todense(), B[[1,3]])
assert_equal(A[[-1,-3]].todense(),B[[-1,-3]])
assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]])
# [[1,2],:][:,[1,2]]
assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]])
assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]])
assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]])
# [:,[1,2]][[1,2],:]
assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:])
assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:])
assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:])
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_fancy_indexing_randomized(self):
np.random.seed(1234) # make runs repeatable
NUM_SAMPLES = 50
M = 6
N = 4
D = np.asmatrix(np.random.rand(M,N))
D = np.multiply(D, D > 0.5)
I = np.random.random_integers(-M + 1, M - 1, size=NUM_SAMPLES)
J = np.random.random_integers(-N + 1, N - 1, size=NUM_SAMPLES)
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
I_bad = I + M
J_bad = J - N
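# The shifted index arrays contain out-of-range entries, so indexing must raise.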
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
def test_fancy_indexing_boolean(self):
np.random.seed(1234) # make runs repeatable
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
I = np.array(np.random.randint(0, 2, size=5), dtype=bool)
J = np.array(np.random.randint(0, 2, size=10), dtype=bool)
X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
assert_equal(todense(A[I]), B[I])
assert_equal(todense(A[:,J]), B[:, J])
assert_equal(todense(A[X]), B[X])
assert_equal(todense(A[B > 9]), B[B > 9])
I = np.array([True, False, True, True, False])
J = np.array([False, True, True, False, True])
assert_equal(todense(A[I, J]), B[I, J])
Z1 = np.zeros((6, 11), dtype=bool)
Z2 = np.zeros((6, 11), dtype=bool)
Z2[0,-1] = True
Z3 = np.zeros((6, 11), dtype=bool)
Z3[-1,0] = True
assert_equal(A[Z1], np.array([]))
assert_raises(IndexError, A.__getitem__, Z2)
assert_raises(IndexError, A.__getitem__, Z3)
assert_raises((IndexError, ValueError), A.__getitem__, (X, 1))
def test_fancy_indexing_sparse_boolean(self):
np.random.seed(1234) # make runs repeatable
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
Xsp = csr_matrix(X)
assert_equal(todense(A[Xsp]), B[X])
assert_equal(todense(A[A > 9]), B[B > 9])
Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool)
Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool)
Zsp = csr_matrix(Z)
Ysp = csr_matrix(Y)
assert_raises(IndexError, A.__getitem__, Zsp)
assert_raises(IndexError, A.__getitem__, Ysp)
assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1))
def test_fancy_indexing_regression_3087(self):
mat = self.spmatrix(array([[1, 0, 0], [0,1,0], [1,0,0]]))
desired_cols = np.ravel(mat.sum(0)) > 0
assert_equal(mat[:, desired_cols].A, [[1, 0], [0, 1], [1, 0]])
def test_fancy_indexing_seq_assign(self):
mat = self.spmatrix(array([[1, 0], [0, 1]]))
assert_raises(ValueError, mat.__setitem__, (0, 0), np.array([1,2]))
def test_fancy_indexing_empty(self):
B = asmatrix(arange(50).reshape(5,10))
B[1,:] = 0
B[:,2] = 0
B[3,6] = 0
A = self.spmatrix(B)
K = np.array([False, False, False, False, False])
assert_equal(todense(A[K]), B[K])
K = np.array([], dtype=int)
assert_equal(todense(A[K]), B[K])
assert_equal(todense(A[K,K]), B[K,K])
J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
assert_equal(todense(A[K,J]), B[K,J])
assert_equal(todense(A[J,K]), B[J,K])
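# Usage sketch (mirroring the assignment tests further below): wrap an
# index-mutating operation so the helper can verify that the matrix's
# `has_sorted_indices` flag is still truthful afterwards, e.g.
#
#     with check_remains_sorted(A):
#         A[i, j] = 1
#
# After the wrapped block runs, the current indices are snapshotted, a
# re-sort is forced, and the result is asserted to be unchanged.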
@contextlib.contextmanager
def check_remains_sorted(X):
"""Checks that sorted indices property is retained through an operation
"""
if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices:
yield
return
yield
indices = X.indices.copy()
X.has_sorted_indices = False
X.sort_indices()
assert_array_equal(indices, X.indices,
'Expected sorted indices, found unsorted')
class _TestFancyIndexingAssign:
def test_bad_index_assign(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix(np.zeros([5, 5]))
assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2)
assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5)
def test_fancy_indexing_set(self):
n, m = (5, 10)
def _test_set_slice(i, j):
A = self.spmatrix((n, m))
with check_remains_sorted(A):
A[i, j] = 1
B = asmatrix(np.zeros((n, m)))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B)
# [1:2,1:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
for i, j in [((2, 3, 4), slice(None, 10, 4)),
(np.arange(3), slice(5, -2)),
(slice(2, 5), slice(5, -2))]:
_test_set_slice(i, j)
for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]:
_test_set_slice(i, j)
def test_fancy_assignment_dtypes(self):
def check(dtype):
A = self.spmatrix((5, 5), dtype=dtype)
A[[0,1],[0,1]] = dtype.type(1)
assert_equal(A.sum(), dtype.type(1)*2)
A[0:2,0:2] = dtype.type(1.0)
assert_equal(A.sum(), dtype.type(1)*4)
A[2,2] = dtype.type(1.0)
assert_equal(A.sum(), dtype.type(1)*4 + dtype.type(1))
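        # nose-style generator test: each yielded (check, dtype) pair is run
        # as a separate test case, once per entry in supported_dtypes.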
for dtype in supported_dtypes:
yield check, np.dtype(dtype)
def test_sequence_assignment(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((4,3))
B = self.spmatrix(eye(3,4))
i0 = [0,1,2]
i1 = (0,1,2)
i2 = array(i0)
with check_remains_sorted(A):
A[0,i0] = B[i0,0].T
A[1,i1] = B[i1,1].T
A[2,i2] = B[i2,2].T
assert_array_equal(A.todense(),B.T.todense())
# column slice
A = self.spmatrix((2,3))
with check_remains_sorted(A):
A[1,1:3] = [10,20]
assert_array_equal(A.todense(), [[0,0,0],[0,10,20]])
# row slice
A = self.spmatrix((3,2))
with check_remains_sorted(A):
A[1:3,1] = [[10],[20]]
assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]])
# both slices
A = self.spmatrix((3,3))
B = asmatrix(np.zeros((3,3)))
with check_remains_sorted(A):
for C in [A, B]:
C[[0,1,2], [0,1,2]] = [4,5,6]
assert_array_equal(A.toarray(), B)
# both slices (2)
A = self.spmatrix((4, 3))
with check_remains_sorted(A):
A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
assert_almost_equal(A.sum(), 6)
B = asmatrix(np.zeros((4, 3)))
B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
assert_array_equal(A.todense(), B)
def test_fancy_assign_empty(self):
B = asmatrix(arange(50).reshape(5,10))
B[1,:] = 0
B[:,2] = 0
B[3,6] = 0
A = self.spmatrix(B)
K = np.array([False, False, False, False, False])
A[K] = 42
assert_equal(todense(A), B)
K = np.array([], dtype=int)
A[K] = 42
assert_equal(todense(A), B)
A[K,K] = 42
assert_equal(todense(A), B)
J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
A[K,J] = 42
assert_equal(todense(A), B)
A[J,K] = 42
assert_equal(todense(A), B)
class _TestFancyMultidim:
def test_fancy_indexing_ndarray(self):
sets = [
(np.array([[1], [2], [3]]), np.array([3, 4, 2])),
(np.array([[1], [2], [3]]), np.array([[3, 4, 2]])),
(np.array([[1, 2, 3]]), np.array([[3], [4], [2]])),
(np.array([1, 2, 3]), np.array([[3], [4], [2]])),
(np.array([[1, 2, 3], [3, 4, 2]]),
np.array([[5, 6, 3], [2, 3, 1]]))
]
# These inputs generate 3-D outputs
# (np.array([[[1], [2], [3]], [[3], [4], [2]]]),
# np.array([[[5], [6], [3]], [[2], [3], [1]]])),
for I, J in sets:
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
I_bad = I + 5
J_bad = J + 7
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
# This would generate 3-D arrays -- not supported
assert_raises(IndexError, S.__getitem__, ([I, I], slice(None)))
assert_raises(IndexError, S.__getitem__, (slice(None), [J, J]))
class _TestFancyMultidimAssign:
def test_fancy_assign_ndarray(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
X = np.random.rand(2, 3)
I = np.array([[1, 2, 3], [3, 4, 2]])
J = np.array([[5, 6, 3], [2, 3, 1]])
with check_remains_sorted(S):
S[I,J] = X
D[I,J] = X
assert_equal(S.todense(), D)
I_bad = I + 5
J_bad = J + 7
C = [1, 2, 3]
with check_remains_sorted(S):
S[I,J] = C
D[I,J] = C
assert_equal(S.todense(), D)
with check_remains_sorted(S):
S[I,J] = 3
D[I,J] = 3
assert_equal(S.todense(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_indexing_multidim_set(self):
n, m = (5, 10)
def _test_set_slice(i, j):
A = self.spmatrix((n, m))
with check_remains_sorted(A):
A[i, j] = 1
B = asmatrix(np.zeros((n, m)))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B)
# [[[1, 2], [1, 2]], [1, 2]]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]),
(np.array([0, 4]), [[0, 3], [1, 2]]),
([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]:
_test_set_slice(i, j)
def test_fancy_assign_list(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
X = np.random.rand(2, 3)
I = [[1, 2, 3], [3, 4, 2]]
J = [[5, 6, 3], [2, 3, 1]]
S[I,J] = X
D[I,J] = X
assert_equal(S.todense(), D)
I_bad = [[ii + 5 for ii in i] for i in I]
J_bad = [[jj + 7 for jj in j] for j in J]
C = [1, 2, 3]
S[I,J] = C
D[I,J] = C
assert_equal(S.todense(), D)
S[I,J] = 3
D[I,J] = 3
assert_equal(S.todense(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_assign_slice(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
I = [[1, 2, 3], [3, 4, 2]]
J = [[5, 6, 3], [2, 3, 1]]
I_bad = [[ii + 5 for ii in i] for i in I]
J_bad = [[jj + 7 for jj in j] for j in J]
C = [1, 2, 3, 4, 5, 6, 7]
assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C)
assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C)
class _TestArithmetic:
"""
Test real/complex arithmetic
"""
def __arith_init(self):
# these can be represented exactly in FP (so arithmetic should be exact)
self.__A = matrix([[-1.5, 6.5, 0, 2.25, 0, 0],
[3.125, -7.875, 0.625, 0, 0, 0],
[0, 0, -0.125, 1.0, 0, 0],
[0, 0, 8.375, 0, 0, 0]],'float64')
self.__B = matrix([[0.375, 0, 0, 0, -5, 2.5],
[14.25, -3.75, 0, 0, -0.125, 0],
[0, 7.25, 0, 0, 0, 0],
[18.5, -0.0625, 0, 0, 0, 0]],'complex128')
self.__B.imag = matrix([[1.25, 0, 0, 0, 6, -3.875],
[2.25, 4.125, 0, 0, 0, 2.75],
[0, 4.125, 0, 0, 0, 0],
[-0.0625, 0, 0, 0, 0, 0]],'float64')
# fractions are all x/16ths
assert_array_equal((self.__A*16).astype('int32'),16*self.__A)
assert_array_equal((self.__B.real*16).astype('int32'),16*self.__B.real)
assert_array_equal((self.__B.imag*16).astype('int32'),16*self.__B.imag)
self.__Asp = self.spmatrix(self.__A)
self.__Bsp = self.spmatrix(self.__B)
def test_add_sub(self):
self.__arith_init()
# basic tests
assert_array_equal((self.__Asp+self.__Bsp).todense(),self.__A+self.__B)
# check conversions
for x in supported_dtypes:
A = self.__A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
if not np.issubdtype(y, np.complexfloating):
B = self.__B.real.astype(y)
else:
B = self.__B.astype(y)
Bsp = self.spmatrix(B)
# addition
D1 = A + B
S1 = Asp + Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp + B,D1) # check sparse + dense
assert_array_equal(A + Bsp,D1) # check dense + sparse
# subtraction
if (np.dtype('bool') in [x, y]) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
D1 = A - B
S1 = Asp - Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp - B,D1) # check sparse - dense
assert_array_equal(A - Bsp,D1) # check dense - sparse
def test_mu(self):
self.__arith_init()
# basic tests
assert_array_equal((self.__Asp*self.__Bsp.T).todense(),self.__A*self.__B.T)
for x in supported_dtypes:
A = self.__A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
if np.issubdtype(y, np.complexfloating):
B = self.__B.astype(y)
else:
B = self.__B.real.astype(y)
Bsp = self.spmatrix(B)
D1 = A * B.T
S1 = Asp * Bsp.T
assert_allclose(S1.todense(), D1,
atol=1e-14*abs(D1).max())
assert_equal(S1.dtype,D1.dtype)
class _TestMinMax(object):
def test_minmax(self):
for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]:
D = np.arange(20, dtype=dtype).reshape(5,4)
X = self.spmatrix(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
assert_equal(X.min().dtype, dtype)
assert_equal(X.max().dtype, dtype)
D *= -1
X = self.spmatrix(D)
assert_equal(X.min(), -19)
assert_equal(X.max(), 0)
D += 5
X = self.spmatrix(D)
assert_equal(X.min(), -14)
assert_equal(X.max(), 5)
# try a fully dense matrix
X = self.spmatrix(np.arange(1, 10).reshape(3, 3))
assert_equal(X.min(), 1)
assert_equal(X.min().dtype, X.dtype)
X = -X
assert_equal(X.max(), -1)
# and a fully sparse matrix
Z = self.spmatrix(np.zeros(1))
assert_equal(Z.min(), 0)
assert_equal(Z.max(), 0)
assert_equal(Z.max().dtype, Z.dtype)
# another test
D = np.arange(20, dtype=float).reshape(5,4)
D[0:2, :] = 0
X = self.spmatrix(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
# zero-size matrices
for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]:
X = self.spmatrix(D)
assert_raises(ValueError, X.min)
assert_raises(ValueError, X.max)
def test_minmax_axis(self):
D = np.matrix(np.arange(50).reshape(5,10))
# completely empty rows, leaving some completely full:
D[1, :] = 0
# empty at end for reduceat:
D[:, 9] = 0
# partial rows/cols:
D[3, 3] = 0
# entries on either side of 0:
D[2, 2] = -1
X = self.spmatrix(D)
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.sum with negative axis arg doesn't work for < 1.7
axes = [-2, -1, 0, 1]
else:
axes = [0, 1]
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
# full matrix
D = np.matrix(np.arange(1, 51).reshape(10, 5))
X = self.spmatrix(D)
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
# empty matrix
D = np.matrix(np.zeros((10, 5)))
X = self.spmatrix(D)
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
if NumpyVersion(np.__version__) >= '1.7.0':
axes_even = [0, -2]
axes_odd = [1, -1]
else:
axes_even = [0]
axes_odd = [1]
# zero-size matrices
D = np.zeros((0, 10))
X = self.spmatrix(D)
for axis in axes_even:
assert_raises(ValueError, X.min, axis=axis)
assert_raises(ValueError, X.max, axis=axis)
for axis in axes_odd:
assert_array_equal(np.zeros((0, 1)), X.min(axis=axis).A)
assert_array_equal(np.zeros((0, 1)), X.max(axis=axis).A)
D = np.zeros((10, 0))
X = self.spmatrix(D)
for axis in axes_odd:
assert_raises(ValueError, X.min, axis=axis)
assert_raises(ValueError, X.max, axis=axis)
for axis in axes_even:
assert_array_equal(np.zeros((1, 0)), X.min(axis=axis).A)
assert_array_equal(np.zeros((1, 0)), X.max(axis=axis).A)
class _TestGetNnzAxis(object):
def test_getnnz_axis(self):
dat = np.matrix([[0, 2],
[3, 5],
[-6, 9]])
bool_dat = dat.astype(bool).A
datsp = self.spmatrix(dat)
assert_array_equal(bool_dat.sum(axis=None), datsp.getnnz(axis=None))
assert_array_equal(bool_dat.sum(), datsp.getnnz())
assert_array_equal(bool_dat.sum(axis=0), datsp.getnnz(axis=0))
assert_array_equal(bool_dat.sum(axis=1), datsp.getnnz(axis=1))
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.sum with negative axis arg doesn't work for < 1.7
assert_array_equal(bool_dat.sum(axis=-2), datsp.getnnz(axis=-2))
assert_array_equal(bool_dat.sum(axis=-1), datsp.getnnz(axis=-1))
assert_raises(ValueError, datsp.getnnz, axis=2)
#------------------------------------------------------------------------------
# Tailored base class for generic tests
#------------------------------------------------------------------------------
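# The wrapper below turns common "feature not implemented" failures
# (NotImplementedError, TypeError, ValueError, ...) into nose SkipTest
# exceptions, so a format that lacks e.g. fancy indexing reports those tests
# as skipped rather than failed.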
def _possibly_unimplemented(cls, require=True):
"""
    Construct a class that either runs tests as usual (require=True), or
    whose test methods raise SkipTest when they hit a common
    "not implemented" error (require=False).
"""
if require:
return cls
else:
def wrap(fc):
def wrapper(*a, **kw):
try:
return fc(*a, **kw)
except (NotImplementedError, TypeError, ValueError,
IndexError, AttributeError):
raise nose.SkipTest("feature not implemented")
wrapper.__name__ = fc.__name__
return wrapper
new_dict = dict(cls.__dict__)
for name, func in cls.__dict__.items():
if name.startswith('test_'):
new_dict[name] = wrap(func)
return type(cls.__name__ + "NotImplemented",
cls.__bases__,
new_dict)
def sparse_test_class(getset=True, slicing=True, slicing_assign=True,
fancy_indexing=True, fancy_assign=True,
fancy_multidim_indexing=True, fancy_multidim_assign=True,
minmax=True, nnz_axis=True):
"""
    Construct a base class, optionally wrapping some of the test groups so
    that tests exercising unimplemented features are skipped instead of
    being required to pass.
"""
bases = (_TestCommon,
_possibly_unimplemented(_TestGetSet, getset),
_TestSolve,
_TestInplaceArithmetic,
_TestArithmetic,
_possibly_unimplemented(_TestSlicing, slicing),
_possibly_unimplemented(_TestSlicingAssign, slicing_assign),
_possibly_unimplemented(_TestFancyIndexing, fancy_indexing),
_possibly_unimplemented(_TestFancyIndexingAssign,
fancy_assign),
_possibly_unimplemented(_TestFancyMultidim,
fancy_indexing and fancy_multidim_indexing),
_possibly_unimplemented(_TestFancyMultidimAssign,
fancy_multidim_assign and fancy_assign),
_possibly_unimplemented(_TestMinMax, minmax),
_possibly_unimplemented(_TestGetNnzAxis, nnz_axis))
# check that test names do not clash
names = {}
for cls in bases:
for name in cls.__dict__:
if not name.startswith('test_'):
continue
old_cls = names.get(name)
if old_cls is not None:
raise ValueError("Test class %s overloads test %s defined in %s" % (
cls.__name__, name, old_cls.__name__))
names[name] = cls
return type("TestBase", bases, {})
#------------------------------------------------------------------------------
# Matrix class based tests
#------------------------------------------------------------------------------
class TestCSR(sparse_test_class()):
spmatrix = csr_matrix
checked_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
def test_constructor1(self):
b = matrix([[0,4,0],
[3,0,0],
[0,2,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[4,3,2])
assert_array_equal(bsp.indices,[1,0,1])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_equal(bsp.getnnz(),3)
assert_equal(bsp.getformat(),'csr')
assert_array_equal(bsp.todense(),b)
def test_constructor2(self):
b = zeros((6,6),'d')
b[3,4] = 5
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[4])
assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1])
assert_array_almost_equal(bsp.todense(),b)
def test_constructor3(self):
b = matrix([[1,0],
[0,2],
[3,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,3])
assert_array_equal(bsp.indices,[0,1,0])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_array_almost_equal(bsp.todense(),b)
### currently disabled
## def test_constructor4(self):
## """try using int64 indices"""
## data = arange( 6 ) + 1
## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' )
## ptr = array( [0, 2, 4, 6], dtype='int64' )
##
## a = csr_matrix( (data, col, ptr), shape = (3,3) )
##
## b = matrix([[0,1,2],
## [4,3,0],
## [5,0,6]],'d')
##
## assert_equal(a.indptr.dtype,numpy.dtype('int64'))
## assert_equal(a.indices.dtype,numpy.dtype('int64'))
## assert_array_equal(a.todense(),b)
def test_constructor4(self):
# using (data, ij) format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csr = csr_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csr.todense())
def test_constructor5(self):
# infer dimensions from arrays
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csr = csr_matrix((data, indices, indptr))
assert_array_equal(csr.shape,(3,6))
def test_sort_indices(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csr_matrix(np.arange(20).reshape(4, 5) / 20.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
data = arange(6)
indices = array([8, 1, 5, 7, 2, 4])
indptr = array([0, 2, 6])
bsp = csr_matrix((data, indices, indptr), shape=(2,10))
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
def test_fancy_indexing_broadcast(self):
# broadcasting indexing mode is supported
I = np.array([[1], [2], [3]])
J = np.array([3, 4, 2])
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
def test_has_sorted_indices(self):
"Ensure has_sorted_indices memoizes sorted state for sort_indices"
sorted_inds = np.array([0, 1])
unsorted_inds = np.array([1, 0])
data = np.array([1, 1])
indptr = np.array([0, 2])
M = csr_matrix((data, sorted_inds, indptr)).copy()
assert_equal(True, M.has_sorted_indices)
M = csr_matrix((data, unsorted_inds, indptr)).copy()
assert_equal(False, M.has_sorted_indices)
# set by sorting
M.sort_indices()
assert_equal(True, M.has_sorted_indices)
assert_array_equal(M.indices, sorted_inds)
M = csr_matrix((data, unsorted_inds, indptr)).copy()
# set manually (although underlyingly unsorted)
M.has_sorted_indices = True
assert_equal(True, M.has_sorted_indices)
assert_array_equal(M.indices, unsorted_inds)
# ensure sort bypassed when has_sorted_indices == True
M.sort_indices()
assert_array_equal(M.indices, unsorted_inds)
def test_has_canonical_format(self):
"Ensure has_canonical_format memoizes state for sum_duplicates"
M = csr_matrix((np.array([2]), np.array([0]), np.array([0, 1])))
assert_equal(True, M.has_canonical_format)
indices = np.array([0, 0]) # contains duplicate
data = np.array([1, 1])
indptr = np.array([0, 2])
M = csr_matrix((data, indices, indptr)).copy()
assert_equal(False, M.has_canonical_format)
# set by deduplicating
M.sum_duplicates()
assert_equal(True, M.has_canonical_format)
assert_equal(1, len(M.indices))
M = csr_matrix((data, indices, indptr)).copy()
# set manually (although underlyingly duplicated)
M.has_canonical_format = True
assert_equal(True, M.has_canonical_format)
assert_equal(2, len(M.indices)) # unaffected content
# ensure deduplication bypassed when has_canonical_format == True
M.sum_duplicates()
assert_equal(2, len(M.indices)) # unaffected content
class TestCSC(sparse_test_class()):
spmatrix = csc_matrix
checked_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
def test_constructor1(self):
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,1,3])
assert_array_equal(bsp.indices,[0,2,1,2])
assert_array_equal(bsp.indptr,[0,1,2,3,4])
assert_equal(bsp.getnnz(),4)
assert_equal(bsp.shape,b.shape)
assert_equal(bsp.getformat(),'csc')
def test_constructor2(self):
b = zeros((6,6),'d')
b[2,4] = 5
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[2])
assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
def test_constructor3(self):
b = matrix([[1,0],[0,0],[0,2]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2])
assert_array_equal(bsp.indices,[0,2])
assert_array_equal(bsp.indptr,[0,1,2])
def test_constructor4(self):
# using (data, ij) format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csc = csc_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csc.todense())
def test_constructor5(self):
# infer dimensions from arrays
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csc = csc_matrix((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = csc_matrix((data, indices, indptr), shape=(10,2))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_sort_indices(self):
data = arange(5)
row = array([7, 2, 1, 5, 4])
ptr = [0, 3, 5]
asp = csc_matrix((data, row, ptr), shape=(10,2))
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csc_matrix(np.arange(21).reshape(7, 3) / 21.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csc_matrix((data, indices, indptr), shape=(10,2))
data = arange(6)
indices = array([8, 1, 5, 7, 2, 4])
indptr = array([0, 2, 6])
bsp = csc_matrix((data, indices, indptr), shape=(10,2))
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
def test_fancy_indexing_broadcast(self):
# broadcasting indexing mode is supported
I = np.array([[1], [2], [3]])
J = np.array([3, 4, 2])
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)):
spmatrix = dok_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_mult(self):
A = dok_matrix((10,10))
A[0,3] = 10
A[5,6] = 20
D = A*A.T
E = A*A.H
assert_array_equal(D.A, E.A)
def test_add_nonzero(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((3,2))
A[0,1] = -10
A[2,0] = 20
A = A + 10
B = matrix([[10, 0], [10, 10], [30, 10]])
assert_array_equal(A.todense(), B)
A = A + 1j
B = B + 1j
assert_array_equal(A.todense(), B)
def test_dok_divide_scalar(self):
A = self.spmatrix((3,2))
A[0,1] = -10
A[2,0] = 20
assert_array_equal((A/1j).todense(), A.todense()/1j)
assert_array_equal((A/9).todense(), A.todense()/9)
def test_convert(self):
# Test provided by Andrew Straw. Fails in SciPy <= r1477.
(m, n) = (6, 7)
a = dok_matrix((m, n))
# set a few elements, but none in the last column
a[2,1] = 1
a[0,2] = 2
a[3,1] = 3
a[1,5] = 4
a[4,3] = 5
a[4,2] = 6
# assert that the last column is all zeros
assert_array_equal(a.toarray()[:,n-1], zeros(m,))
# make sure it still works for CSC format
csc = a.tocsc()
assert_array_equal(csc.toarray()[:,n-1], zeros(m,))
# now test CSR
(m, n) = (n, m)
b = a.transpose()
assert_equal(b.shape, (m, n))
# assert that the last row is all zeros
assert_array_equal(b.toarray()[m-1,:], zeros(n,))
# make sure it still works for CSR format
csr = b.tocsr()
assert_array_equal(csr.toarray()[m-1,:], zeros(n,))
def test_ctor(self):
# Empty ctor
assert_raises(TypeError, dok_matrix)
# Dense ctor
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
A = dok_matrix(b)
assert_equal(b.dtype, A.dtype)
assert_equal(A.todense(), b)
# Sparse ctor
c = csr_matrix(b)
assert_equal(A.todense(), c.todense())
data = [[0, 1, 2], [3, 0, 0]]
d = dok_matrix(data, dtype=np.float32)
assert_equal(d.dtype, np.float32)
da = d.toarray()
assert_equal(da.dtype, np.float32)
assert_array_equal(da, data)
def test_resize(self):
# A couple basic tests of the resize() method.
#
# resize(shape) resizes the array in-place.
a = dok_matrix((5,5))
a[:,0] = 1
a.resize((2,2))
expected1 = array([[1,0],[1,0]])
assert_array_equal(a.todense(), expected1)
a.resize((3,2))
expected2 = array([[1,0],[1,0],[0,0]])
assert_array_equal(a.todense(), expected2)
def test_ticket1160(self):
# Regression test for ticket #1160.
a = dok_matrix((3,3))
a[0,0] = 0
# This assert would fail, because the above assignment would
# incorrectly call __set_item__ even though the value was 0.
assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys")
# Slice assignments were also affected.
b = dok_matrix((3,3))
b[:,0] = 0
assert_(len(b.keys()) == 0, "Unexpected entries in keys")
##
## TODO: The DOK matrix currently returns invalid results rather
## than raising errors in some indexing operations
##
@dec.knownfailureif(True, "known deficiency in DOK")
def test_fancy_indexing(self):
pass
@dec.knownfailureif(True, "known deficiency in DOK")
def test_add_sub(self):
pass
class TestLIL(sparse_test_class(minmax=False)):
spmatrix = lil_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_dot(self):
A = matrix(zeros((10,10)))
A[0,3] = 10
A[5,6] = 20
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
assert_array_equal(A * A.T, (B * B.T).todense())
assert_array_equal(A * A.H, (B * B.H).todense())
def test_scalar_mul(self):
x = lil_matrix((3,3))
x[0,0] = 2
x = x*2
assert_equal(x[0,0],4)
x = x*0
assert_equal(x[0,0],0)
def test_reshape(self):
x = lil_matrix((4,3))
x[0,0] = 1
x[2,1] = 3
x[3,2] = 5
x[0,2] = 7
for s in [(12,1),(1,12)]:
assert_array_equal(x.reshape(s).todense(),
x.todense().reshape(s))
def test_inplace_ops(self):
A = lil_matrix([[0,2,3],[4,0,6]])
B = lil_matrix([[0,1,0],[0,2,3]])
data = {'add': (B,A + B),
'sub': (B,A - B),
'mul': (3,A * 3)}
for op,(other,expected) in data.items():
result = A.copy()
getattr(result, '__i%s__' % op)(other)
assert_array_equal(result.todense(), expected.todense())
# Ticket 1604.
A = lil_matrix((1,3), dtype=np.dtype('float64'))
B = array([0.1,0.1,0.1])
A[0,:] += B
assert_array_equal(A[0,:].toarray().squeeze(), B)
def test_lil_iteration(self):
row_data = [[1,2,3],[4,5,6]]
B = lil_matrix(array(row_data))
for r,row in enumerate(B):
assert_array_equal(row.todense(),array(row_data[r],ndmin=2))
def test_lil_from_csr(self):
# Tests whether a lil_matrix can be constructed from a
# csr_matrix.
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
B[8,3] = 30
B[3,8] = 40
B[8,9] = 50
C = B.tocsr()
D = lil_matrix(C)
assert_array_equal(C.A, D.A)
def test_fancy_indexing_lil(self):
M = asmatrix(arange(25).reshape(5,5))
A = lil_matrix(M)
assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3])
def test_point_wise_multiply(self):
l = lil_matrix((4,3))
l[0,0] = 1
l[1,1] = 2
l[2,2] = 3
l[3,1] = 4
m = lil_matrix((4,3))
m[0,0] = 1
m[0,1] = 2
m[2,2] = 3
m[3,1] = 4
m[3,2] = 4
assert_array_equal(l.multiply(m).todense(),
m.multiply(l).todense())
assert_array_equal(l.multiply(m).todense(),
[[1,0,0],
[0,0,0],
[0,0,9],
[0,16,0]])
def test_lil_multiply_removal(self):
# Ticket #1427.
a = lil_matrix(np.ones((3,3)))
a *= 2.
a[0, :] = 0
class TestCOO(sparse_test_class(getset=False,
slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False)):
spmatrix = coo_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
# unsorted triplet format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
coo = coo_matrix((data,(row,col)),(4,3))
assert_array_equal(arange(12).reshape(4,3),coo.todense())
def test_constructor2(self):
# unsorted triplet format with duplicates (which are summed)
row = array([0,1,2,2,2,2,0,0,2,2])
col = array([0,2,0,2,1,1,1,0,0,2])
data = array([2,9,-4,5,7,0,-1,2,1,-5])
coo = coo_matrix((data,(row,col)),(3,3))
mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]])
assert_array_equal(mat,coo.todense())
def test_constructor3(self):
# empty matrix
coo = coo_matrix((4,3))
assert_array_equal(coo.shape,(4,3))
assert_array_equal(coo.row,[])
assert_array_equal(coo.col,[])
assert_array_equal(coo.data,[])
assert_array_equal(coo.todense(),zeros((4,3)))
def test_constructor4(self):
# from dense matrix
mat = array([[0,1,0,0],
[7,0,3,0],
[0,4,0,0]])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat)
# upgrade rank 1 arrays to row matrix
mat = array([0,1,0,0])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat.reshape(1,-1))
# COO does not have a __getitem__ to support iteration
def test_iterator(self):
pass
def test_todia_all_zeros(self):
zeros = [[0, 0]]
dia = coo_matrix(zeros).todia()
assert_array_equal(dia.A, zeros)
def test_sum_duplicates(self):
coo = coo_matrix((4,3))
coo.sum_duplicates()
coo = coo_matrix(([1,2], ([1,0], [1,0])))
coo.sum_duplicates()
assert_array_equal(coo.A, [[2,0],[0,1]])
coo = coo_matrix(([1,2], ([1,1], [1,1])))
coo.sum_duplicates()
assert_array_equal(coo.A, [[0,0],[0,3]])
assert_array_equal(coo.row, [1])
assert_array_equal(coo.col, [1])
assert_array_equal(coo.data, [3])
def test_todok_duplicates(self):
coo = coo_matrix(([1,1,1,1], ([0,2,2,0], [0,1,1,0])))
dok = coo.todok()
assert_array_equal(dok.A, coo.A)
class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False,
minmax=False, nnz_axis=False)):
spmatrix = dia_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
D = matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
data = np.array([[1,2,3,4]]).repeat(3,axis=0)
offsets = np.array([0,-1,2])
assert_equal(dia_matrix((data,offsets), shape=(4,4)).todense(), D)
# DIA does not have a __getitem__ to support iteration
def test_iterator(self):
pass
@with_64bit_maxval_limit(3)
def test_setdiag_dtype(self):
m = dia_matrix(np.eye(3))
assert_equal(m.offsets.dtype, np.int32)
m.setdiag((3,), k=2)
assert_equal(m.offsets.dtype, np.int32)
m = dia_matrix(np.eye(4))
assert_equal(m.offsets.dtype, np.int64)
m.setdiag((3,), k=3)
assert_equal(m.offsets.dtype, np.int64)
class TestBSR(sparse_test_class(getset=False,
slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False,
nnz_axis=False)):
spmatrix = bsr_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
# check native BSR format constructor
indptr = array([0,2,2,4])
indices = array([0,2,2,3])
data = zeros((4,2,3))
data[0] = array([[0, 1, 2],
[3, 0, 5]])
data[1] = array([[0, 2, 4],
[6, 0, 10]])
data[2] = array([[0, 4, 8],
[12, 0, 20]])
data[3] = array([[0, 5, 10],
[15, 0, 25]])
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
assert_equal(Asp.todense(),A)
# infer shape from arrays
Asp = bsr_matrix((data,indices,indptr))
assert_equal(Asp.todense(),A)
def test_constructor2(self):
# construct from dense
# test zero mats
for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
A = zeros(shape)
assert_equal(bsr_matrix(A).todense(),A)
A = zeros((4,6))
assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
assert_equal(bsr_matrix(A).todense(),A)
assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A)
A = kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]])
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
def test_eliminate_zeros(self):
data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
data = data.reshape(-1,2,2)
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = bsr_matrix((data, indices, indptr), shape=(4,20))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3*4)
assert_array_equal(asp.todense(),bsp.todense())
def test_bsr_matvec(self):
A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]).reshape(-1,1)
assert_equal(A*x, A.todense()*x)
def test_bsr_matvecs(self):
A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]*6).reshape(-1,6)
assert_equal(A*x, A.todense()*x)
@dec.knownfailureif(True, "BSR not implemented")
def test_iterator(self):
pass
@dec.knownfailureif(True, "known deficiency in BSR")
def test_setdiag(self):
pass
#------------------------------------------------------------------------------
# Tests for non-canonical representations (with duplicates, unsorted indices)
#------------------------------------------------------------------------------
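# Helper for the non-canonical tests: every stored value x is split into two
# duplicate entries (x - 1 and 1) that sum back to x, so the rebuilt matrix
# is numerically identical but has a redundant internal representation. One
# explicit zero, if present, is kept explicitly zero; boolean and unsigned
# dtypes are passed through unchanged.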
def _same_sum_duplicate(data, *inds, **kwargs):
"""Duplicates entries to produce the same matrix"""
indptr = kwargs.pop('indptr', None)
if np.issubdtype(data.dtype, np.bool_) or \
np.issubdtype(data.dtype, np.unsignedinteger):
if indptr is None:
return (data,) + inds
else:
return (data,) + inds + (indptr,)
zeros_pos = (data == 0).nonzero()
# duplicate data
data = data.repeat(2, axis=0)
data[::2] -= 1
data[1::2] = 1
# don't spoil all explicit zeros
if zeros_pos[0].size > 0:
pos = tuple(p[0] for p in zeros_pos)
pos1 = (2*pos[0],) + pos[1:]
pos2 = (2*pos[0]+1,) + pos[1:]
data[pos1] = 0
data[pos2] = 0
inds = tuple(indices.repeat(2) for indices in inds)
if indptr is None:
return (data,) + inds
else:
return (data,) + inds + (indptr * 2,)
class _NonCanonicalMixin(object):
def spmatrix(self, D, **kwargs):
"""Replace D with a non-canonical equivalent: containing
duplicate elements and explicit zeros"""
construct = super(_NonCanonicalMixin, self).spmatrix
M = construct(D, **kwargs)
zero_pos = (M.A == 0).nonzero()
has_zeros = (zero_pos[0].size > 0)
if has_zeros:
k = zero_pos[0].size//2
M = self._insert_explicit_zero(M,
zero_pos[0][k],
zero_pos[1][k])
arg1 = self._arg1_for_noncanonical(M)
if 'shape' not in kwargs:
kwargs['shape'] = M.shape
NC = construct(arg1, **kwargs)
# check that result is valid
assert_allclose(NC.A, M.A)
        # check that at least one explicit zero is present
if has_zeros:
assert_((NC.data == 0).any())
return NC
@dec.knownfailureif(True, 'abs broken with non-canonical matrix')
def test_abs(self):
pass
@dec.knownfailureif(True, 'bool(matrix) broken with non-canonical matrix')
def test_bool(self):
pass
@dec.knownfailureif(True, 'min/max broken with non-canonical matrix')
def test_minmax(self):
pass
@dec.knownfailureif(True, 'format conversion broken with non-canonical matrix')
def test_sparse_format_conversions(self):
pass
@dec.knownfailureif(True, 'unary ufunc overrides broken with non-canonical matrix')
def test_unary_ufunc_overrides(self):
pass
@dec.knownfailureif(True, 'some binary ufuncs fail with scalars for noncanonical matrices')
def test_binary_ufunc_overrides(self):
pass
@dec.knownfailureif(True, 'getnnz-axis broken with non-canonical matrix')
def test_getnnz_axis(self):
pass
class _NonCanonicalCompressedMixin(_NonCanonicalMixin):
def _arg1_for_noncanonical(self, M):
"""Return non-canonical constructor arg1 equivalent to M"""
data, indices, indptr = _same_sum_duplicate(M.data, M.indices,
indptr=M.indptr)
# unsorted
for start, stop in izip(indptr, indptr[1:]):
indices[start:stop] = indices[start:stop][::-1].copy()
data[start:stop] = data[start:stop][::-1].copy()
return data, indices, indptr
def _insert_explicit_zero(self, M, i, j):
M[i,j] = 0
return M
class _NonCanonicalCSMixin(_NonCanonicalCompressedMixin):
@dec.knownfailureif(True, '__getitem__ with non-canonical matrix broken for sparse boolean index due to __gt__')
def test_fancy_indexing_sparse_boolean(self):
pass
@dec.knownfailureif(True, 'broadcasting element-wise multiply broken with non-canonical matrix')
def test_elementwise_multiply_broadcast(self):
pass
@dec.knownfailureif(True, 'inverse broken with non-canonical matrix')
def test_inv(self):
pass
@dec.knownfailureif(True, 'solve broken with non-canonical matrix')
def test_solve(self):
pass
class TestCSRNonCanonical(_NonCanonicalCSMixin, TestCSR):
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
class TestCSCNonCanonical(_NonCanonicalCSMixin, TestCSC):
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
@dec.knownfailureif(True, 'nonzero reports explicit zeros')
def test_nonzero(self):
pass
class TestBSRNonCanonical(_NonCanonicalCompressedMixin, TestBSR):
def _insert_explicit_zero(self, M, i, j):
x = M.tocsr()
x[i,j] = 0
return x.tobsr(blocksize=M.blocksize)
@dec.knownfailureif(True, 'unary ufunc overrides broken with non-canonical BSR')
def test_diagonal(self):
pass
@dec.knownfailureif(True, 'unary ufunc overrides broken with non-canonical BSR')
def test_expm(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_eq(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_ne(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_gt(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_lt(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_ge(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_le(self):
pass
@dec.knownfailureif(True, 'maximum and minimum fail for non-canonical BSR')
def test_maximum_minimum(self):
pass
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
class TestCOONonCanonical(_NonCanonicalMixin, TestCOO):
def _arg1_for_noncanonical(self, M):
"""Return non-canonical constructor arg1 equivalent to M"""
data, row, col = _same_sum_duplicate(M.data, M.row, M.col)
return data, (row, col)
def _insert_explicit_zero(self, M, i, j):
M.data = np.r_[M.data.dtype.type(0), M.data]
M.row = np.r_[M.row.dtype.type(i), M.row]
M.col = np.r_[M.col.dtype.type(j), M.col]
return M
def test_setdiag_noncanonical(self):
m = self.spmatrix(np.eye(3))
m.sum_duplicates()
m.setdiag([3, 2], k=1)
m.sum_duplicates()
assert_(np.all(np.diff(m.col) >= 0))
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
class Test64Bit(object):
TEST_CLASSES = [TestBSR, TestCOO, TestCSC, TestCSR, TestDIA,
# lil/dok->other conversion operations have get_index_dtype
TestDOK, TestLIL
]
MAT_CLASSES = [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix]
# The following features are missing, so skip the tests:
SKIP_TESTS = {
'test_expm': 'expm for 64-bit indices not available',
'test_solve': 'linsolve for 64-bit indices not available'
}
def _create_some_matrix(self, mat_cls, m, n):
return mat_cls(np.random.rand(m, n))
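    # Each sparse format stores its integer index arrays under different
    # attribute names (indices/indptr, row/col, offsets), so normalize the
    # dtype check across formats here.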
def _compare_index_dtype(self, m, dtype):
dtype = np.dtype(dtype)
if isinstance(m, csc_matrix) or isinstance(m, csr_matrix) \
or isinstance(m, bsr_matrix):
return (m.indices.dtype == dtype) and (m.indptr.dtype == dtype)
elif isinstance(m, coo_matrix):
return (m.row.dtype == dtype) and (m.col.dtype == dtype)
elif isinstance(m, dia_matrix):
return (m.offsets.dtype == dtype)
else:
raise ValueError("matrix %r has no integer indices" % (m,))
def test_decorator_maxval_limit(self):
# Test that the with_64bit_maxval_limit decorator works
@with_64bit_maxval_limit(maxval_limit=10)
def check(mat_cls):
m = mat_cls(np.random.rand(10, 1))
assert_(self._compare_index_dtype(m, np.int32))
m = mat_cls(np.random.rand(11, 1))
assert_(self._compare_index_dtype(m, np.int64))
for mat_cls in self.MAT_CLASSES:
yield check, mat_cls
def test_decorator_maxval_random(self):
# Test that the with_64bit_maxval_limit decorator works (2)
@with_64bit_maxval_limit(random=True)
def check(mat_cls):
seen_32 = False
seen_64 = False
for k in range(100):
m = self._create_some_matrix(mat_cls, 9, 9)
seen_32 = seen_32 or self._compare_index_dtype(m, np.int32)
seen_64 = seen_64 or self._compare_index_dtype(m, np.int64)
if seen_32 and seen_64:
break
else:
raise AssertionError("both 32 and 64 bit indices not seen")
for mat_cls in self.MAT_CLASSES:
yield check, mat_cls
def _check_resiliency(self, **kw):
# Resiliency test, to check that sparse matrices deal reasonably
# with varying index data types.
skip = kw.pop('skip', ())
@with_64bit_maxval_limit(**kw)
def check(cls, method_name):
instance = cls()
if hasattr(instance, 'setup'):
instance.setup()
try:
getattr(instance, method_name)()
finally:
if hasattr(instance, 'teardown'):
instance.teardown()
for cls in self.TEST_CLASSES:
for method_name in dir(cls):
method = getattr(cls, method_name)
if (method_name.startswith('test_') and
not getattr(method, 'slow', False) and
(cls.__name__ + '.' + method_name) not in skip):
msg = self.SKIP_TESTS.get(method_name)
yield dec.skipif(msg, msg)(check), cls, method_name
def test_resiliency_limit_10(self):
for t in self._check_resiliency(maxval_limit=10):
yield t
def test_resiliency_random(self):
# bsr_matrix.eliminate_zeros relies on csr_matrix constructor
# not making copies of index arrays --- this is not
# necessarily true when we pick the index data type randomly
skip = ['TestBSR.test_eliminate_zeros']
for t in self._check_resiliency(random=True, skip=skip):
yield t
def test_resiliency_all_32(self):
for t in self._check_resiliency(fixed_dtype=np.int32):
yield t
def test_resiliency_all_64(self):
for t in self._check_resiliency(fixed_dtype=np.int64):
yield t
def test_no_64(self):
for t in self._check_resiliency(assert_32bit=True):
yield t
def test_downcast_intp(self):
# Check that bincount and ufunc.reduceat intp downcasts are
# dealt with. The point here is to trigger points in the code
# that can fail on 32-bit systems when using 64-bit indices,
# due to use of functions that only work with intp-size
# indices.
@with_64bit_maxval_limit(fixed_dtype=np.int64,
downcast_maxval=1)
def check_limited():
# These involve indices larger than `downcast_maxval`
a = csc_matrix([[1, 2], [3, 4], [5, 6]])
assert_raises(AssertionError, a.getnnz, axis=1)
assert_raises(AssertionError, a.sum, axis=0)
a = csr_matrix([[1, 2, 3], [3, 4, 6]])
assert_raises(AssertionError, a.getnnz, axis=0)
a = coo_matrix([[1, 2, 3], [3, 4, 5]])
assert_raises(AssertionError, a.getnnz, axis=0)
@with_64bit_maxval_limit(fixed_dtype=np.int64)
def check_unlimited():
# These involve indices larger than `downcast_maxval`
a = csc_matrix([[1, 2], [3, 4], [5, 6]])
a.getnnz(axis=1)
a.sum(axis=0)
a = csr_matrix([[1, 2, 3], [3, 4, 6]])
a.getnnz(axis=0)
a = coo_matrix([[1, 2, 3], [3, 4, 5]])
a.getnnz(axis=0)
check_limited()
check_unlimited()
if __name__ == "__main__":
run_module_suite()
| nvoron23/scipy | scipy/sparse/tests/test_base.py | Python | bsd-3-clause | 151,696 |
from typing import Any
from django.core.management.base import CommandParser
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.message import maybe_update_first_visible_message_id
from zerver.models import Realm
class Command(ZulipBaseCommand):
help = """Calculate the value of first visible message ID and store it in cache"""
def add_arguments(self, parser: CommandParser) -> None:
self.add_realm_args(parser)
parser.add_argument(
'--lookback-hours',
type=int,
help="Period a bit larger than that of the cron job that runs "
"this command so that the lookback periods are sure to overlap.",
required=True,
)
def handle(self, *args: Any, **options: Any) -> None:
target_realm = self.get_realm(options)
if target_realm is None:
realms = Realm.objects.all()
else:
realms = [target_realm]
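        # Recompute (and cache) the first visible message ID for every
        # selected realm, looking back only over the window passed on the
        # command line.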
for realm in realms:
maybe_update_first_visible_message_id(realm, options['lookback_hours'])
| showell/zulip | zilencer/management/commands/calculate_first_visible_message_id.py | Python | apache-2.0 | 1,074 |
import os
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for flex project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's$r0^f&ny*lp!cqf=5l%$o3@)mkiu$=7=-b!lu+4gyv45&4vss'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ROOT_URLCONF = 'flex.urls'
WSGI_APPLICATION = 'flex.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'flex', 'static'),
)
SITE_ID = 1
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.core.context_processors.static',
'cms.context_processors.cms_settings'
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'flex', 'templates'),
)
INSTALLED_APPS = (
'djangocms_admin_style',
'djangocms_text_ckeditor',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_style',
'djangocms_column',
'djangocms_file',
'djangocms_flash',
'djangocms_googlemap',
'djangocms_inherit',
'djangocms_link',
'djangocms_picture',
'djangocms_teaser',
'djangocms_video',
'reversion',
'flex',
'notcms',
'plugins_layout',
)
LANGUAGES = (
## Customize this
('en', gettext('en')),
)
'''
CMS_LANGUAGES = {
## Customize this
'default': {
'public': True,
'hide_untranslated': False,
'redirect_on_fallback': True,
},
#1: [
# {
# 'public': True,
# 'code': 'en',
# 'hide_untranslated': False,
# 'name': gettext('en'),
# 'redirect_on_fallback': True,
# },
#],
}
'''
CMS_TEMPLATES = (
## Customize this
('fullwidth.html', 'Fullwidth'),
('sidebar_left.html', 'Sidebar Left'),
('sidebar_right.html', 'Sidebar Right')
)
CMS_CACHE_DURATIONS = {
'default': 1,
'menus': 1,
'permissions': 1
}
CMS_PERMISSION = False
CMS_PLACEHOLDER_CACHE = False
CMS_PLACEHOLDER_CONF = {
'page_layout': {
'plugins': ['NewColumnPlugin', 'ContainerPlugin']
}
}
DATABASES = {
'default':
{'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'project.db', 'HOST': 'localhost', 'USER': '', 'PASSWORD': '', 'PORT': ''}
}
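# Map each django CMS plugin to its Django-style migration package; plugin
# releases of this era typically also ship South migrations in the default
# location, so the explicit mapping keeps Django 1.7's migrate command on
# the right set.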
MIGRATION_MODULES = {
'djangocms_column': 'djangocms_column.migrations_django',
'djangocms_flash': 'djangocms_flash.migrations_django',
'djangocms_googlemap': 'djangocms_googlemap.migrations_django',
'djangocms_inherit': 'djangocms_inherit.migrations_django',
'djangocms_link': 'djangocms_link.migrations_django',
'djangocms_style': 'djangocms_style.migrations_django',
'djangocms_file': 'djangocms_file.migrations_django',
'djangocms_picture': 'djangocms_picture.migrations_django',
'djangocms_teaser': 'djangocms_teaser.migrations_django',
'djangocms_video': 'djangocms_video.migrations_django'
}
| samastur/flexlayout | flex/settings.py | Python | mit | 5,343 |
input = """
a :- a.
a | b.
"""
output = """
a :- a.
a | b.
"""
| veltri/DLV2 | tests/parser/bug.40.test.py | Python | apache-2.0 | 67 |
import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from netCDF4p import Dataset, default_fillvals
# Test automatic scaling of variables (set_auto_scale())
class SetAutoScaleTestBase(unittest.TestCase):
"""Base object for tests checking the functionality of set_auto_scale()"""
def setUp(self):
self.testfile = tempfile.mktemp(".nc")
self.fillval = default_fillvals["i2"]
self.missing_value = -9999
self.v = np.array([0, 5, 4, self.missing_value], dtype = "i2")
self.v_ma = ma.array([0, 5, 4, self.missing_value], dtype = "i2",
mask = [True, False, False, True], fill_value = self.fillval)
self.scale_factor = 10.
self.add_offset = 5.
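        # Expected unpacked values follow the netCDF/CF packing convention,
        # unpacked = packed * scale_factor + add_offset, computed here with
        # plain numpy for later comparison against the library output.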
self.v_scaled = self.v * self.scale_factor + self.add_offset
self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset
f = Dataset(self.testfile, 'w')
x = f.createDimension('x', None)
v = f.createVariable('v', "i2", 'x')
v[:] = self.v
# Note: Scale factors are only added after writing, so that no auto-scaling takes place!
v.scale_factor = self.scale_factor
v.add_offset = self.add_offset
f.close()
def tearDown(self):
os.remove(self.testfile)
class SetAutoScaleFalse(SetAutoScaleTestBase):
def test_unmasked(self):
"""Testing (not) auto-scaling of variables for set_auto_scale(False)"""
f = Dataset(self.testfile, "r")
f.variables["v"].set_auto_scale(False)
v = f.variables["v"][:]
self.assertEqual(v.dtype, "i2")
self.assertIsInstance(v, np.ndarray)
self.assertNotIsInstance(v, ma.core.MaskedArray)
assert_array_almost_equal(v, self.v)
f.close()
def test_masked(self):
"""Testing auto-conversion of masked arrays for set_auto_mask(False) with masking"""
# Update test data file
f = Dataset(self.testfile, "a")
f.variables["v"].missing_value = self.missing_value
f.close()
# Note: Converting arrays to masked arrays is default if missing_value is present
f = Dataset(self.testfile, "r")
f.variables["v"].set_auto_scale(False)
v_ma = f.variables["v"][:]
self.assertEqual(v_ma.dtype, "i2")
self.assertIsInstance(v_ma, np.ndarray)
self.assertIsInstance(v_ma, ma.core.MaskedArray)
assert_array_almost_equal(v_ma, self.v_ma)
f.close()
class SetAutoScaleTrue(SetAutoScaleTestBase):
def test_unmasked(self):
"""Testing auto-scaling of variables for set_auto_scale(True)"""
f = Dataset(self.testfile)
f.variables["v"].set_auto_scale(True) # The default anyway...
v_scaled = f.variables['v'][:]
self.assertEqual(v_scaled.dtype, "f8")
self.assertIsInstance(v_scaled, np.ndarray)
self.assertNotIsInstance(v_scaled, ma.core.MaskedArray)
assert_array_almost_equal(v_scaled, self.v_scaled)
f.close()
def test_masked(self):
"""Testing auto-scaling of variables for set_auto_scale(True) with masking"""
# Update test data file
f = Dataset(self.testfile, "a")
f.variables["v"].missing_value = self.missing_value
f.close()
# Note: Converting arrays to masked arrays is default if missing_value is present
f = Dataset(self.testfile)
f.variables["v"].set_auto_scale(True) # The default anyway...
v_ma_scaled = f.variables['v'][:]
self.assertEqual(v_ma_scaled.dtype, "f8")
self.assertIsInstance(v_ma_scaled, np.ndarray)
self.assertIsInstance(v_ma_scaled, ma.core.MaskedArray)
assert_array_almost_equal(v_ma_scaled, self.v_ma_scaled)
f.close()
class GlobalSetAutoScaleTest(unittest.TestCase):
def setUp(self):
self.testfile = tempfile.mktemp(".nc")
f = Dataset(self.testfile, 'w')
grp1 = f.createGroup('Group1')
grp2 = f.createGroup('Group2')
f.createGroup('Group3') # empty group
f.createVariable('var0', "i2", ())
grp1.createVariable('var1', 'f8', ())
grp2.createVariable('var2', 'f4', ())
f.close()
def tearDown(self):
os.remove(self.testfile)
def runTest(self):
f = Dataset(self.testfile, "r")
# Default is both scaling and masking enabled
v0 = f.variables['var0']
v1 = f.groups['Group1'].variables['var1']
v2 = f.groups['Group2'].variables['var2']
self.assertTrue(v0.scale)
self.assertTrue(v0.mask)
self.assertTrue(v1.scale)
self.assertTrue(v1.mask)
self.assertTrue(v2.scale)
self.assertTrue(v2.mask)
# No auto-scaling
f.set_auto_scale(False)
self.assertFalse(v0.scale)
self.assertTrue(v0.mask)
self.assertFalse(v1.scale)
self.assertTrue(v1.mask)
self.assertFalse(v2.scale)
self.assertTrue(v2.mask)
f.close()
if __name__ == '__main__':
unittest.main()
| mathause/netCDF4p | test/tst_scaled.py | Python | mit | 5,177 |
#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Tempest testcases implementation."""
from __future__ import division
import logging
import os
import re
import shutil
import subprocess
import time
import uuid
from snaps.config.flavor import FlavorConfig
from snaps.config.network import NetworkConfig, SubnetConfig
from snaps.config.project import ProjectConfig
from snaps.config.user import UserConfig
from snaps.openstack.create_flavor import OpenStackFlavor
from snaps.openstack.tests import openstack_tests
from snaps.openstack.utils import deploy_utils
from xtesting.core import testcase
import yaml
from functest.opnfv_tests.openstack.snaps import snaps_utils
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils import config
from functest.utils import env
LOGGER = logging.getLogger(__name__)
class TempestCommon(testcase.TestCase):
# pylint: disable=too-many-instance-attributes
"""TempestCommon testcases implementation class."""
TEMPEST_RESULTS_DIR = os.path.join(
getattr(config.CONF, 'dir_results'), 'tempest')
def __init__(self, **kwargs):
super(TempestCommon, self).__init__(**kwargs)
self.resources = TempestResourcesManager(**kwargs)
self.mode = ""
self.option = []
self.verifier_id = conf_utils.get_verifier_id()
self.verifier_repo_dir = conf_utils.get_verifier_repo_dir(
self.verifier_id)
self.deployment_id = conf_utils.get_verifier_deployment_id()
self.deployment_dir = conf_utils.get_verifier_deployment_dir(
self.verifier_id, self.deployment_id)
self.verification_id = None
self.res_dir = TempestCommon.TEMPEST_RESULTS_DIR
self.raw_list = os.path.join(self.res_dir, 'test_raw_list.txt')
self.list = os.path.join(self.res_dir, 'test_list.txt')
self.conf_file = None
@staticmethod
def read_file(filename):
"""Read file and return content as a stripped list."""
with open(filename) as src:
return [line.strip() for line in src.readlines()]
@staticmethod
def get_verifier_result(verif_id):
"""Retrieve verification results."""
result = {
'num_tests': 0,
'num_success': 0,
'num_failures': 0,
'num_skipped': 0
}
cmd = ["rally", "verify", "show", "--uuid", verif_id]
LOGGER.info("Showing result for a verification: '%s'.", cmd)
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in proc.stdout:
new_line = line.replace(' ', '').split('|')
if 'Tests' in new_line:
break
LOGGER.info(line)
if 'Testscount' in new_line:
result['num_tests'] = int(new_line[2])
elif 'Success' in new_line:
result['num_success'] = int(new_line[2])
elif 'Skipped' in new_line:
result['num_skipped'] = int(new_line[2])
elif 'Failures' in new_line:
result['num_failures'] = int(new_line[2])
return result
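# Illustration (table layout assumed for readability; check the rally version
# in use for the authoritative format): get_verifier_result() expects
# "rally verify show" to print an ASCII table with rows roughly like
#   | Tests count | 1382 |
#   | Success     | 1347 |
#   | Skipped     | 30   |
#   | Failures    | 5    |
# After stripping spaces and splitting on '|', such a row becomes
# ['', 'Testscount', '1382', ...], which is why each counter is read from
# index 2 of the split line.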
def generate_test_list(self):
"""Generate test list based on the test mode."""
LOGGER.debug("Generating test case list...")
if self.mode == 'custom':
if os.path.isfile(conf_utils.TEMPEST_CUSTOM):
shutil.copyfile(
conf_utils.TEMPEST_CUSTOM, self.list)
else:
raise Exception("Tempest test list file %s NOT found."
% conf_utils.TEMPEST_CUSTOM)
else:
if self.mode == 'smoke':
testr_mode = r"'^tempest\.(api|scenario).*\[.*\bsmoke\b.*\]$'"
elif self.mode == 'full':
testr_mode = r"'^tempest\.'"
else:
testr_mode = self.mode
cmd = "(cd {0}; testr list-tests {1} >{2} 2>/dev/null)".format(
self.verifier_repo_dir, testr_mode, self.list)
output = subprocess.check_output(cmd, shell=True)
LOGGER.info("%s\n%s", cmd, output)
def apply_tempest_blacklist(self):
"""Exclude blacklisted test cases."""
LOGGER.debug("Applying tempest blacklist...")
if os.path.exists(self.raw_list):
os.remove(self.raw_list)
os.rename(self.list, self.raw_list)
cases_file = self.read_file(self.raw_list)
result_file = open(self.list, 'w')
black_tests = []
try:
installer_type = env.get('INSTALLER_TYPE')
deploy_scenario = env.get('DEPLOY_SCENARIO')
if installer_type and deploy_scenario:
# if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the
# file
black_list_file = open(conf_utils.TEMPEST_BLACKLIST)
black_list_yaml = yaml.safe_load(black_list_file)
black_list_file.close()
for item in black_list_yaml:
scenarios = item['scenarios']
installers = item['installers']
if (deploy_scenario in scenarios and
installer_type in installers):
tests = item['tests']
for test in tests:
black_tests.append(test)
break
except Exception: # pylint: disable=broad-except
black_tests = []
LOGGER.debug("Tempest blacklist file does not exist.")
for cases_line in cases_file:
for black_tests_line in black_tests:
if black_tests_line in cases_line:
break
else:
result_file.write(str(cases_line) + '\n')
result_file.close()
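# Illustration (scenario and installer names are invented for the example;
# the real file lives at conf_utils.TEMPEST_BLACKLIST): the blacklist YAML
# parsed above is expected to be a list of entries such as
#   - scenarios:
#       - os-odl-nofeature-ha
#     installers:
#       - fuel
#     tests:
#       - tempest.api.network.test_networks
# Only entries whose 'scenarios' and 'installers' both contain the current
# DEPLOY_SCENARIO and INSTALLER_TYPE contribute their 'tests' to black_tests.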
def run_verifier_tests(self):
"""Execute tempest test cases."""
cmd = ["rally", "verify", "start", "--load-list",
self.list]
cmd.extend(self.option)
LOGGER.info("Starting Tempest test suite: '%s'.", cmd)
f_stdout = open(
os.path.join(self.res_dir, "tempest.log"), 'w+')
f_stderr = open(
os.path.join(self.res_dir,
"tempest-error.log"), 'w+')
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=f_stderr,
bufsize=1)
with proc.stdout:
for line in iter(proc.stdout.readline, b''):
if re.search(r"\} tempest\.", line):
LOGGER.info(line.replace('\n', ''))
elif re.search('Starting verification', line):
LOGGER.info(line.replace('\n', ''))
first_pos = line.index("UUID=") + len("UUID=")
last_pos = line.index(") for deployment")
self.verification_id = line[first_pos:last_pos]
LOGGER.debug('Verification UUID: %s', self.verification_id)
f_stdout.write(line)
proc.wait()
f_stdout.close()
f_stderr.close()
if self.verification_id is None:
raise Exception('Verification UUID not found')
def parse_verifier_result(self):
"""Parse and save test results."""
stat = self.get_verifier_result(self.verification_id)
try:
num_executed = stat['num_tests'] - stat['num_skipped']
try:
self.result = 100 * stat['num_success'] / num_executed
except ZeroDivisionError:
self.result = 0
if stat['num_tests'] > 0:
LOGGER.info("All tests have been skipped")
else:
LOGGER.error("No test has been executed")
return
with open(os.path.join(self.res_dir,
"tempest.log"), 'r') as logfile:
output = logfile.read()
success_testcases = []
for match in re.findall(r'.*\{0\} (.*?)[. ]*success ', output):
success_testcases.append(match)
failed_testcases = []
for match in re.findall(r'.*\{0\} (.*?)[. ]*fail', output):
failed_testcases.append(match)
skipped_testcases = []
for match in re.findall(r'.*\{0\} (.*?)[. ]*skip:', output):
skipped_testcases.append(match)
self.details = {"tests_number": stat['num_tests'],
"success_number": stat['num_success'],
"skipped_number": stat['num_skipped'],
"failures_number": stat['num_failures'],
"success": success_testcases,
"skipped": skipped_testcases,
"failures": failed_testcases}
except Exception: # pylint: disable=broad-except
self.result = 0
LOGGER.info("Tempest %s success_rate is %s%%",
self.case_name, self.result)
def generate_report(self):
"""Generate verification report."""
html_file = os.path.join(self.res_dir,
"tempest-report.html")
cmd = ["rally", "verify", "report", "--type", "html", "--uuid",
self.verification_id, "--to", html_file]
subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def configure(self, **kwargs): # pylint: disable=unused-argument
"""
Create all openstack resources for tempest-based testcases and write
tempest.conf.
"""
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
resources = self.resources.create()
compute_cnt = snaps_utils.get_active_compute_cnt(
self.resources.os_creds)
self.conf_file = conf_utils.configure_verifier(self.deployment_dir)
conf_utils.configure_tempest_update_params(
self.conf_file, self.res_dir,
network_name=resources.get("network_name"),
image_id=resources.get("image_id"),
flavor_id=resources.get("flavor_id"),
compute_cnt=compute_cnt)
def run(self, **kwargs):
self.start_time = time.time()
try:
self.configure()
self.generate_test_list()
self.apply_tempest_blacklist()
self.run_verifier_tests()
self.parse_verifier_result()
self.generate_report()
res = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
LOGGER.exception('Error with run')
res = testcase.TestCase.EX_RUN_ERROR
finally:
self.resources.cleanup()
self.stop_time = time.time()
return res
class TempestSmokeSerial(TempestCommon):
"""Tempest smoke serial testcase implementation."""
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tempest_smoke_serial'
TempestCommon.__init__(self, **kwargs)
self.mode = "smoke"
self.option = ["--concurrency", "1"]
class TempestNeutronTrunk(TempestCommon):
"""Tempest neutron trunk testcase implementation."""
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'neutron_trunk'
TempestCommon.__init__(self, **kwargs)
self.mode = "'neutron.tests.tempest.(api|scenario).test_trunk'"
self.res_dir = os.path.join(
getattr(config.CONF, 'dir_results'), 'neutron_trunk')
self.raw_list = os.path.join(self.res_dir, 'test_raw_list.txt')
self.list = os.path.join(self.res_dir, 'test_list.txt')
def configure(self, **kwargs):
super(TempestNeutronTrunk, self).configure(**kwargs)
rconfig = conf_utils.ConfigParser.RawConfigParser()
rconfig.read(self.conf_file)
rconfig.set('network-feature-enabled', 'api_extensions', 'all')
with open(self.conf_file, 'wb') as config_file:
rconfig.write(config_file)
class TempestSmokeParallel(TempestCommon):
"""Tempest smoke parallel testcase implementation."""
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tempest_smoke_parallel'
TempestCommon.__init__(self, **kwargs)
self.mode = "smoke"
class TempestFullParallel(TempestCommon):
"""Tempest full parallel testcase implementation."""
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tempest_full_parallel'
TempestCommon.__init__(self, **kwargs)
self.mode = "full"
class TempestCustom(TempestCommon):
"""Tempest custom testcase implementation."""
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tempest_custom'
TempestCommon.__init__(self, **kwargs)
self.mode = "custom"
self.option = ["--concurrency", "1"]
class TempestDefcore(TempestCommon):
"""Tempest Defcore testcase implementation."""
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tempest_defcore'
TempestCommon.__init__(self, **kwargs)
self.mode = "defcore"
self.option = ["--concurrency", "1"]
class TempestResourcesManager(object):
"""Tempest resource manager."""
def __init__(self, **kwargs):
self.os_creds = kwargs.get('os_creds') or snaps_utils.get_credentials()
self.guid = '-' + str(uuid.uuid4())
self.creators = list()
self.cirros_image_config = getattr(
config.CONF, 'snaps_images_cirros', None)
def _create_project(self):
"""Create project for tests."""
project_creator = deploy_utils.create_project(
self.os_creds, ProjectConfig(
name=getattr(
config.CONF, 'tempest_identity_tenant_name') + self.guid,
description=getattr(
config.CONF, 'tempest_identity_tenant_description'),
domain=self.os_creds.project_domain_name))
if project_creator is None or project_creator.get_project() is None:
raise Exception("Failed to create tenant")
self.creators.append(project_creator)
return project_creator.get_project().id
def _create_user(self):
"""Create user for tests."""
user_creator = deploy_utils.create_user(
self.os_creds, UserConfig(
name=getattr(
config.CONF, 'tempest_identity_user_name') + self.guid,
password=getattr(
config.CONF, 'tempest_identity_user_password'),
project_name=getattr(
config.CONF, 'tempest_identity_tenant_name') + self.guid,
domain_name=self.os_creds.user_domain_name))
if user_creator is None or user_creator.get_user() is None:
raise Exception("Failed to create user")
self.creators.append(user_creator)
return user_creator.get_user().id
def _create_network(self, project_name):
"""Create network for tests."""
tempest_network_type = None
tempest_physical_network = None
tempest_segmentation_id = None
tempest_network_type = getattr(
config.CONF, 'tempest_network_type', None)
tempest_physical_network = getattr(
config.CONF, 'tempest_physical_network', None)
tempest_segmentation_id = getattr(
config.CONF, 'tempest_segmentation_id', None)
tempest_net_name = getattr(
config.CONF, 'tempest_private_net_name') + self.guid
network_creator = deploy_utils.create_network(
self.os_creds, NetworkConfig(
name=tempest_net_name,
project_name=project_name,
network_type=tempest_network_type,
physical_network=tempest_physical_network,
segmentation_id=tempest_segmentation_id,
subnet_settings=[SubnetConfig(
name=getattr(
config.CONF,
'tempest_private_subnet_name') + self.guid,
project_name=project_name,
cidr=getattr(
config.CONF, 'tempest_private_subnet_cidr'),
dns_nameservers=[env.get('NAMESERVER')])]))
if network_creator is None or network_creator.get_network() is None:
raise Exception("Failed to create private network")
self.creators.append(network_creator)
return tempest_net_name
def _create_image(self, name):
"""Create image for tests"""
os_image_settings = openstack_tests.cirros_image_settings(
name, public=True,
image_metadata=self.cirros_image_config)
image_creator = deploy_utils.create_image(
self.os_creds, os_image_settings)
if image_creator is None:
raise Exception('Failed to create image')
self.creators.append(image_creator)
return image_creator.get_image().id
def _create_flavor(self, name):
"""Create flavor for tests."""
flavor_metadata = getattr(config.CONF, 'flavor_extra_specs', None)
flavor_creator = OpenStackFlavor(
self.os_creds, FlavorConfig(
name=name,
ram=getattr(config.CONF, 'openstack_flavor_ram'),
disk=getattr(config.CONF, 'openstack_flavor_disk'),
vcpus=getattr(config.CONF, 'openstack_flavor_vcpus'),
metadata=flavor_metadata))
flavor = flavor_creator.create()
if flavor is None:
raise Exception('Failed to create flavor')
self.creators.append(flavor_creator)
return flavor.id
def create(self, create_project=False):
"""Create resources for Tempest test suite."""
result = {
'tempest_net_name': None,
'image_id': None,
'image_id_alt': None,
'flavor_id': None,
'flavor_id_alt': None
}
project_name = None
if create_project:
LOGGER.debug("Creating project and user for Tempest suite")
project_name = getattr(
config.CONF, 'tempest_identity_tenant_name') + self.guid
result['project_id'] = self._create_project()
result['user_id'] = self._create_user()
result['tenant_id'] = result['project_id'] # for compatibility
LOGGER.debug("Creating private network for Tempest suite")
result['tempest_net_name'] = self._create_network(project_name)
LOGGER.debug("Creating two images for Tempest suite")
image_name = getattr(config.CONF, 'openstack_image_name') + self.guid
result['image_id'] = self._create_image(image_name)
image_name = getattr(
config.CONF, 'openstack_image_name_alt') + self.guid
result['image_id_alt'] = self._create_image(image_name)
LOGGER.info("Creating two flavors for Tempest suite")
name = getattr(config.CONF, 'openstack_flavor_name') + self.guid
result['flavor_id'] = self._create_flavor(name)
name = getattr(
config.CONF, 'openstack_flavor_name_alt') + self.guid
result['flavor_id_alt'] = self._create_flavor(name)
return result
def cleanup(self):
"""
Cleanup all OpenStack objects. Should be called on completion.
"""
for creator in reversed(self.creators):
try:
creator.clean()
except Exception as err: # pylint: disable=broad-except
LOGGER.error('Unexpected error cleaning - %s', err)
| mywulin/functest | functest/opnfv_tests/openstack/tempest/tempest.py | Python | apache-2.0 | 20,259 |
from __future__ import unicode_literals
import ctypes
import json
import random
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from django.contrib.gis import memoryview
from django.contrib.gis.geos import (GEOSException, GEOSIndexError, GEOSGeometry,
GeometryCollection, Point, MultiPoint, Polygon, MultiPolygon, LinearRing,
LineString, MultiLineString, fromfile, fromstr, geos_version_info)
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.geos.libgeos import GEOS_PREPARE
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils.encoding import force_bytes
from django.utils import six
from django.utils.six.moves import xrange
from django.utils import unittest
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is neither None nor the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use
# a WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into the 3D coordinate if there is none. Also, GEOS has
# a bug in versions prior to 3.1 that puts the X coordinate in
# place of Z; an exception should be raised on those versions.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
if GEOS_PREPARE:
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
else:
try:
hexewkb = pnt_3d.hexewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException.')
# Same for EWKB.
self.assertEqual(memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
if GEOS_PREPARE:
self.assertEqual(memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
else:
try:
ewkb = pnt_3d.ewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException')
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
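# Background note (PostGIS EWKB convention, stated here for readability; the
# flag values come from the EWKB spec rather than from this test): EWKB and
# HEXEWKB extend OGC WKB by setting flag bits in the geometry type word --
# 0x20000000 marks an embedded SRID and 0x80000000 marks a Z coordinate.
# That is why hexewkb_2d carries the type word '01000020' followed by
# 'E6100000' (little-endian 4326), while the plain OGC form ogc_hex starts
# with the bare type word '01000000'.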
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
_ = fromstr(err.wkt)
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, memoryview(b'0'))
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
@unittest.skipUnless(gdal.HAS_GDAL, "gdal is required")
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raised on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertTrue(p1 > p2)
self.assertFalse(p1 < p2)
self.assertFalse(p2 > p1)
self.assertTrue(p2 < p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertFalse(p4 < p3)
self.assertTrue(p3 < p4)
self.assertTrue(p4 > p3)
self.assertFalse(p3 > p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
prev = fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
s1, s2 = str(ring1), str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now ensuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting the each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be the circumference of the Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
coll.extend([p.wkt for p in self.geometries.polygons])
coll.extend([mp.wkt for mp in self.geometries.multipoints])
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
@unittest.skipUnless(gdal.HAS_GDAL, "gdal is required")
def test_gdal(self):
"Testing `ogr` and `srs` properties."
g1 = fromstr('POINT(5 23)')
self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
self.assertIsNone(g1.srs)
if GEOS_PREPARE:
g1_3d = fromstr('POINT(5 23 8)')
self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
self.assertEqual(g1_3d.ogr.z, 8)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
self.assertIsInstance(g2.srs, gdal.SpatialReference)
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
@unittest.skipUnless(gdal.HAS_GDAL, "gdal is required to transform geometries")
def test_transform(self):
"Testing `transform` method."
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
@unittest.skipUnless(gdal.HAS_GDAL, "gdal is required to transform geometries")
def test_transform_3d(self):
p3d = GEOSGeometry('POINT (5 23 100)', 4326)
p3d.transform(2774)
if GEOS_PREPARE:
self.assertEqual(p3d.z, 100)
else:
self.assertIsNone(p3d.z)
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
if gdal.HAS_GDAL:
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
finally:
gdal.HAS_GDAL = old_has_gdal
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
def test_transform_nogdal(self):
""" Testing `transform` method (GDAL not available) """
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
finally:
gdal.HAS_GDAL = old_has_gdal
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
from django.utils.six.moves import cPickle
import pickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
@unittest.skipUnless(GEOS_PREPARE, "geos >= 3.1.0 is required")
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
@unittest.skipUnless(GEOS_PREPARE, "geos >= 3.1.0 is required")
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
@unittest.skipUnless(geos_version_info()['version'] >= '3.2.0', "geos >= 3.2.0 is required")
def test_linearref(self):
"Testing linear referencing"
ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
self.assertEqual(ls.project(Point(0, 20)), 10.0)
self.assertEqual(ls.project(Point(7, 6)), 24)
self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0/3)
self.assertEqual(ls.interpolate(10), Point(0, 10))
self.assertEqual(ls.interpolate(24), Point(10, 6))
self.assertEqual(ls.interpolate_normalized(1.0/3), Point(0, 10))
self.assertEqual(mls.project(Point(0, 20)), 10)
self.assertEqual(mls.project(Point(7, 6)), 16)
self.assertEqual(mls.interpolate(9), Point(0, 9))
self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_geos_version(self):
"Testing the GEOS version regular expression."
from django.contrib.gis.geos.libgeos import version_regex
versions = [ ('3.0.0rc4-CAPI-1.3.3', '3.0.0'),
('3.0.0-CAPI-1.4.1', '3.0.0'),
('3.4.0dev-CAPI-1.8.0', '3.4.0') ]
for v, expected in versions:
m = version_regex.match(v)
self.assertTrue(m)
self.assertEqual(m.group('version'), expected)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/django/contrib/gis/geos/tests/test_geos.py | Python | gpl-2.0 | 44,442 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_array_result
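# Readability note: assert_array_result(array, to_match, expected) -- defined
# in test_framework/util.py, which is the authoritative reference -- filters
# `array` for entries containing all key/value pairs in `to_match` and asserts
# that every matching entry also carries the key/value pairs in `expected`.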
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "send"},
{"amount": Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "receive"},
{"amount": Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = {self.nodes[0].getnewaddress(): 0.11,
self.nodes[1].getnewaddress(): 0.22,
self.nodes[0].getnewaddress(): 0.33,
self.nodes[1].getnewaddress(): 0.44}
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.11")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.11")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.33")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.33")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.44")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.44")},
{"txid": txid})
pubkey = self.nodes[1].getaddressinfo(
self.nodes[1].getnewaddress())['pubkey']
multisig = self.nodes[1].createmultisig(1, [pubkey])
self.nodes[0].importaddress(
multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert len(
self.nodes[0].listtransactions(
label="watchonly",
count=100,
include_watchonly=False)) == 0
assert_array_result(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=True),
{"category": "receive", "amount": Decimal("0.1")},
{"txid": txid, "label": "watchonly"})
if __name__ == '__main__':
ListTransactionsTest().main()
| cculianu/bitcoin-abc | test/functional/wallet_listtransactions.py | Python | mit | 4,922 |
###########################################################################
## PyBot ##
## Copyright (C) 2015, Kyle Repinski ##
## Copyright (C) 2015, Andres Preciado (Glitch) ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
###########################################################################
import __main__, requests
from pybotutils import fixHTMLChars, strbetween
info = { "names" : [ "high", "thathigh", "th" ], "access" : 0, "version" : 1 }
def command( message, user, recvfrom ):
thehigh = fixHTMLChars( strbetween( requests.get( "http://www.thathigh.com/random" ).text, "<p>", "</p>" ) )
if thehigh == "":
thehigh = "When you can't find that high..."
__main__.sendMessage( thehigh, recvfrom )
return True
| MWisBest/PyBot | Commands/high/high.py | Python | gpl-3.0 | 1,795 |
import urllib2
import urllib
import json
import active_keys
urlbase = "http://api.amp.active.com/v2/search?%s"
def search_query(activity, location):
# Combine the activity and location arguments into a single free-text query.
query = "%s %s" % (activity, location)
query_params = urllib.urlencode({"api_key": active_keys.search2_key, "query": query})
req = urllib.urlopen(urlbase % query_params)
print req.geturl()
resp = req.read()
print resp
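# Hypothetical usage sketch (argument values invented for illustration):
#   search_query("running", "Denver, CO")
# issues a GET against http://api.amp.active.com/v2/search with the api_key
# and a url-encoded query string, then prints the final URL and the raw
# response body.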
| anirudhranganath/mashventure | app/modules/ActiveInteractor/search.py | Python | gpl-3.0 | 346 |
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Serializes an Environment into a shell file."""
import inspect
# Disable super() warnings since this file must be Python 2 compatible.
# pylint: disable=super-with-arguments
class _BaseShellVisitor(object): # pylint: disable=useless-object-inheritance
def __init__(self, *args, **kwargs):
pathsep = kwargs.pop('pathsep', ':')
super(_BaseShellVisitor, self).__init__(*args, **kwargs)
self._pathsep = pathsep
self._outs = None
def _remove_value_from_path(self, variable, value):
return ('{variable}="$(echo "${variable}"'
' | sed "s|{pathsep}{value}{pathsep}|{pathsep}|g;"'
' | sed "s|^{value}{pathsep}||g;"'
' | sed "s|{pathsep}{value}$||g;"'
')"\nexport {variable}\n'.format(variable=variable,
value=value,
pathsep=self._pathsep))
def visit_hash(self, hash): # pylint: disable=redefined-builtin
del hash
self._outs.write(
inspect.cleandoc('''
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting past
# commands the $PATH changes we made may not be respected.
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r\n
fi
'''))
class ShellVisitor(_BaseShellVisitor):
"""Serializes an Environment into a shell file."""
def __init__(self, *args, **kwargs):
super(ShellVisitor, self).__init__(*args, **kwargs)
self._replacements = ()
def serialize(self, env, outs):
try:
self._replacements = tuple(
(key, env.get(key) if value is None else value)
for key, value in env.replacements)
self._outs = outs
env.accept(self)
finally:
self._replacements = ()
self._outs = None
def _apply_replacements(self, action):
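        # Wherever an expanded replacement value still appears in the text,
        # swap it back to a $var reference, skipping the variable that is
        # currently being assigned.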
value = action.value
for var, replacement in self._replacements:
if var != action.name:
value = value.replace(replacement, '${}'.format(var))
return value
def visit_set(self, set): # pylint: disable=redefined-builtin
value = self._apply_replacements(set)
self._outs.write('{name}="{value}"\nexport {name}\n'.format(
name=set.name, value=value))
def visit_clear(self, clear):
self._outs.write('unset {name}\n'.format(**vars(clear)))
def visit_remove(self, remove):
value = self._apply_replacements(remove)
        self._outs.write('# Remove \n# {value}\n# from\n# {name}\n# '
                         'before adding it back.\n'.format(
                             value=value, name=remove.name))
self._outs.write(self._remove_value_from_path(remove.name, value))
def _join(self, *args):
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return self._pathsep.join(args)
def visit_prepend(self, prepend):
value = self._apply_replacements(prepend)
value = self._join(value, '${}'.format(prepend.name))
self._outs.write('{name}="{value}"\nexport {name}\n'.format(
name=prepend.name, value=value))
def visit_append(self, append):
value = self._apply_replacements(append)
value = self._join('${}'.format(append.name), value)
self._outs.write('{name}="{value}"\nexport {name}\n'.format(
name=append.name, value=value))
def visit_echo(self, echo):
# TODO(mohrr) use shlex.quote().
self._outs.write('if [ -z "${PW_ENVSETUP_QUIET:-}" ]; then\n')
if echo.newline:
self._outs.write(' echo "{}"\n'.format(echo.value))
else:
self._outs.write(' echo -n "{}"\n'.format(echo.value))
self._outs.write('fi\n')
def visit_comment(self, comment):
for line in comment.value.splitlines():
self._outs.write('# {}\n'.format(line))
def visit_command(self, command):
# TODO(mohrr) use shlex.quote here?
self._outs.write('{}\n'.format(' '.join(command.command)))
if not command.exit_on_error:
return
# Assume failing command produced relevant output.
self._outs.write('if [ "$?" -ne 0 ]; then\n return 1\nfi\n')
def visit_doctor(self, doctor):
self._outs.write('if [ -z "$PW_ACTIVATE_SKIP_CHECKS" ]; then\n')
self.visit_command(doctor)
self._outs.write('else\n')
self._outs.write('echo Skipping environment check because '
'PW_ACTIVATE_SKIP_CHECKS is set\n')
self._outs.write('fi\n')
def visit_blank_line(self, blank_line):
del blank_line
self._outs.write('\n')
def visit_function(self, function):
self._outs.write('{name}() {{\n{body}\n}}\n'.format(
name=function.name, body=function.body))
class DeactivateShellVisitor(_BaseShellVisitor):
"""Removes values from an Environment."""
def __init__(self, *args, **kwargs):
pathsep = kwargs.pop('pathsep', ':')
super(DeactivateShellVisitor, self).__init__(*args, **kwargs)
self._pathsep = pathsep
def serialize(self, env, outs):
try:
self._outs = outs
env.accept(self)
finally:
self._outs = None
def visit_set(self, set): # pylint: disable=redefined-builtin
if set.deactivate:
self._outs.write('unset {name}\n'.format(name=set.name))
def visit_clear(self, clear):
pass # Not relevant.
def visit_remove(self, remove):
pass # Not relevant.
def visit_prepend(self, prepend):
self._outs.write(
self._remove_value_from_path(prepend.name, prepend.value))
def visit_append(self, append):
self._outs.write(
self._remove_value_from_path(append.name, append.value))
def visit_echo(self, echo):
pass # Not relevant.
def visit_comment(self, comment):
pass # Not relevant.
def visit_command(self, command):
pass # Not relevant.
def visit_doctor(self, doctor):
pass # Not relevant.
def visit_blank_line(self, blank_line):
pass # Not relevant.
def visit_function(self, function):
pass # Not relevant.
| google/pigweed | pw_env_setup/py/pw_env_setup/shell_visitor.py | Python | apache-2.0 | 6,987 |
# -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from martbell.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
def test_get_by_id(self):
user = User('foo', 'foo@bar.com')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert user.password is None
def test_factory(self, db):
user = UserFactory(password="myprecious")
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
user = User.create(username="foo", email="foo@bar.com",
password="foobarbaz123")
assert user.check_password('foobarbaz123') is True
assert user.check_password("barfoobaz") is False
def test_full_name(self):
user = UserFactory(first_name="Foo", last_name="Bar")
assert user.full_name == "Foo Bar"
def test_roles(self):
role = Role(name='admin')
role.save()
u = UserFactory()
u.roles.append(role)
u.save()
assert role in u.roles
| tinker20/tinnker-flask | tests/test_models.py | Python | bsd-3-clause | 1,681 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Parameter Class for variable parameters.
"""
from uuid import uuid4
import sympy
from .parameterexpression import ParameterExpression
class Parameter(ParameterExpression):
"""Parameter Class for variable parameters"""
def __new__(cls, _, uuid=None):
# Parameter relies on self._uuid being set prior to other attributes
# (e.g. symbol_map) which may depend on self._uuid for Parameter's hash
# or __eq__ functions.
obj = object.__new__(cls)
if uuid is None:
obj._uuid = uuid4()
else:
obj._uuid = uuid
return obj
def __getnewargs__(self):
# Unpickling won't in general call __init__ but will always call
# __new__. Specify arguments to be passed to __new__ when unpickling.
return (self.name, self._uuid)
def __init__(self, name):
self._name = name
symbol = sympy.Symbol(name)
super().__init__(symbol_map={self: symbol}, expr=symbol)
def subs(self, parameter_map):
"""Substitute self with the corresponding parameter in parameter_map."""
return parameter_map[self]
@property
def name(self):
"""Returns the name of the Parameter."""
return self._name
def __str__(self):
return self.name
def __copy__(self):
return self
def __deepcopy__(self, memo=None):
return self
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.name)
def __eq__(self, other):
return isinstance(other, Parameter) and self._uuid == other._uuid
def __hash__(self):
return hash(self._uuid)
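# A minimal usage sketch, assuming the module is executed in package context
# (for example ``python -m qiskit.circuit.parameter``) so that the relative
# import above resolves; subs() is a plain dictionary lookup.
if __name__ == '__main__':
    theta = Parameter('theta')
    phi = Parameter('phi')
    assert theta.subs({theta: phi}) is phi
    print(repr(theta))  # Parameter(theta)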
| QISKit/qiskit-sdk-py | qiskit/circuit/parameter.py | Python | apache-2.0 | 2,170 |
#!/bin/env python
# mainly for sys.argv[], sys.argv[0] is the name of the program
import sys
import time
# mainly for arrays
import numpy as np
# Time the following methods
def a():
print 'hello'
def b():
print 'hi'
if __name__ == '__main__':
print 'hello'
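    # A minimal timing sketch, assuming wall-clock time.time() is precise
    # enough here: call the two functions above and report the elapsed time.
    start = time.time()
    a()
    b()
    print 'elapsed: %.6f seconds' % (time.time() - start)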
| ketancmaheshwari/hello-goog | src/python/timeit.py | Python | apache-2.0 | 276 |
"""Token Class"""
class Token:
def __init__(self, lexeme, tCode):
self.lexeme = ""
self.tCode = "" | davidk12/FMAL-Compiler | Token.py | Python | gpl-2.0 | 104 |
import math
import random
import numpy as np
import IMP
import IMP.core
import IMP.test
def _get_beta(N, b):
return 3. / (2. * N * b**2)
def _get_score(z, N, b):
beta = _get_beta(N, b)
return beta * z**2 - math.log(2 * beta * z)
def _get_derv(z, N, b):
beta = _get_beta(N, b)
return 2 * beta * z - 1. / float(z)
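# The helpers above mirror the closed-form score being tested: with
# beta = 3 / (2 * N * b**2), score(z) = beta * z**2 - log(2 * beta * z) and
# d(score)/dz = 2 * beta * z - 1 / z.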
def _get_linear_score(z, N, b):
slope = _get_linear_derv(N, b)
intercept = 5.258546595708 - .5 * math.log(_get_beta(N, b))
return slope * z + intercept
def _get_linear_derv(N, b):
return -141.407214101686 * _get_beta(N, b)**.5
class Tests(IMP.test.TestCase):
"""Tests for SurfaceTetheredChain."""
def test_init(self):
"""Test correct initialization."""
func = IMP.core.SurfaceTetheredChain(10, 8)
func.set_was_used(True)
def test_evaluate(self):
"""Test evaluates to correct scores and derivatives."""
for i in range(100):
N = random.randint(1, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
max_z = 2 * N * b
beta = _get_beta(N, b)
min_z = .01 / (2 * beta)**.5
z_range = np.linspace(min_z, max_z, 100)
for z in z_range:
corr_score = _get_score(z, N, b)
corr_derv = _get_derv(z, N, b)
score, deriv = func.evaluate_with_derivative(z)
scoreonly = func.evaluate(z)
self.assertAlmostEqual(scoreonly, corr_score, delta=1e-4)
self.assertAlmostEqual(score, corr_score, delta=1e-4)
self.assertAlmostEqual(deriv, corr_derv, delta=1e-4)
def test_evaluate_linear(self):
"""Test linear region evaluates to correct scores and derivatives."""
for i in range(100):
N = random.randint(3, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
beta = _get_beta(N, b)
min_z = .01 / (2 * beta)**.5
z_range = np.linspace(-1, min_z, 100)
corr_derv = _get_linear_derv(N, b)
for z in z_range:
corr_score = _get_linear_score(z, N, b)
score, deriv = func.evaluate_with_derivative(z)
scoreonly = func.evaluate(z)
self.assertAlmostEqual(scoreonly / corr_score, 1, delta=1e-6)
self.assertAlmostEqual(score / corr_score, 1, delta=1e-6)
self.assertAlmostEqual(deriv / corr_derv, 1, delta=1e-6)
def test_special_values(self):
"""Test special distance values are correctly calculated."""
for i in range(10):
N = random.randint(3, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
beta = _get_beta(N, b)
zmin = 1. / (2 * beta)**.5
zmean = .5 * (math.pi / beta)**.5
self.assertAlmostEqual(func.get_distance_at_minimum(), zmin,
delta=1e-6)
self.assertAlmostEqual(func.evaluate_with_derivative(zmin)[1], 0.,
delta=1e-6)
self.assertAlmostEqual(func.get_average_distance(), zmean,
delta=1e-6)
if __name__ == '__main__':
IMP.test.main()
| shanot/imp | modules/core/test/test_surface_tethered_chain.py | Python | gpl-3.0 | 3,454 |
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing HPL, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.filetools import run_cmd
class EB_HPL(ConfigureMake):
"""
Support for building HPL (High Performance Linpack)
- create Make.UNKNOWN
- build with make and install
"""
def configure_step(self, subdir=None):
"""
Create Make.UNKNOWN file to build from
- provide subdir argument so this can be reused in HPCC easyblock
"""
basedir = self.cfg['start_dir']
if subdir:
makeincfile = os.path.join(basedir, subdir, 'Make.UNKNOWN')
setupdir = os.path.join(basedir, subdir, 'setup')
else:
makeincfile = os.path.join(basedir, 'Make.UNKNOWN')
setupdir = os.path.join(basedir, 'setup')
try:
os.chdir(setupdir)
except OSError, err:
self.log.exception("Failed to change to to dir %s: %s" % (setupdir, err))
cmd = "/bin/bash make_generic"
run_cmd(cmd, log_all=True, simple=True, log_output=True)
try:
os.symlink(os.path.join(setupdir, 'Make.UNKNOWN'), os.path.join(makeincfile))
except OSError, err:
self.log.exception("Failed to symlink Make.UNKNOWN from %s to %s: %s" % (setupdir, makeincfile, err))
# go back
os.chdir(self.cfg['start_dir'])
def build_step(self):
"""
Build with make and correct make options
"""
for envvar in ['MPICC', 'LIBLAPACK_MT', 'CPPFLAGS', 'LDFLAGS', 'CFLAGS']:
if not os.getenv(envvar):
self.log.error("Required environment variable %s not found (no toolchain used?)." % envvar)
# build dir
extra_makeopts = 'TOPdir="%s" ' % self.cfg['start_dir']
# compilers
extra_makeopts += 'CC="%(mpicc)s" MPICC="%(mpicc)s" LINKER="%(mpicc)s" ' % {'mpicc': os.getenv('MPICC')}
# libraries: LAPACK and FFTW
extra_makeopts += 'LAlib="%s %s" ' % (os.getenv('LIBFFT'), os.getenv('LIBLAPACK_MT'))
# HPL options
extra_makeopts += 'HPL_OPTS="%s -DUSING_FFTW" ' % os.getenv('CPPFLAGS')
# linker flags
extra_makeopts += 'LINKFLAGS="%s" ' % os.getenv('LDFLAGS')
# C compilers flags
extra_makeopts += "CCFLAGS='$(HPL_DEFS) %s' " % os.getenv('CFLAGS')
# set options and build
self.cfg.update('makeopts', extra_makeopts)
super(EB_HPL, self).build_step()
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = os.path.join(self.cfg['start_dir'], 'bin', 'UNKNOWN')
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
try:
os.makedirs(destdir)
for filename in ["xhpl", "HPL.dat"]:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError, err:
self.log.exception("Copying %s to installation dir %s failed: %s" % (srcfile, destdir, err))
def sanity_check_step(self):
"""
Custom sanity check for HPL
"""
custom_paths = {
'files': ["bin/xhpl"],
'dirs': []
}
super(EB_HPL, self).sanity_check_step(custom_paths)
| hajgato/easybuild-easyblocks | easybuild/easyblocks/h/hpl.py | Python | gpl-2.0 | 4,729 |
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform basic operations with the
Google Cloud Vision API.
Example Usage:
python detect.py text ./resources/wakeupcat.jpg
python detect.py labels ./resources/landmark.jpg
python detect.py web ./resources/landmark.jpg
python detect.py web-uri http://wheresgus.com/dog.JPG
python detect.py web-geo ./resources/city.jpg
python detect.py faces-uri gs://your-bucket/file.jpg
python detect.py ocr-uri gs://python-docs-samples-tests/HodgeConj.pdf \
gs://BUCKET_NAME/PREFIX/
python detect.py object-localization ./resources/puppies.jpg
python detect.py object-localization-uri gs://...
For more information, see the documentation at
https://cloud.google.com/vision/docs.
"""
import argparse
# [START vision_face_detection]
def detect_faces(path):
"""Detects faces in an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_face_detection]
# [START vision_python_migration_image_file]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
# [END vision_python_migration_image_file]
response = client.face_detection(image=image)
faces = response.face_annotations
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Faces:')
for face in faces:
print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in face.bounding_poly.vertices])
print('face bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_face_detection]
# [END vision_face_detection]
# [START vision_face_detection_gcs]
def detect_faces_uri(uri):
"""Detects faces in the file located in Google Cloud Storage or the web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_image_uri]
image = vision.Image()
image.source.image_uri = uri
# [END vision_python_migration_image_uri]
response = client.face_detection(image=image)
faces = response.face_annotations
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Faces:')
for face in faces:
print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in face.bounding_poly.vertices])
print('face bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_face_detection_gcs]
# [START vision_label_detection]
def detect_labels(path):
"""Detects labels in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_label_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels:')
for label in labels:
print(label.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_label_detection]
# [END vision_label_detection]
# [START vision_label_detection_gcs]
def detect_labels_uri(uri):
"""Detects labels in the file located in Google Cloud Storage or on the
Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels:')
for label in labels:
print(label.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_label_detection_gcs]
# [START vision_landmark_detection]
def detect_landmarks(path):
"""Detects landmarks in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_landmark_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.landmark_detection(image=image)
landmarks = response.landmark_annotations
print('Landmarks:')
for landmark in landmarks:
print(landmark.description)
for location in landmark.locations:
lat_lng = location.lat_lng
print('Latitude {}'.format(lat_lng.latitude))
print('Longitude {}'.format(lat_lng.longitude))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_landmark_detection]
# [END vision_landmark_detection]
# [START vision_landmark_detection_gcs]
def detect_landmarks_uri(uri):
"""Detects landmarks in the file located in Google Cloud Storage or on the
Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.landmark_detection(image=image)
landmarks = response.landmark_annotations
print('Landmarks:')
for landmark in landmarks:
print(landmark.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_landmark_detection_gcs]
# [START vision_logo_detection]
def detect_logos(path):
"""Detects logos in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_logo_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.logo_detection(image=image)
logos = response.logo_annotations
print('Logos:')
for logo in logos:
print(logo.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_logo_detection]
# [END vision_logo_detection]
# [START vision_logo_detection_gcs]
def detect_logos_uri(uri):
"""Detects logos in the file located in Google Cloud Storage or on the Web.
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.logo_detection(image=image)
logos = response.logo_annotations
print('Logos:')
for logo in logos:
print(logo.description)
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_logo_detection_gcs]
# [START vision_safe_search_detection]
def detect_safe_search(path):
"""Detects unsafe features in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_safe_search_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.safe_search_detection(image=image)
safe = response.safe_search_annotation
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Safe search:')
print('adult: {}'.format(likelihood_name[safe.adult]))
print('medical: {}'.format(likelihood_name[safe.medical]))
print('spoofed: {}'.format(likelihood_name[safe.spoof]))
print('violence: {}'.format(likelihood_name[safe.violence]))
print('racy: {}'.format(likelihood_name[safe.racy]))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_safe_search_detection]
# [END vision_safe_search_detection]
# [START vision_safe_search_detection_gcs]
def detect_safe_search_uri(uri):
"""Detects unsafe features in the file located in Google Cloud Storage or
on the Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.safe_search_detection(image=image)
safe = response.safe_search_annotation
# Names of likelihood from google.cloud.vision.enums
likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
'LIKELY', 'VERY_LIKELY')
print('Safe search:')
print('adult: {}'.format(likelihood_name[safe.adult]))
print('medical: {}'.format(likelihood_name[safe.medical]))
print('spoofed: {}'.format(likelihood_name[safe.spoof]))
print('violence: {}'.format(likelihood_name[safe.violence]))
print('racy: {}'.format(likelihood_name[safe.racy]))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_safe_search_detection_gcs]
# [START vision_text_detection]
def detect_text(path):
"""Detects text in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_text_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
print('Texts:')
for text in texts:
print('\n"{}"'.format(text.description))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in text.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_text_detection]
# [END vision_text_detection]
# [START vision_text_detection_gcs]
def detect_text_uri(uri):
"""Detects text in the file located in Google Cloud Storage or on the Web.
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.text_detection(image=image)
texts = response.text_annotations
print('Texts:')
for text in texts:
print('\n"{}"'.format(text.description))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in text.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_text_detection_gcs]
# [START vision_image_property_detection]
def detect_properties(path):
"""Detects image properties in the file."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_image_properties]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.image_properties(image=image)
props = response.image_properties_annotation
print('Properties:')
for color in props.dominant_colors.colors:
print('fraction: {}'.format(color.pixel_fraction))
print('\tr: {}'.format(color.color.red))
print('\tg: {}'.format(color.color.green))
print('\tb: {}'.format(color.color.blue))
print('\ta: {}'.format(color.color.alpha))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_image_properties]
# [END vision_image_property_detection]
# [START vision_image_property_detection_gcs]
def detect_properties_uri(uri):
"""Detects image properties in the file located in Google Cloud Storage or
on the Web."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.image_properties(image=image)
props = response.image_properties_annotation
print('Properties:')
for color in props.dominant_colors.colors:
print('frac: {}'.format(color.pixel_fraction))
print('\tr: {}'.format(color.color.red))
print('\tg: {}'.format(color.color.green))
print('\tb: {}'.format(color.color.blue))
print('\ta: {}'.format(color.color.alpha))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_image_property_detection_gcs]
# [START vision_web_detection]
def detect_web(path):
"""Detects web annotations given an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_web_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.web_detection(image=image)
annotations = response.web_detection
if annotations.best_guess_labels:
for label in annotations.best_guess_labels:
print('\nBest guess label: {}'.format(label.label))
if annotations.pages_with_matching_images:
print('\n{} Pages with matching images found:'.format(
len(annotations.pages_with_matching_images)))
for page in annotations.pages_with_matching_images:
print('\n\tPage url : {}'.format(page.url))
if page.full_matching_images:
print('\t{} Full Matches found: '.format(
len(page.full_matching_images)))
for image in page.full_matching_images:
print('\t\tImage url : {}'.format(image.url))
if page.partial_matching_images:
print('\t{} Partial Matches found: '.format(
len(page.partial_matching_images)))
for image in page.partial_matching_images:
print('\t\tImage url : {}'.format(image.url))
if annotations.web_entities:
print('\n{} Web entities found: '.format(
len(annotations.web_entities)))
for entity in annotations.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if annotations.visually_similar_images:
print('\n{} visually similar images found:\n'.format(
len(annotations.visually_similar_images)))
for image in annotations.visually_similar_images:
print('\tImage url : {}'.format(image.url))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_web_detection]
# [END vision_web_detection]
# [START vision_web_detection_gcs]
def detect_web_uri(uri):
"""Detects web annotations in the file located in Google Cloud Storage."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.web_detection(image=image)
annotations = response.web_detection
if annotations.best_guess_labels:
for label in annotations.best_guess_labels:
print('\nBest guess label: {}'.format(label.label))
if annotations.pages_with_matching_images:
print('\n{} Pages with matching images found:'.format(
len(annotations.pages_with_matching_images)))
for page in annotations.pages_with_matching_images:
print('\n\tPage url : {}'.format(page.url))
if page.full_matching_images:
print('\t{} Full Matches found: '.format(
len(page.full_matching_images)))
for image in page.full_matching_images:
print('\t\tImage url : {}'.format(image.url))
if page.partial_matching_images:
print('\t{} Partial Matches found: '.format(
len(page.partial_matching_images)))
for image in page.partial_matching_images:
print('\t\tImage url : {}'.format(image.url))
if annotations.web_entities:
print('\n{} Web entities found: '.format(
len(annotations.web_entities)))
for entity in annotations.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if annotations.visually_similar_images:
print('\n{} visually similar images found:\n'.format(
len(annotations.visually_similar_images)))
for image in annotations.visually_similar_images:
print('\tImage url : {}'.format(image.url))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_web_detection_gcs]
# [START vision_web_detection_include_geo]
def web_entities_include_geo_results(path):
"""Detects web annotations given an image, using the geotag metadata
in the image to detect web entities."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
web_detection_params = vision.WebDetectionParams(
include_geo_results=True)
image_context = vision.ImageContext(
web_detection_params=web_detection_params)
response = client.web_detection(image=image, image_context=image_context)
for entity in response.web_detection.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_web_detection_include_geo]
# [START vision_web_detection_include_geo_gcs]
def web_entities_include_geo_results_uri(uri):
"""Detects web annotations given an image in the file located in
    Google Cloud Storage, using the geotag metadata in the image to
detect web entities."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
web_detection_params = vision.WebDetectionParams(
include_geo_results=True)
image_context = vision.ImageContext(
web_detection_params=web_detection_params)
response = client.web_detection(image=image, image_context=image_context)
for entity in response.web_detection.web_entities:
print('\n\tScore : {}'.format(entity.score))
print(u'\tDescription: {}'.format(entity.description))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_web_detection_include_geo_gcs]
# [START vision_crop_hint_detection]
def detect_crop_hints(path):
"""Detects crop hints in an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_crop_hints]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77])
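    # An aspect ratio of 1.77 asks for roughly 16:9 (width/height) crops.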
image_context = vision.ImageContext(
crop_hints_params=crop_hints_params)
response = client.crop_hints(image=image, image_context=image_context)
hints = response.crop_hints_annotation.crop_hints
for n, hint in enumerate(hints):
print('\nCrop Hint: {}'.format(n))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in hint.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_crop_hints]
# [END vision_crop_hint_detection]
# [START vision_crop_hint_detection_gcs]
def detect_crop_hints_uri(uri):
"""Detects crop hints in the file located in Google Cloud Storage."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77])
image_context = vision.ImageContext(
crop_hints_params=crop_hints_params)
response = client.crop_hints(image=image, image_context=image_context)
hints = response.crop_hints_annotation.crop_hints
for n, hint in enumerate(hints):
print('\nCrop Hint: {}'.format(n))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in hint.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_crop_hint_detection_gcs]
# [START vision_fulltext_detection]
def detect_document(path):
"""Detects document features in an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
# [START vision_python_migration_document_text_detection]
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.document_text_detection(image=image)
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_python_migration_document_text_detection]
# [END vision_fulltext_detection]
# [START vision_fulltext_detection_gcs]
def detect_document_uri(uri):
"""Detects document features in the file located in Google Cloud
Storage."""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.document_text_detection(image=image)
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
# [END vision_fulltext_detection_gcs]
# [START vision_text_detection_pdf_gcs]
def async_detect_document(gcs_source_uri, gcs_destination_uri):
"""OCR with PDF/TIFF as source files on GCS"""
import json
import re
from google.cloud import vision
from google.cloud import storage
# Supported mime_types are: 'application/pdf' and 'image/tiff'
mime_type = 'application/pdf'
# How many pages should be grouped into each json output file.
batch_size = 2
client = vision.ImageAnnotatorClient()
feature = vision.Feature(
type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)
gcs_source = vision.GcsSource(uri=gcs_source_uri)
input_config = vision.InputConfig(
gcs_source=gcs_source, mime_type=mime_type)
gcs_destination = vision.GcsDestination(uri=gcs_destination_uri)
output_config = vision.OutputConfig(
gcs_destination=gcs_destination, batch_size=batch_size)
async_request = vision.AsyncAnnotateFileRequest(
features=[feature], input_config=input_config,
output_config=output_config)
operation = client.async_batch_annotate_files(
requests=[async_request])
print('Waiting for the operation to finish.')
operation.result(timeout=420)
# Once the request has completed and the output has been
# written to GCS, we can list all the output files.
storage_client = storage.Client()
match = re.match(r'gs://([^/]+)/(.+)', gcs_destination_uri)
bucket_name = match.group(1)
prefix = match.group(2)
bucket = storage_client.get_bucket(bucket_name)
# List objects with the given prefix, filtering out folders.
blob_list = [blob for blob in list(bucket.list_blobs(
prefix=prefix)) if not blob.name.endswith('/')]
print('Output files:')
for blob in blob_list:
print(blob.name)
# Process the first output file from GCS.
# Since we specified batch_size=2, the first response contains
# the first two pages of the input file.
output = blob_list[0]
json_string = output.download_as_string()
response = json.loads(json_string)
# The actual response for the first page of the input file.
first_page_response = response['responses'][0]
annotation = first_page_response['fullTextAnnotation']
# Here we print the full text from the first page.
# The response contains more information:
# annotation/pages/blocks/paragraphs/words/symbols
# including confidence scores and bounding boxes
print('Full text:\n')
print(annotation['text'])
# [END vision_text_detection_pdf_gcs]
# [START vision_localize_objects]
def localize_objects(path):
"""Localize objects in the local image.
Args:
path: The path to the local file.
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
with open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects]
# [START vision_localize_objects_gcs]
def localize_objects_uri(uri):
"""Localize objects in the image on Google Cloud Storage
Args:
uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects_gcs]
def run_local(args):
if args.command == 'faces':
detect_faces(args.path)
elif args.command == 'labels':
detect_labels(args.path)
elif args.command == 'landmarks':
detect_landmarks(args.path)
elif args.command == 'text':
detect_text(args.path)
elif args.command == 'logos':
detect_logos(args.path)
elif args.command == 'safe-search':
detect_safe_search(args.path)
elif args.command == 'properties':
detect_properties(args.path)
elif args.command == 'web':
detect_web(args.path)
elif args.command == 'crophints':
detect_crop_hints(args.path)
elif args.command == 'document':
detect_document(args.path)
elif args.command == 'web-geo':
web_entities_include_geo_results(args.path)
elif args.command == 'object-localization':
localize_objects(args.path)
def run_uri(args):
if args.command == 'text-uri':
detect_text_uri(args.uri)
elif args.command == 'faces-uri':
detect_faces_uri(args.uri)
elif args.command == 'labels-uri':
detect_labels_uri(args.uri)
elif args.command == 'landmarks-uri':
detect_landmarks_uri(args.uri)
elif args.command == 'logos-uri':
detect_logos_uri(args.uri)
elif args.command == 'safe-search-uri':
detect_safe_search_uri(args.uri)
elif args.command == 'properties-uri':
detect_properties_uri(args.uri)
elif args.command == 'web-uri':
detect_web_uri(args.uri)
elif args.command == 'crophints-uri':
detect_crop_hints_uri(args.uri)
elif args.command == 'document-uri':
detect_document_uri(args.uri)
elif args.command == 'web-geo-uri':
web_entities_include_geo_results_uri(args.uri)
elif args.command == 'ocr-uri':
async_detect_document(args.uri, args.destination_uri)
elif args.command == 'object-localization-uri':
localize_objects_uri(args.uri)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
detect_faces_parser = subparsers.add_parser(
'faces', help=detect_faces.__doc__)
detect_faces_parser.add_argument('path')
faces_file_parser = subparsers.add_parser(
'faces-uri', help=detect_faces_uri.__doc__)
faces_file_parser.add_argument('uri')
detect_labels_parser = subparsers.add_parser(
'labels', help=detect_labels.__doc__)
detect_labels_parser.add_argument('path')
labels_file_parser = subparsers.add_parser(
'labels-uri', help=detect_labels_uri.__doc__)
labels_file_parser.add_argument('uri')
detect_landmarks_parser = subparsers.add_parser(
'landmarks', help=detect_landmarks.__doc__)
detect_landmarks_parser.add_argument('path')
landmark_file_parser = subparsers.add_parser(
'landmarks-uri', help=detect_landmarks_uri.__doc__)
landmark_file_parser.add_argument('uri')
detect_text_parser = subparsers.add_parser(
'text', help=detect_text.__doc__)
detect_text_parser.add_argument('path')
text_file_parser = subparsers.add_parser(
'text-uri', help=detect_text_uri.__doc__)
text_file_parser.add_argument('uri')
detect_logos_parser = subparsers.add_parser(
'logos', help=detect_logos.__doc__)
detect_logos_parser.add_argument('path')
logos_file_parser = subparsers.add_parser(
'logos-uri', help=detect_logos_uri.__doc__)
logos_file_parser.add_argument('uri')
safe_search_parser = subparsers.add_parser(
'safe-search', help=detect_safe_search.__doc__)
safe_search_parser.add_argument('path')
safe_search_file_parser = subparsers.add_parser(
'safe-search-uri',
help=detect_safe_search_uri.__doc__)
safe_search_file_parser.add_argument('uri')
properties_parser = subparsers.add_parser(
'properties', help=detect_properties.__doc__)
properties_parser.add_argument('path')
properties_file_parser = subparsers.add_parser(
'properties-uri',
help=detect_properties_uri.__doc__)
properties_file_parser.add_argument('uri')
# 1.1 Vision features
web_parser = subparsers.add_parser(
'web', help=detect_web.__doc__)
web_parser.add_argument('path')
web_uri_parser = subparsers.add_parser(
'web-uri',
help=detect_web_uri.__doc__)
web_uri_parser.add_argument('uri')
web_geo_parser = subparsers.add_parser(
'web-geo', help=web_entities_include_geo_results.__doc__)
web_geo_parser.add_argument('path')
web_geo_uri_parser = subparsers.add_parser(
'web-geo-uri',
help=web_entities_include_geo_results_uri.__doc__)
web_geo_uri_parser.add_argument('uri')
crop_hints_parser = subparsers.add_parser(
'crophints', help=detect_crop_hints.__doc__)
crop_hints_parser.add_argument('path')
crop_hints_uri_parser = subparsers.add_parser(
'crophints-uri', help=detect_crop_hints_uri.__doc__)
crop_hints_uri_parser.add_argument('uri')
document_parser = subparsers.add_parser(
'document', help=detect_document.__doc__)
document_parser.add_argument('path')
document_uri_parser = subparsers.add_parser(
'document-uri', help=detect_document_uri.__doc__)
document_uri_parser.add_argument('uri')
ocr_uri_parser = subparsers.add_parser(
'ocr-uri', help=async_detect_document.__doc__)
ocr_uri_parser.add_argument('uri')
ocr_uri_parser.add_argument('destination_uri')
object_localization_parser = subparsers.add_parser(
        'object-localization', help=localize_objects.__doc__)
object_localization_parser.add_argument('path')
object_localization_uri_parser = subparsers.add_parser(
        'object-localization-uri', help=localize_objects_uri.__doc__)
object_localization_uri_parser.add_argument('uri')
args = parser.parse_args()
if 'uri' in args.command:
run_uri(args)
else:
run_local(args)
| googleapis/python-vision | samples/snippets/detect/detect.py | Python | apache-2.0 | 38,224 |
from population import Population
from cell import Cell
# Create population for the simulation
p = Population(Cell(drug_resistance=2, can_divide=True))
# Start loop
while True:
action = raw_input('Enter action (tick/kill/quit): ')
if(action == 'tick'):
number_of_ticks = int(raw_input('Enter number of ticks: '))
for x in range(number_of_ticks):
p.tick()
print '---'
print p
elif(action == 'kill'):
amount_of_poison = int(raw_input('Enter amount of poison: '))
p.poison(amount_of_poison)
p.tick()
print '---'
print p
elif(action == 'quit'):
break
else:
print 'Unknown command: ', action
| sabarjp/ClonalCellSimulation | simulate.py | Python | mit | 715 |
#!C:\Users\Beatriz\Desktop\django\pasta\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| beatrizjesus/my-first-blog | pasta/Scripts/django-admin.py | Python | mit | 165 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
import numpy as np
class ApiOnesTest(unittest.TestCase):
def test_paddle_ones(self):
with paddle.static.program_guard(paddle.static.Program()):
ones = paddle.ones(shape=[10])
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with paddle.static.program_guard(paddle.static.Program()):
ones = paddle.ones(shape=[10], dtype="float64")
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with paddle.static.program_guard(paddle.static.Program()):
ones = paddle.ones(shape=[10], dtype="int64")
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
def test_fluid_ones(self):
with paddle.static.program_guard(paddle.static.Program()):
ones = fluid.layers.ones(shape=[10], dtype="int64")
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
class ApiOnesZerosError(unittest.TestCase):
def test_errors(self):
def test_error1():
with paddle.static.program_guard(paddle.static.Program()):
ones = paddle.ones(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error1)
def test_error2():
with paddle.static.program_guard(paddle.static.Program()):
ones = paddle.ones(shape=10)
self.assertRaises(TypeError, test_error2)
def test_error3():
with paddle.static.program_guard(paddle.static.Program()):
ones = fluid.layers.ones(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error3)
def test_error4():
with paddle.static.program_guard(paddle.static.Program()):
ones = fluid.layers.ones(shape=[10], dtype="int8")
self.assertRaises(TypeError, test_error4)
if __name__ == "__main__":
unittest.main()
| PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_ones_op.py | Python | apache-2.0 | 3,441 |
# coding=utf-8
"""
Statistical calculations.
Many functions to refactor into Python functions.
"""
# @Author: Zackary BEAUGELIN <gysco>
# @Date: 2017-04-11T10:01:21+02:00
# @Email: zackary.beaugelin@epitech.eu
# @Project: SSWD
# @Filename: statistics.py
# @Last modified by: gysco
# @Last modified time: 2017-06-16T11:39:51+02:00
import math
from numpy import mean, median, percentile, std
from numpy.random import choice, seed
from scipy.stats import norm
import initialisation
from common import cellule_gras, compt_inf, ecrire_titre, trier_tirages_feuille
def threaded_bootstrap(data, nbvar, B, pond, line_start, nom_feuille_stat):
"""Optimized bootstrap on thread."""
i = 1
j = 0
bootstrap = choice(data, nbvar * B, p=pond)
for x in bootstrap:
if j == nbvar:
j = 0
i += 1
initialisation.Worksheets[nom_feuille_stat].Cells \
.at[i + line_start, j] = x
j += 1
def tirage(nom_feuille_stat, nbvar, B, nom_feuille_pond, col_data, col_pond,
seed_check):
"""
    Perform a random draw following a multinomial distribution.
    @param nom_feuille_stat: name of the sheet used to display the
                             results of the draw
    @param nbvar: number of points drawn (number of data points used
                  in the HC computation)
    @param B: number of bootstrap draws
    @param nom_feuille_pond: name of the sheet containing the data to
                             draw from and the associated probabilities
    @param lig_deb: first row of the range of data to draw from
                    note: there is a title row before this row
    @param col_data: column of the data to draw from
    @param lig_fin: last row of the range of data to draw from
    @param col_pond: column of the probability associated with each data point
"""
data = list()
pond = list()
if seed_check:
seed(42)
for x in range(1, len(initialisation.Worksheets[nom_feuille_pond].Cells)):
data.append(initialisation.Worksheets[nom_feuille_pond]
.Cells.at[x, col_data])
pond.append(initialisation.Worksheets[nom_feuille_pond]
.Cells.at[x, col_pond])
for j in range(0, nbvar):
initialisation.Worksheets[nom_feuille_stat].Cells.at[
0, j] = 'POINT ' + str(j + 1)
"""
thread_number = cpu_count()
with Pool(thread_number) as p:
for i in range(0, thread_number):
p.apply_async(func=threaded_bootstrap, args=(
data, nbvar, int(B / thread_number), pond,
int((B / thread_number) * i), nom_feuille_stat,))
"""
i = 1
j = 0
for x in choice(data, nbvar * B, p=pond):
if j == nbvar:
j = 0
i += 1
initialisation.Worksheets[nom_feuille_stat].Cells.at[i, j] = x
j += 1
def calcul_ic_empirique(l1, c1, l2, c2, c3, p, nom_feuille_stat,
nom_feuille_qemp, nom_feuille_sort, nbvar, a):
"""
    Compute the empirical p% percentiles over the samples of the random draw.
    @param nom_feuille_stat: name of the sheet holding the random
                             draws
    @param nom_feuille_qemp: name of the sheet holding the empirical
                             quantiles computed from the draws
                             in nom_feuille_stat
    @param l1: first row of drawn numeric data;
               will also be the first row of the empirical quantiles
    @param c1: first column of drawn data
    @param l2: last row of drawn data
    @param c2: last column of drawn data
               in nom_feuille_qemp
    @param c3: first column used to display the empirical quantile results
    Note: the first row of the empirical results is necessarily the
          same as that of the draws
"""
pcum = list()
rang = list()
"""
    Compute the empirical cumulative probability of each drawn point,
    stored in pcum
"""
for i in range(1, nbvar + 1):
pcum.append((i - a) / (nbvar + 1 - 2 * a))
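    # Worked example: with nbvar = 5 and a = 0.5 (Hazen), pcum is
    # [0.1, 0.3, 0.5, 0.7, 0.9].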
"""
    Compute the rank that each requested probability (p) occupies within
    pcum, stored in rang
"""
for i in range(0, len(p)):
rang.append(compt_inf(pcum, p[i]))
"""
    Sort the data to be used (coming from the random draws)
    in ascending order
    Creates the nom_feuille_sort sheet
"""
trier_tirages_feuille(nom_feuille_stat, nom_feuille_sort, nbvar)
"""Creation de la feuille contenant les quantiles empiriques"""
"""Ecriture des entetes de colonnes"""
for i in range(0, len(p)):
initialisation.Worksheets[nom_feuille_qemp].Cells.at[
l1 - 1, c3 + i] = 'QUANT ' + str(p[i] * 100) + ' %'
"""
    Compute the p% quantiles
data = "RC" & c1 & ":RC" & c2
"""
for y in range(1, l2 + 1):
data = list(
initialisation.Worksheets[nom_feuille_sort].Cells.ix[y, c1:c2])
for i in range(0, len(p)):
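            # Below the smallest (or above the largest) plotting position,
            # clamp to the first (or last) order statistic; otherwise
            # interpolate linearly between the two order statistics whose
            # plotting positions bracket p[i].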
if rang[i] == 0:
set_data = (data[0])
elif rang[i] >= nbvar:
set_data = (data[nbvar - 1])
else:
set_data = (data[rang[i]] -
((data[rang[i]] - data[rang[i] - 1]) *
(pcum[rang[i]] - p[i]) /
(pcum[rang[i]] - pcum[rang[i] - 1])))
initialisation.Worksheets[nom_feuille_qemp].Cells.at[
y, c3 + i] = set_data
def calcul_ic_normal(l1, c1, l2, c2, c3, p, nom_feuille_stat,
nom_feuille_qnorm):
"""
    Compute the normal p% percentiles for each sample of the draw.
    @param nom_feuille_stat: name of the sheet holding the random draws
    @param nom_feuille_qnorm: name of the sheet holding the normal
                              quantiles computed from the draws
                              in nom_feuille_stat
    @param l1: first row of drawn data;
               also corresponds to the first row of the normal
               quantiles;
               there is a title row before it
    @param c1: first column of drawn data
    @param l2: last row of drawn data
    @param c2: last column of drawn data
               in nom_feuille_qnorm
    @param c3: first column used to display the normal results
"""
c_mu = c2 + 2
"""
Calcul moyenne et ecart type correspondant a chaque tirage
(chaque ligne de nom_feuille_stat)
on travaille dans nom_feuille_stat
"""
initialisation.Worksheets[nom_feuille_stat].Cells.at[l1 - 1, c_mu] = 'MEAN'
initialisation.Worksheets[nom_feuille_stat].Cells.at[l1 - 1,
c_mu + 1] = 'STDEV'
# to_drop = list()
for i in range(l1, l2 + 1):
data = list()
for j in range(c1, c2):
data.append(
float(initialisation.Worksheets[nom_feuille_stat]
.Cells.at[i, j]))
"""1. Calcul de la moyenne des echantillons"""
initialisation.Worksheets[nom_feuille_stat].Cells.at[i, c_mu] = mean(
data)
"""2. Calcul de l'ecart type des echantillons"""
initialisation.Worksheets[nom_feuille_stat].Cells.at[
i, c_mu + 1] = std(data)
# if std(data) == 0:
# to_drop.append(i)
# if (len(to_drop) / (l2 - l1)) * 100 <= 5:
# initialisation.Worksheets[
# nom_feuille_stat].Cells = initialisation.Worksheets[
# nom_feuille_stat].Cells.drop(initialisation.Worksheets[
# nom_feuille_stat].Cells.index[to_drop]).reindex(
# range(0,
# (initialisation.Worksheets[nom_feuille_stat]
# .Cells.index.max() + 1)))
# l2 = initialisation.Worksheets[nom_feuille_stat].Cells.index.max
"""
3. Calcul quantiles normaux correspondant a p() et a mean et
stdev precedemment calcules
"""
"""Affichage dans nom_feuille_qnorm"""
for i in range(0, len(p)):
initialisation.Worksheets[nom_feuille_qnorm].Cells.at[
l1 - 1, c3 + i] = 'QUANT ' + str(p[i] * 100) + ' %'
for x in range(l1, l2 + 1):
initialisation.Worksheets[nom_feuille_qnorm].Cells.at[
x, c3 + i] = norm.ppf(
p[i],
loc=initialisation.Worksheets[nom_feuille_stat]
.Cells.at[x, c_mu],
scale=initialisation.Worksheets[nom_feuille_stat]
.Cells.at[x, c_mu + 1])
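# Minimal sketch of the per-row computation above (hypothetical numbers):
#
#     from scipy.stats import norm
#     norm.ppf(0.05, loc=2.0, scale=0.5)  # 5th percentile of N(mean=2.0, sd=0.5)
#
# i.e. each bootstrap replicate's quantile is read off the normal distribution
# fitted to that replicate's mean and standard deviation.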
def calcul_ic_triang_p(l1, c1, l2, c2, c3, nbvar, a, p, nom_feuille_stat,
nom_feuille_sort, nom_feuille_Ftriang,
nom_feuille_qtriang):
"""
Calcul les centiles p% triangulaires pour chaque echantillon.
Avant cela, il faut estimer les parametres min, max, mode de
la loi triangulaire correspondant a chaque echantillon aleatoire
pour se faire : utilisation du solver (ajust proba cumulees)
@param nom_feuille_stat: nom de la feuille des tirages aleatoires
@param nom_feuille_qtriang: nom de la feuille contenant les
quantiles triangulaires calcules a
partir des tirages
@param nom_feuille_sort: nom de la feuille des tirages aleatoires
classes dans l'ordre croissant sur chaque
ligne
@param nom_feuille_Ftriang: nom de la feuille contenant les
probabilites cumulees triangulaires,
empiriques et theoriques pour
ajustement et determination des
parametres min, max et mode
dans nom_feuille_stat
@param l1: premiere ligne de donnees tirees ;
egalement premiere ligne resultats triangulaires
@param c1: premiere colonne de donnees tirees
@param l2: derniere ligne de donnees tirees
@param c2: derniere colonne de donnees tirees
@param c_min: indice de la colonne contenant le min des tirages ;
le max et le mode occupe les positions respectives
c_min+1 et c_min+2
dans nom_feuille_sort, nom_feuille_Ftriang et nom_feuille_qtriang
@param c3: premiere colonne affichage resultats triangulaire
@param nbvar: nombre de points tires a chaque tirage du bootstrap
@param a: parametre de Hazen pour calcul des probabilites
empiriques cumulees
"""
indic = 0
"""
On trie les donnees a exploiter (issues des tirages aleatoires)
dans l'ordre croissant
Creation de la feuille nom_feuille_sort si pas existante
(pour empirique)
"""
for ws in initialisation.Worksheets:
if ws.Name == nom_feuille_sort:
indic = 1
if indic == 0:
trier_tirages_feuille(nom_feuille_stat, nom_feuille_sort, nbvar)
"""
On calcule les probabilites cumulees empiriques que l'on affiche
dans la premiere ligne et on met en place les formules de
probabilite triangulaire qui seront comparees aux probabilites
empiriques ; creation de la feuille nom_feuille_Ftriang
On initialise le solver en prennant le min et le max de chaque
serie tiree et on calcule mode=(min+max)/2
"""
c_min = c3 + nbvar + 1
c_max = c_min + 1
c_mode = c_max + 1
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[l1 - 1,
c_min] = 'min'
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[l1 - 1,
c_max] = 'max'
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[l1 - 1,
c_mode] = 'mode'
for i in range(l1, l2):
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[
i, c_min] = initialisation.Worksheets[nom_feuille_sort].Cells.at[
i, c3]
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[
i, c_max] = initialisation.Worksheets[nom_feuille_sort].Cells.at[
i, c3 + nbvar - 1]
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[i, c_mode] = (
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[i, c_min] +
initialisation.Worksheets[nom_feuille_Ftriang]
.Cells.at[i, c_max]) / 2
"""Calcul probabilites empiriques et theoriques pour ajustement"""
for i in range(0, nbvar):
initialisation.Worksheets[nom_feuille_Ftriang].Cells.at[
l1 - 1, c3 + i - 1] = (i - a) / (nbvar + 1 - 2 * a)
# data = Cells(l1, c3).Address(
# False, False, xlR1C1, RelativeTo=Cells(l1, c3))
# data1 = nom_feuille_sort + '!' + data
# Initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1,
# c3].FormulaR1C1 = (
# '=IF(' + data1 + '<=' + ref + c_min + ',0, IF(' + data1 + '<=' + ref+
# c_mode + ', ((' + data1 + '-' + ref + c_min + ')^2)/(('+ref + c_max +
# '-' + ref + c_min + ')*(' + ref + c_mode + '-' +ref+ c_min + ')),' +
# 'IF(' + data1 + '<=' + ref + c_max + ', 1-((' + data1 + '-' + ref +
# c_max + ')^2)/((' + ref + c_max + '-' + ref + c_min + ')*(' + ref +
# c_max + '-' + ref + c_mode + ')),1)))')
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1,
# c3 + nbvar - 1)),
# Type=xlFillDefault)
# Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3 + nbvar
# - 1)).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l2,
# c3 + nbvar - 1)),
# Type=xlFillDefault)
"""
On calcule la somme des carres des differences entre probabilites
empiriques et probabilites theoriques triangulaires,
pour ajustement
"""
c_ssr = c_mode + 1
# Initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1 - 1, c_ssr] =
# 'Sum Square Res'
# data = Cells(l1 - 1, c3).Address(True, True, xlR1C1) + ':' + Cells(
# l1 - 1, c3 + nbvar - 1).Address(True, True, xlR1C1)
# data = nom_feuille_Ftriang + '!' + data
# data1 = Cells(l1, c3).Address(
# False, False, xlR1C1, RelativeTo=Cells(l1, c_ssr)) + ':' + Cells(
# l1, c3 + nbvar - 1).Address(
# False, False, xlR1C1, RelativeTo=Cells(l1, c_ssr))
# data1 = nom_feuille_Ftriang + '!' + data1
# Initialisation.Worksheets[nom_feuille_Ftriang].Cells[
# l1, c_ssr].FormulaR1C1 = '=SUMXMY2(' + data + ',' + data1 + ')'
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c_ssr).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c_ssr),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l2, c_ssr)),
# Type=xlFillDefault)
"""
Ajustement : on determine les valeurs de min, max, mode qui
minimisent la sum square res et on calcule la probabilite du mode
(necessaire pour le calcul des quantiles)
"""
c_pmode = c_ssr + 1
initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1 - 1,
c_pmode] = 'pmode'
for i in range(l1, l2):
# SolverOk(
# SetCell=Initialisation.Worksheets(nom_feuille_Ftriang).Cells(i,
# c_ssr),
# MaxMinVal=2,
# ValueOf='0',
# ByChange=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(i,
# c_min),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(i,
# c_min + 2)))
# SolverSolve(UserFinish=True)
initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_pmode] = (
initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_mode] -
initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_min]
) / (initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_max] -
initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_min])
initialisation.Worksheets[nom_feuille_Ftriang].Cells(1, 1).Select()
"""
Calcul des quantiles correspondant a la loi triangulaire dont les
parametres min, max et mode viennent d'etre estimes ; creation de
la feuille nom_feuille_qtriang
"""
ref = nom_feuille_Ftriang + '!RC'
for i in range(0, len(p)):
initialisation.Worksheets[nom_feuille_qtriang].Cells[
l1 - 1, c3 + i - 1] = 'QUANT ' + str(p[i] * 100) + ' %'
initialisation.Worksheets[nom_feuille_qtriang].Cells[
l1, c3 + i - 1].FormulaR1C1 = (
'=IF(' + p[i] + '<=' + ref + c_pmode + ',' + ref + c_min +
'+SQRT(' + p[i] + '*(' + ref + c_max + '-' + ref + c_min +
')*(' + ref + c_mode + '-' + ref + c_min + ')), ' + ref +
c_max + '-SQRT((' + 1 - p[i] + ')*(' + ref + c_max + '-' +
ref + c_min + ')*(' + ref + c_max + '-' + ref + c_mode + ')))')
# Range(
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l1,
# c3 + len(p) - 1)).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l2,
# c3 + len(p) - 1)),
# Type=xlFillDefault)
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(1, 1).Select()
return c_min
def calcul_ic_triang_q(l1, c1, l2, c2, c3, nbvar, a, p, nom_feuille_stat,
nom_feuille_sort, nom_feuille_Ftriang,
nom_feuille_qtriang):
"""
Calcul les centiles p% triangulaires pour chaque echantillon.
Avant cela, il faut estimer les parametres min, max, mode de la
loi triangulaire correspondant a chaque echantillon aleatoire
pour se faire: utilisation du solver, ajustement sur les quantiles
@param nom_feuille_stat: nom de la feuille des tirages aleatoires
@param nom_feuille_qtriang: nom de la feuille contenant les
quantiles triangulaires calcules
a partir des tirages
@param nom_feuille_sort: nom de la feuille des tirages aleatoires
classes dans l'ordre croissant sur chaque
ligne
@param nom_feuille_Ftriang: nom de la feuille contenant les
quantiles triangulaires, empiriques et
theoriques pour ajustement et
determination des parametres min, max
et mode
dans nom_feuille_stat
@param l1: premiere ligne de donnees tirees ;
egalement premiere ligne resultats triangulaires
@param c1: premiere colonne de donnees tirees
@param l2: derniere ligne de donnees tirees
@param c2: derniere colonne de donnees tirees
@param c_min: indice de la colonne contenant le min des tirages ;
le max et le mode occupe les positions respectives
c_min+1 et c_min+2
dans nom_feuille_sort, nom_feuille_Ftriang et nom_feuille_qtriang
@param c3: premiere colonne affichage resultats triangulaire
@param nbvar: nombre de points tires a chaque tirage du bootstrap
@param a: parametre de Hazen pour calcul des probabilites
empiriques cumulees
"""
indic = 0
"""
On trie les donnees a exploiter (issues des tirages aleatoires)
dans l'ordre croissant
Creation de la feuille nom_feuille_sort si pas deja existant
(pour empirique)
"""
for ws in initialisation.Worksheets:
if ws.Name == nom_feuille_sort:
indic = 1
if indic == 0:
trier_tirages_feuille(nom_feuille_stat, nom_feuille_sort, nbvar)
"""
On calcule les probabilites cumulees empiriques que l'on affiche
dans la premiere ligne et on met en place les formules de quantile
triangulaire qui seront comparees aux valeurs empiriques ;
creation de la feuille nom_feuille_Ftriang
On initialise le solver en prennant le min et le max de chaque
serie tiree et on calcule mode=(min+max)/2 puis pmode
"""
c_min = c3 + nbvar + 1
c_max = c_min + 1
c_mode = c_max + 1
c_ssr = c_mode + 1
c_pmode = c_ssr + 1
initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1 - 1, c_min] = 'min'
initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1 - 1, c_max] = 'max'
initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1 - 1,
c_mode] = 'mode'
initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1 - 1,
c_pmode] = 'pmode'
for i in range(l1, l2):
initialisation.Worksheets[nom_feuille_Ftriang].Cells[
i, c_min] = initialisation.Worksheets[nom_feuille_sort].Cells[i,
c3]
initialisation.Worksheets[nom_feuille_Ftriang].Cells[
i, c_max] = initialisation.Worksheets[nom_feuille_sort].Cells[
i, c3 + nbvar - 1]
initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_mode] = (
initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_min] +
initialisation.Worksheets[nom_feuille_Ftriang].Cells[i, c_max]) / 2
# Initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1,
# c_pmode].FormulaR1C1 = (
# '=(' + ref + c_mode + '-' + ref + c_min + ')/(' + ref + c_max + '-' +
# ref + c_min + ')')
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c_pmode)
# .Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c_pmode)
# ,Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l2,
# c_pmode)),
# Type=xlFillDefault)
"""
Calcul probabilites empiriques et quantiles triangulaires
correspondants
"""
for i in range(0, nbvar):
initialisation.Worksheets[nom_feuille_Ftriang].Cells[
l1 - 1, c3 + i - 1] = (i - a) / (nbvar + 1 - 2 * a)
# Initialisation.Worksheets[nom_feuille_Ftriang].Cells[l1,
# c3].FormulaR1C1 = (
# '=IF(' + ref2 + l1 - 1 + 'C<=' + ref + c_pmode + ',' + ref + c_min +
# '+SQRT(' + ref2 + l1 - 1 + 'C*(' + ref + c_max + '-' + ref + c_min +
# ')' + '*(' + ref + c_mode + '-' + ref + c_min + ')),' + ref + c_max +
# '-SQRT((1-' + ref2 + l1 - 1 + 'C)*(' + ref + c_max + '-' + ref +c_min
# + ')' + '*(' + ref + c_max + '-' + ref + c_mode + ')))')
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1,
# c3 + nbvar - 1)),
# Type=xlFillDefault)
# Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3 + nbvar
# - 1)).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l2,
# c3 + nbvar - 1)),
# Type=xlFillDefault)
"""
On calcule la somme des carres des differences entre donnees
empiriques etquantiles theoriques triangulaires, pour ajustement
"""
initialisation.Worksheets[nom_feuille_Ftriang].Cells[
l1 - 1, c_ssr] = 'Sum Square Res'
# data = Cells(l1, c3).Address(
# False, False, xlR1C1, RelativeTo=Cells(l1, c_ssr)) + ':' + Cells(
# l1, c3 + nbvar - 1).Address(
# False, False, xlR1C1, RelativeTo=Cells(l1, c_ssr))
# data1 = nom_feuille_Ftriang + '!' + data
# data = nom_feuille_sort + '!' + data
# Initialisation.Worksheets[nom_feuille_Ftriang].Cells[
# l1, c_ssr].FormulaR1C1 = '=SUMXMY2(' + data + ',' + data1 + ')'
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c_ssr).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l1, c_ssr),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(l2, c_ssr)),
# Type=xlFillDefault)
"""
Ajustement : on determine les valeurs de min, max, mode qui
minimisent la sum square res et on calcule la probabilite du mode
(necessaire pour le calcul des quantiles)
"""
# for i in range(l1, l2):
# SolverOk(
# SetCell=Initialisation.Worksheets(nom_feuille_Ftriang).Cells(i,
# c_ssr),
# MaxMinVal=2,
# ValueOf='0',
# ByChange=Range(
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(i,
# c_min),
# Initialisation.Worksheets(nom_feuille_Ftriang).Cells(i,
# c_min + 2)))
# SolverSolve(UserFinish=True)
initialisation.Worksheets[nom_feuille_Ftriang].Cells(1, 1).Select()
"""
Calcul des quantiles correspondant a la loi triangulaire dont les
parametres min, max et mode viennent d'etre estimes ; creation de
la feuillenom_feuille_qtriang
"""
for i in range(0, len(p)):
        initialisation.Worksheets[nom_feuille_qtriang].Cells[
            l1 - 1, c3 + i - 1] = 'QUANT ' + str(p[i] * 100) + ' %'
# Initialisation.Worksheets[nom_feuille_qtriang].Cells[l1, c3 + i -
# 1].FormulaR1C1 = (
# '=IF(' + p[i] + '<=' + ref + c_pmode + ',' + ref + c_min +
# '+SQRT(' + p[i] + '*(' + ref + c_max + '-' + ref + c_min +
# ')*(' + ref + c_mode + '-' + ref + c_min + ')), ' + ref+c_min + 1
# + '-SQRT((' + 1 - p[i] + ')*(' + ref + c_max + '-' + ref +
# c_min + ')*(' + ref + c_max + '-' + ref + c_mode + ')))')
# Range(
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l1,
# c3 + len(p) - 1)).Select()
# Selection.AutoFill(
# Destination=Range(
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l1, c3),
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(l2,
# c3 + len(p) - 1)),
# Type=xlFillDefault)
# Initialisation.Worksheets(nom_feuille_qtriang).Cells(1, 1).Select()
return c_min
def calcul_res(l1, l2, ind_hc, pond_lig_deb, pond_col_deb, pond_col_data,
pond_col_pcum, l_hc, c_hc, nbvar, loi, titre, pcent, pourcent,
data_co, nom_colonne, nom_feuille_res, nom_feuille_quant,
nom_feuille_pond, nom_feuille, c_min, triang_ajust, iproc,
nbdata):
"""
Calcul des resultats statistiques finaux.
@param data_co tableau des donnees exploitees pour calcul HC
dans les feuilles contenant les quantiles
@param l1: Numero de la premiere ligne des quantiles
@param c1: premiere colonne contenant les quantiles issues du
bootstrap
@param l2: Numero de la derniere ligne des resultats quantiles
bootstraps
@param c2: derniere colonne contenant les quantiles
dans nom_feuille_pond
@param pond_lig_deb premiere ligne de donnees numeriques
@param pond_col_deb premiere colonne de donnees numeriques
@param pond_col_data colonne des donnees de concentrations
@param pond_col_pcum colonne des donnees de probabilites cumulees
dans nom_feuille_res
@param l_hc: premiere ligne d'affichage des resultats HC
@param c_hc: premiere colonne d'affichage des resultats HC
@param ligne_tot: numero de la derniere ligne en cours
@param ind_hc: indice de reperage dans pourcent du HC a encadrer
en gras
@param nbvar: nombre de points par tirage bootstrap
@param loi: distribution statistique choisie
1: empirique, 2: normal, 3: triangulaire
@param titre: titre des tableaux de resultats HCx%
@param pourcent: tableau des probabilites x% definissant les HCx%
a estimer
@param pcent: tableau des centiles a calculer sur chaque HCx%
issues des tirages bootstrap
@param nom_feuille_quant: nom de la feuille contenant les quantiles
issues du bootstrap
@param nom_feuille_res: nom de la feuille de resultats
@param nom_feuille_pond: nom de la feuille contenant la table
data_co avec ponderations
mup, sigmap moyenne et ecar type ponderes des donnees
min, max, mode parametre de la loi triangulaire ponderee
@param c_min: numero de colonne du parametre min de la loi
triangulaire les parametres max et modes se trouvent
respectivement a c_min+1 et c_min+2
@param triang_ajust: option d'ajustement pour la loi triangulaire:
si T ajustement sur les quantiles, sinon sur
les probabilites cumulees
"""
_min = 0
_max = 0
mode = 0
mup = 0
sigmap = 0
"""Ecriture du titre"""
ecrire_titre(titre[loi - 1], nom_feuille_res, l_hc, c_hc)
"""
Affichage titre des lignes du tableau HC et Calcul ecart type
de HC
"""
initialisation.Worksheets[nom_feuille_res].Cells.at[l_hc + 1, c_hc] = 'HC'
for i in range(0, len(pourcent)):
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 1, c_hc + i + 1] = pourcent[i]
initialisation.Worksheets[nom_feuille_res].Cells.at[l_hc + 2,
c_hc] = 'Best-Estimate'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 3, c_hc] = 'Geo. Stand. Deviation'
for i in range(0, len(pcent)):
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 4 + i, c_hc] = 'Centile ' + str(pcent[i] * 100) + '%'
HC_be = list()
"""
Calcul HC best-estimate : different suivant la loi
Selection des donnees de concentrations utilisees suivant que
procedure SSWD ou ACT
"""
data_c = list()
for i in range(0, nbdata):
data_c.append(data_co[i].data if iproc == 1 else data_co[i].act)
if loi == 1:
calculer_be_empirique(data_co, pourcent, HC_be, nbdata, data_c)
elif loi == 2:
mup, sigmap = calculer_be_normal(data_co, pourcent, HC_be, nbdata,
data_c)
elif loi == 3:
_min, _max, mode = (calculer_be_triang_q(
data_c, nom_feuille_pond, pond_lig_deb, pond_col_deb,
pond_col_data, pond_col_pcum, pourcent, HC_be, nom_colonne, nbdata)
if triang_ajust is True else calculer_be_triang_p(
data_c, nom_feuille_pond, pond_lig_deb,
pond_col_deb, pond_col_pcum, pourcent, HC_be,
nom_colonne, nbdata))
"""Affichage HC best-estimate dans la feuille de resultats"""
for i in range(0, len(pourcent)):
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + 1 + i] = 10**HC_be[i]
cellule_gras()
"""
calcul percentiles intervalles de confiance :
empirique : pas de bias correction
normal et triangulaire : bias correction
"""
for x in range(0, len(pourcent)):
data = list()
for y in range(1, l2):
data.append(initialisation.Worksheets[nom_feuille_quant]
.Cells.at[y, x])
val = "nan" if str(10**std(data)) == "nan" else 10**std(data)
initialisation.Worksheets[nom_feuille_res].Cells[x].astype('object')
initialisation.Worksheets[nom_feuille_res].Cells.at[l_hc + 3,
c_hc + 1 + x] = val
if loi == 1:
for i in range(0, len(pcent)):
val = 10**percentile(data, pcent[i] * 100)
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 4 + i, c_hc + 1 + x] = "nan" if str(
val) == 'nan' else val
else:
for i in range(0, len(pcent)):
val = (10**(percentile(data, pcent[i] * 100) - median(data)) *
initialisation.Worksheets[nom_feuille_res]
.Cells.at[l_hc + 2, c_hc + 1 + x])
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 4 + i, c_hc + 1 + x] = "nan" if str(
val) == 'nan' else val
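    # Bias correction above: for the normal and triangular fits each bootstrap
    # centile is re-centred on the best estimate via
    # 10 ** (percentile - median) * best_estimate, whereas the empirical fit
    # reports the raw back-transformed bootstrap percentiles.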
"""Infos supplementaires suivant les distributions"""
if loi == 2:
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 1] = 'Best-Estimate'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 3, c_hc + len(pourcent) + 1] = 'Geo. Stand. Deviation'
for i in range(0, len(pcent)):
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 4 + i, c_hc + len(pourcent) + 1] = 'Centile ' + str(
pcent[i] * 100) + '%'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 1, c_hc + len(pourcent) + 2] = 'GWM'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 1, c_hc + len(pourcent) + 3] = 'GWSD'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 2] = 10**mup
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 3] = 10**sigmap
for x in [nbvar + 1, nbvar + 2]:
data = list()
for y in range(l1, l2):
data.append(initialisation.Worksheets[nom_feuille]
.Cells.at[y, x])
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 3, c_hc + len(pourcent) + 1 + x - nbvar] = 10**std(data)
for i in range(0, len(pcent)):
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 4 + i, c_hc + len(pourcent) + 1 + x - nbvar] = (
(10**(percentile(data, pcent[i] * 100) - median(data)))
* initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 1 + x - nbvar])
if loi == 3:
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 1] = 'Best-Estimate'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 3, c_hc + len(pourcent) + 1] = 'Geo. Stand. Deviation'
for i in range(0, len(pcent)):
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 4 + i, c_hc + len(pourcent) + 1] = 'Centile ' + str(
pcent[i] * 100) + '%'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 1, c_hc + len(pourcent) + 2] = 'GWMin'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 1, c_hc + len(pourcent) + 3] = 'GWMax'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 1, c_hc + len(pourcent) + 4] = 'GWMode'
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 2] = 10**_min
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 3] = 10**_max
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 4] = 10**mode
for x in [c_min, c_min + 1, c_min + 2]:
data = list()
for y in range(l1, l2):
data.append(initialisation.Worksheets[nom_feuille]
.Cells.at[y, x])
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 3, c_hc + len(pourcent) + 1 + x - c_min] = 10**std(data)
for i in range(0, len(pcent)):
to_exp = ()
initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 4 + i, c_hc + len(pourcent) + 1 + x - c_min] = (
(10**(percentile(data, pcent[i] * 100) - median(data)))
* initialisation.Worksheets[nom_feuille_res].Cells.at[
l_hc + 2, c_hc + len(pourcent) + 2 + x - c_min])
return mup, sigmap, _min, _max, mode, data_c
def calcul_R2(data_co, loi, mup, sigmap, _min, _max, mode, nbdata, data_c):
"""
Calcul de R2 et de Pvalue paired TTest.
Base sur quantiles ponderes empiriques versus quantiles theoriques
ponderes: normaux ou triangulaires
@param data_co: tableau des donnees exploitees pour calcul HCx%
@param loi: distribution statistique retenue:
2 pour normal et 3 pour triangulaire
@param R2: coefficient de determination
@param Pvalue: Proba paired TTEST comparaison quantiles
empiriques/quantiles theoriques
@param mup: moyenne ponderee des donnees de concentration
@param sigmap: ecart type ponderee des donnees de concentration
@param min, max, mode: parametres de la loi trianguliaire ponderee
@param nbdata: nombre de donnees exploitees pour les calculs HC
@param data_c: donnees de concentration exploitees pour les
calculs HC
"""
"""Calcul de quantiles theoriques, normaux ou triangulaires"""
resQ = [0.0] * nbdata
Qth = [0.0] * nbdata
Pth = [0.0] * nbdata
dif = [0.0] * nbdata
if loi == 2:
for i in range(0, nbdata):
Qth[i] = norm.ppf(data_co[i].pcum, mup, sigmap)
resQ[i] = Qth[i] - data_c[i]
Pth[i] = norm.cdf(data_co[i].data, mup, sigmap)
dif[i] = data_co[i].pcum - Pth[i]
dif[i] = math.fabs(dif[i])
if loi == 3:
pmode = (mode - _min) / (_max - _min)
for i in range(0, nbdata):
if data_co[i].pcum <= pmode:
Qth[i] = (_min + math.sqrt(data_co[i].pcum * (_max - _min) *
(mode - _min)))
else:
Qth[i] = (_max - math.sqrt(
(1 - data_co[i].pcum) * (_max - _min) * (_max - mode)))
resQ[i] = Qth[i] - data_c[i]
if data_co[i].data <= _min:
Pth[i] = 0.0
else:
if data_co[i].data <= mode:
Pth[i] = ((data_co[i].data - _min) ** 2.) / \
((_max - _min) * (mode - _min))
else:
if data_co[i].data <= _max:
Pth[i] = 1 - ((data_co[i].data - _max) ** 2.) / \
((_max - _min) * (_max - mode))
else:
Pth[i] = 1
dif[i] = data_co[i].pcum - Pth[i]
dif[i] = math.fabs(dif[i])
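    # Triangular CDF used above: F(x) = 0 for x <= min,
    # (x - min)^2 / ((max - min) * (mode - min)) for min < x <= mode,
    # 1 - (max - x)^2 / ((max - min) * (max - mode)) for mode < x <= max,
    # and F(x) = 1 beyond max.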
"""
Calcul variance et R2
ceci conduit au calcul d'un R2 non pondere(il est max quand aucune
ponderation n'est appliquee aux donnees, ce qui n'est pas tres
coherent)
var_resQ = Application.WorksheetFunction.Var(resQ)
var_data = Application.WorksheetFunction.Var(data_c)
calcul variance ponderee des donnees concentrations(calcul deja
effectue dans le cas de la loi log - normale)
"""
mu = 0
for i in range(0, nbdata):
mu = mu + data_co[i].data * data_co[i].pond
var_data = 0
for i in range(0, nbdata):
var_data = (var_data + data_co[i].pond * (data_co[i].data - mu)**2.)
var_data = var_data * nbdata / (nbdata - 1)
"""calcul variance ponderee des residus"""
mu = 0
for i in range(0, nbdata):
mu += resQ[i] * data_co[i].pond
var_resQ = 0
for i in range(0, nbdata):
var_resQ += data_co[i].pond * (resQ[i] - mu)**2.
var_resQ = var_resQ * nbdata / (nbdata - 1)
"""calcul R2"""
R2 = 1 - var_resQ / var_data
"""KS dallal wilkinson approximation pvalue"""
n = nbdata
KS = max(dif)
if n < 5:
Pvalue = 0
else:
if n > 100:
KS *= (n / 100)**0.49
n = 100
Pvalue = math.exp(-7.01256 * (KS**2) * (
n + 2.78019) + 2.99587 * KS * math.sqrt(n + 2.78019) - 0.122119 +
0.974598 / math.sqrt(n) + 1.67997 / n)
if Pvalue > 0.1:
Pvalue = 0.5
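    # The expression above is the Dallal-Wilkinson closed-form approximation
    # to the Kolmogorov-Smirnov p-value; it is only considered accurate for
    # small p, which is why any value above 0.1 is simply reported as 0.5.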
return R2, Pvalue
def calculer_be_empirique(data_co, pourcent, HC_emp, nbdata, data):
"""
Calcul des HCx% empirique meilleure estimation.
(independant des runs bootstraps)
a partir de la probabilite cumulee ponderee empirique
@param data_co: tableau des donnees exploitees pour le calcul HC
@param pourcent: table des probabilites x% correspondant aux HCx%
calculees
@param nom_feuille: nom de la feuille contenant data_co_feuil
(il s'agit en fait de nom_feuille_pond)
@param lig_deb: ligne debut donnees numeriques dans nom_feuille
@param col_pcum: indice de la colonne contenant les probabilites
cumulees dans data_co_feuil
@param iproc: indice de procedure: 1 pour SSWD et 2 pour ACT
@param nbdata: nombre de donnees exploitees
@param data: donnees de concentration exploitees pour le calcul HC
Remarque: les donnees doivent etre classees dans l'ordre croissant
dans data et data_co
"""
pcum = list()
for i in range(0, nbdata):
pcum.append(data_co[i].pcum)
rang = list()
"""Calcul de HC_emp"""
for i in range(0, len(pourcent)):
rang.append(compt_inf(pcum, pourcent[i]))
if rang[i] == 0:
HC_emp.append(data[0])
elif rang[i] >= nbdata:
HC_emp.append(data[nbdata - 1])
else:
HC_emp.append(data[rang[i]] - (
(data[rang[i]] - data[rang[i] - 1]) *
(data_co[rang[i]].pcum - pourcent[i]) /
(data_co[rang[i]].pcum - data_co[rang[i] - 1].pcum)))
def calculer_be_normal(data_co, pourcent, HC_norm, nbdata, data):
"""
Calcul des HCp% normaux meilleure estimation.
(independant des runs bootstrap)
pour cela, calcul prealable des moyenne et ecart type ponderes
correspondant aux donnees
@param data_co: collection de donnees exploitees pour le calcul
des HC
@param mup: moyenne ponderee des donnees de concentration
@param sigmap: ecart type pondere des donnees de concentration
@param pourcent: table des probabilites x% correspondant au calcul
des HCx%
@param nbdata: nombre de donnees exploitees
@param data: donnees de concentration exploitees pour les calculs
de HC
"""
mup = 0
for i in range(0, nbdata):
mup = mup + data[i] * data_co[i].pond
sigmap = 0
for i in range(0, nbdata):
sigmap = sigmap + data_co[i].pond * (data[i] - mup)**2
sigmap = math.sqrt(sigmap * nbdata / (nbdata - 1))
for i in range(0, len(pourcent)):
HC_norm.append(norm.ppf(pourcent[i], mup, sigmap))
return mup, sigmap
def calculer_be_triang_p(data_c, nom_feuille, lig_deb, col_deb, col_pcum,
pourcent, HC_triang, nom_colonne, nbdata):
"""
Calcul des HCp% triang meilleure estimation.
(independant des runs bootstrap)
pour cela, calcul prealable des parametres min, max et mode,
ponderes correspondant aux donnees ; estimation par ajustement
sur les probabilites
@param nom_colonne: contient les titres des colonnes de
data_co_feuil
@param nom_feuille: nom_feuille_pond
@param pourcent: table des probabilites x% correspondant au calcul
des HCx%
@param min, max, mode: parametre de la loi triangulaire, ajustee
sur donnees ponderees
@param lig_deb: premiere ligne de donnees numeriques dans
nom_feuille
@param col_deb: premiere colonne de donnees dans nom_feuille
@param col_data: colonne contenant les concentrations nom_feuille
@param col_pcum: colonne contenant les probabilites cumulees dans
nom_feuille
@param nbdata: nombre de donnees exploitees pour le calcul des HC
@param data_c: donnees de concentration exploitees pour le calcul
des HC
"""
# Application.ScreenUpdating = False
col = col_deb + len(nom_colonne) + 1
"""Definition des valeurs initiale de min, max et mode"""
_min = min(data_c)
_max = max(data_c)
    mode = (_min + _max) / 2
# Initialisation.Worksheets[nom_feuille].Activate()
initialisation.Worksheets[nom_feuille].Cells[lig_deb, col] = _min
initialisation.Worksheets[nom_feuille].Cells[lig_deb, col].Name = 'cmin'
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 1, col] = _max
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 1,
col].Name = 'cmax'
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 2, col] = mode
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 2,
col].Name = 'cmode'
col -= 1
"""Recherche de la colonne correspondant à data"""
# data = Initialisation.Worksheets[nom_feuille].Cells(lig_deb, col_data).
# Address(False, False, xlR1C1, RelativeTo= Cells(lig_deb, col))
# data = nom_feuille + '!' + data
"""Formule correspondant à la probabilite cumulee triangulaire"""
# Initialisation.Worksheets[nom_feuille].Cells[lig_deb, col].FormulaR1C1 =
# ('=IF(' + data + '<=cmin,0,IF(' + data + '<= cmode ,((' + data +
# '-cmin)^2)/((cmax - cmin) * (cmode - cmin)) ,IF(' + data +
# '<= cmax ,1-((' + data +
# '- cmax)^2)/ ((cmax - cmin) * (cmax - cmode)),1)))')
# Initialisation.Worksheets[nom_feuille].Cells(lig_deb, col).Select()
# Selection.AutoFill(Destination=Range(Initialisation.Worksheets[
# nom_feuille].
# Cells(lig_deb, col), Initialisation.Worksheets[nom_feuille].Cells(
# lig_deb + nbdata - 1,
# col)), Type=xlFillDefault)
# dataF = Cells(lig_deb, col).Address(True, True, xlR1C1) + ':' +
# Cells(lig_deb + nbdata - 1, col).Address(True, True, xlR1C1)
"""
Recherche de la colonne correspondant à probabilite cumulee
ponderee empirique
"""
# dataP = Cells(lig_deb, col_pcum).Address(True, True, xlR1C1) + ':' +
# Cells(lig_deb + nbdata - 1, col_pcum).Address(True, True, xlR1C1)
"""
Calcul somme à minimiser pour estimer parametre min, max et mode
puis optimisation par la procedure solver
"""
# Initialisation.Worksheets[nom_feuille].Cells[
# lig_deb + 3, col +
# 1].FormulaR1C1 = '=SUMXMY2(' + dataP + ',' + dataF + ')'
# SolverOk(SetCell=Initialisation.Worksheets[nom_feuille].Cells(lig_deb +
# 3, col + 1),
# MaxMinVal=2, ValueOf='0', ByChange=Initialisation.Worksheets[nom_feuille]
# .Range(Cells(lig_deb, col + 1), Cells(lig_deb + 2, col + 1)))
# SolverSolve(UserFinish=True)
"""
On rapatrie min, max, mode dans programme et on calcule HC_triang
meilleure estimation correspondant
"""
col += 1
_min = initialisation.Worksheets[nom_feuille].Cells(lig_deb, col)
_max = initialisation.Worksheets[nom_feuille].Cells(lig_deb + 1, col)
mode = initialisation.Worksheets[nom_feuille].Cells(lig_deb + 2, col)
pmode = (mode - _min) / (_max - _min)
    for i in range(0, len(pourcent)):
        if pourcent[i] <= pmode:
            HC_triang.append(_min + math.sqrt(pourcent[i] * (_max - _min) *
                                              (mode - _min)))
        else:
            HC_triang.append(_max - math.sqrt(
                (1 - pourcent[i]) * (_max - _min) * (_max - mode)))
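    # Triangular inverse CDF used above: for a probability q <= pmode,
    # Q(q) = min + sqrt(q * (max - min) * (mode - min)); otherwise
    # Q(q) = max - sqrt((1 - q) * (max - min) * (max - mode)).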
"""
On efface la plage de cellules sur laquelle on vient de
travailler
"""
# Range(Initialisation.Worksheets[nom_feuille].Cells(lig_deb, col - 1),
# Initialisation.Worksheets[nom_feuille].Cells(lig_deb + nbdata - 1,
# col)).Select()
# Selection.Delete()
return _min, _max, mode
def calculer_be_triang_q(data_c, nom_feuille, lig_deb, col_deb, col_data,
col_pcum, pourcent, HC_triang, nom_colonne, nbdata):
"""
Calcul des HCp% triang meilleure estimation.
(independant des runs bootstrap)
pour cela, calcul prealable des parametres _min, _max et mode,
ponderes correspondant aux donnees ; estimation par ajustement
sur les quantiles
@param nom_feuille: nom_feuille_pond
@param pourcent: table des probabilites x% correspondant au calcul
des HCx%
@param _min, _max, mode: parametre de la loi triangulaire, ajustee
sur donnees ponderees
@param lig_deb: premiere ligne de donnees numeriques dans
nom_feuille
@param col_deb: premiere colonne de donnees dans nom_feuille
@param col_data: colonne contenant les concentrations nom_feuille
@param col_pcum: colonne contenant les probabilites cumulees dans
nom_feuille
@param nbdata: nombre de donnees exploitees pour le calcul des HC
@param data_c: donnees de concentration exploitees pour le calcul
des HC
"""
# Application.ScreenUpdating = False
col = col_deb + len(nom_colonne) + 1
"""Definition des valeurs initiale de _min, max, mode et pmode"""
_min = min(data_c)
_max = max(data_c)
mode = (_min + _max) / 2
# Initialisation.Worksheets[nom_feuille].Activate()
initialisation.Worksheets[nom_feuille].Cells[lig_deb, col] = _min
initialisation.Worksheets[nom_feuille].Cells[lig_deb, col].Name = 'cmin'
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 1, col] = _max
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 1,
col].Name = 'cmax'
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 2, col] = mode
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 2,
col].Name = 'cmode'
# Initialisation.Worksheets[nom_feuille].Cells[lig_deb + 4,
# col].FormulaR1C1 =
# '=(cmode - cmin) / (cmax - cmin)'
initialisation.Worksheets[nom_feuille].Cells[lig_deb + 4,
col].Name = 'cpmode'
col -= 1
"""Recherche de la colonne correspondant à pcum"""
# data = Initialisation.Worksheets[nom_feuille].Cells(lig_deb, col_pcum).
# Address(False, False, xlR1C1, RelativeTo= Cells(lig_deb, col))
# data = nom_feuille + '!' + data
"""Formule correspondant aux quantiles de la loi triangulaire"""
# Initialisation.Worksheets[nom_feuille].Cells[lig_deb, col].FormulaR1C1 =
# ('=IF(' + data + '<=cpmode,cmin+SQRT(' + data +
# '*(cmax - cmin) * (cmode - cmin)),cmax -SQRT((1-' + data +
# ')*(cmax - cmin) * (cmax - cmode)))')
# Initialisation.Worksheets[nom_feuille].Cells[lig_deb, col].Select()
# Selection.AutoFill(
# Destination=Range(Initialisation.Worksheets[nom_feuille].Cells(
# lig_deb, col),
# Initialisation.Worksheets[nom_feuille].Cells(
# lig_deb + nbdata - 1, col)),
# Type=xlFillDefault)
# dataF = Cells(lig_deb, col).Address(True, True, xlR1C1) + ':' + Cells(
# lig_deb + nbdata - 1, col).Address(True, True, xlR1C1)
"""Recherche de la colonne correspondant aux donnees"""
# dataP = Cells(lig_deb, col_data).Address(True, True, xlR1C1) + ':' +
# Cells(lig_deb + nbdata - 1, col_data).Address(True, True, xlR1C1)
"""
Calcul somme à minimiser pour estimer parametre min, max et mode
puis optimisation par la procedure solver
"""
# Initialisation.Worksheets[nom_feuille].Cells[lig_deb + 3,
# col + 1].FormulaR1C1 = (
# '=SUMXMY2(' + dataP + ',' + dataF + ')')
# SolverOk(
# SetCell=Initialisation.Worksheets[nom_feuille].Cells(lig_deb + 3,
# col + 1),
# MaxMinVal=2,
# ValueOf='0',
# ByChange=Initialisation.Worksheets[nom_feuille].Range(
# Cells(lig_deb, col + 1), Cells(lig_deb + 2, col + 1)))
# SolverSolve(UserFinish=True)
"""
On rapatrie min, max, mode dans le programme et on calcule
HC_triang meilleure estimation correspondant
"""
col += 1
_min = initialisation.Worksheets[nom_feuille].Cells(lig_deb, col)
_max = initialisation.Worksheets[nom_feuille].Cells(lig_deb + 1, col)
mode = initialisation.Worksheets[nom_feuille].Cells(lig_deb + 2, col)
pmode = (mode - _min) / (_max - _min)
    for i in range(0, len(pourcent)):
        if pourcent[i] <= pmode:
            HC_triang.append(_min + math.sqrt(pourcent[i] * (_max - _min) *
                                              (mode - _min)))
        else:
            HC_triang.append(_max - math.sqrt(
                (1 - pourcent[i]) * (_max - _min) * (_max - mode)))
"""On efface la plage de cellules sur laquelle on vient de travailler"""
# Range(Initialisation.Worksheets[nom_feuille].Cells(lig_deb, col - 1),
# Initialisation.Worksheets[nom_feuille].Cells(lig_deb + nbdata -
# 1, col)).Select()
# Selection.Delete()
return _min, _max, mode
| Gysco/SSWD | pyment/statistics.py | Python | gpl-3.0 | 54,420 |
# This file is part of the Perspectives Notary Server
#
# Copyright (C) 2011 Dan Wendlandt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Ask a network notary server what keys it has seen for a particular service.
"""
from __future__ import print_function
import sys
import traceback
import argparse
from client_common import verify_notary_signature, notary_reply_as_text, fetch_notary_xml
DEFAULT_SERVER = 'localhost'
DEFAULT_PORT = '8080'
DEFAULT_KEYFILE = 'notary.pub'
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('service_id', default=DEFAULT_SERVER,
help="A remote service of the form 'hostname:port,servicetype'. Use servicetype '1' for SSH or '2' for SSL.")
# don't use type=FileType for the key because we don't want argparse
# to try opening it before the program runs.
# this module should be callable without using any key at all.
parser.add_argument('--notary_pubkey', '--key', '-k', default=DEFAULT_KEYFILE, metavar='KEYFILE',
help="File containing the notary's public key. If supplied the response signature from the notary will be verified. Default: \'%(default)s\'.")
parser.add_argument('--notary-server', '--server', '-s', default=DEFAULT_SERVER, metavar='SERVER',
help="Notary server to contact. Default: \'%(default)s\'.")
parser.add_argument('--notary_port', '--port', '-p', type=int, default=DEFAULT_PORT, metavar='PORT',
help="Port to contact the server on. Default: \'%(default)s\'.")
args = parser.parse_args()
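# Example invocation (hypothetical host and notary), querying the SSL keys a
# notary has observed for a service:
#
#     python simple_client.py www.example.com:443,2 -s notary.example.org -p 8080 -k notary.pub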
try:
code, xml_text = fetch_notary_xml(args.notary_server, args.notary_port, args.service_id)
if code == 404:
print("Notary has no results for '%s'." % args.service_id)
elif code != 200:
print("Notary server returned error code: %s" % code)
except Exception as e:
print("Exception contacting notary server:")
traceback.print_exc(e)
exit(1)
print(50 * "-")
print("XML Response:")
print(xml_text)
print(50 * "-")
try:
pub_key = open(args.notary_pubkey).read()
if not verify_notary_signature(args.service_id, xml_text, pub_key):
print("Signature verification failed. Results are not valid.")
exit(1)
except IOError:
print("Warning: no public key specified, not verifying notary signature.")
print("Results:")
print(notary_reply_as_text(xml_text))
| danwent/Perspectives-Server | client/simple_client.py | Python | gpl-3.0 | 2,821 |
__author__ = 'arobres'
import requests
from nose.tools import assert_equals, assert_true
import ujson
#OK WE CAN NOW SEND MESSAGES TO THE FORUM
THEME_LIST = ['Security', 'Development', 'Automation', 'Testing']
message_body = {}
message_body['theme'] = ''
message_body['subject'] = ''
message_body['message'] = ''
print message_body
#CREATE 4 NEW MESSAGES IN FORUM
for message_id in range(0, 4):
message_body['theme'] = THEME_LIST[message_id]
message_body['subject'] = 'MESSAGE SUBJECT ID = {}'.format(message_id+1)
message_body['message'] = 'MESSAGE BODY ID = {}'.format(message_id+1)
response = requests.post(url='http://localhost:8081/v1.0/forum', data=ujson.dumps(message_body))
assert_true(response.ok)
assert_equals(response.content, 'message created')
#WE CAN MAKE A REQUEST TO OBTAIN ALL THE FORUM MESSAGES
response = requests.get(url='http://localhost:8081/v1.0/forum')
assert_true(response.ok)
try:
json_response = response.json()
except:
    print 'Error: THE RESPONSE IS NOT IN JSON FORMAT'
print json_response
print json_response['Development']
print json_response['Development'][0]
#THIS API ALLOWS HTTP QUERY PARAMETERS TO FILTER BY THEME
#FIRST CREATE A DICT WITH THE FILTER USING THE PATTERN {'key': 'value'}
payload = {'theme': 'Security'}
#THEN INCLUDE IT IN THE REQUEST
response = requests.get(url='http://localhost:8081/v1.0/forum', params=payload)
print (response.url)
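# With the payload above, requests encodes the filter into the query string,
# so the printed URL is http://localhost:8081/v1.0/forum?theme=Security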
#Verify the response
assert_true(response.ok, response.content)
#Convert to JSON
try:
json_response = response.json()
except:
    print 'Error: THE RESPONSE IS NOT IN JSON FORMAT'
print json_response['Security']
print json_response
###### REMEMBER ########
# TO USE QUERY PARAMETERS IN A URL, FIRST DEFINE ALL THE PARAMETERS IN A DICT AND INCLUDE IT IN THE REQUEST
| twiindan/forum | test/demo/demo_3.py | Python | gpl-2.0 | 1,814 |
import json
import gen
from gen.tests.utils import make_arguments, true_false_msg, validate_error, validate_success
dns_forward_zones_str = """\
[["a.contoso.com", [["1.1.1.1", 53], \
["2.2.2.2", 53]]], \
["b.contoso.com", [["3.3.3.3", 53], \
["4.4.4.4", 53]]]] \
"""
bad_dns_forward_zones_str = """\
[["a.contoso.com", [[1, 53], \
["2.2.2.2", 53]]], \
["b.contoso.com", [["3.3.3.3", 53], \
["4.4.4.4", 53]]]] \
"""
def validate_error_multikey(new_arguments, keys, message, unset=None):
assert gen.validate(arguments=make_arguments(new_arguments)) == {
'status': 'errors',
'errors': {key: {'message': message} for key in keys},
'unset': set() if unset is None else unset,
}
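# Usage sketch (hypothetical keys): validate_error_multikey({'a': 'x'}, ['a', 'b'], 'msg')
# asserts that gen.validate reports the same error message for both 'a' and 'b',
# with 'unset' listing any keys left undefined as a consequence.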
def test_invalid_telemetry_enabled():
err_msg = "Must be one of 'true', 'false'. Got 'foo'."
validate_error(
{'telemetry_enabled': 'foo'},
'telemetry_enabled',
err_msg)
def test_invalid_ports():
test_bad_range = '["52.37.192.49", "52.37.181.230:53", "52.37.163.105:65536"]'
range_err_msg = "Must be between 1 and 65535 inclusive"
test_bad_value = '["52.37.192.49", "52.37.181.230:53", "52.37.163.105:abc"]'
value_err_msg = "Must be an integer but got a str: abc"
validate_error(
{'resolvers': test_bad_range},
'resolvers',
range_err_msg)
validate_error(
{'resolvers': test_bad_value},
'resolvers',
value_err_msg)
def test_dns_bind_ip_blacklist():
test_ips = '["52.37.192.49", "52.37.181.230", "52.37.163.105"]'
validate_success({'dns_bind_ip_blacklist': test_ips})
def test_dns_forward_zones():
zones = dns_forward_zones_str
bad_zones = bad_dns_forward_zones_str
err_msg = 'Invalid "dns_forward_zones": 1 not a valid IP address'
validate_success({'dns_forward_zones': zones})
validate_error(
{'dns_forward_zones': bad_zones},
'dns_forward_zones',
err_msg)
def test_invalid_ipv4():
test_ips = '["52.37.192.49", "52.37.181.230", "foo", "52.37.163.105", "bar"]'
err_msg = "Invalid IPv4 addresses in list: foo, bar"
validate_error(
{'master_list': test_ips},
'master_list',
err_msg)
validate_error(
{'dns_bind_ip_blacklist': test_ips},
'dns_bind_ip_blacklist',
err_msg)
validate_error(
{'resolvers': test_ips},
'resolvers',
err_msg)
def test_invalid_zk_path():
validate_error(
{'exhibitor_zk_path': 'bad/path'},
'exhibitor_zk_path',
"Must be of the form /path/to/znode")
def test_invalid_zk_hosts():
validate_error(
{'exhibitor_zk_hosts': 'zk://10.10.10.10:8181'},
'exhibitor_zk_hosts',
"Must be of the form `host:port,host:port', not start with zk://")
def test_invalid_bootstrap_url():
validate_error(
{'bootstrap_url': '123abc/'},
'bootstrap_url',
"Must not end in a '/'")
def test_validate_duplicates():
test_ips = '["10.0.0.1", "10.0.0.2", "10.0.0.1"]'
err_msg = 'List cannot contain duplicates: 10.0.0.1 appears 2 times'
validate_error(
{'master_list': test_ips},
'master_list',
err_msg)
validate_error(
{'dns_bind_ip_blacklist': test_ips},
'dns_bind_ip_blacklist',
err_msg)
def test_invalid_oauth_enabled():
validate_error(
{'oauth_enabled': 'foo'},
'oauth_enabled',
true_false_msg)
def test_invalid_mesos_dns_set_truncate_bit():
validate_error(
{'mesos_dns_set_truncate_bit': 'foo'},
'mesos_dns_set_truncate_bit',
true_false_msg)
def test_cluster_docker_credentials():
validate_error(
{'cluster_docker_credentials': 'foo'},
'cluster_docker_credentials',
"Must be valid JSON. Got: foo")
validate_error(
{'cluster_docker_credentials_dcos_owned': 'foo'},
'cluster_docker_credentials_dcos_owned',
true_false_msg)
def test_exhibitor_storage_master_discovery():
msg_master_discovery = "When master_discovery is not static, exhibitor_storage_backend must be " \
"non-static. Having a variable list of master which are discovered by agents using the " \
"master_discovery method but also having a fixed known at install time static list of " \
"master ips doesn't `master_http_load_balancer` then exhibitor_storage_backend must not " \
"be static."
validate_success({
'exhibitor_storage_backend': 'static',
'master_discovery': 'static'})
validate_success({
'exhibitor_storage_backend': 'aws_s3',
'master_discovery': 'master_http_loadbalancer',
'aws_region': 'foo',
'exhibitor_address': 'http://foobar',
'exhibitor_explicit_keys': 'false',
'num_masters': '5',
's3_bucket': 'baz',
's3_prefix': 'mofo'})
validate_success({
'exhibitor_storage_backend': 'aws_s3',
'master_discovery': 'static',
'exhibitor_explicit_keys': 'false',
's3_bucket': 'foo',
'aws_region': 'bar',
's3_prefix': 'baz/bar'})
validate_error_multikey(
{'exhibitor_storage_backend': 'static',
'master_discovery': 'master_http_loadbalancer'},
['exhibitor_storage_backend', 'master_discovery'],
msg_master_discovery,
unset={'exhibitor_address', 'num_masters'})
def test_validate_s3_prefix():
validate_error({
'exhibitor_storage_backend': 'aws_s3',
'exhibitor_explicit_keys': 'false',
'aws_region': 'bar',
's3_bucket': 'baz',
's3_prefix': 'baz/'},
's3_prefix',
'Must be a file path and cannot end in a /')
validate_success({'s3_prefix': 'baz'})
validate_success({'s3_prefix': 'bar/baz'})
def test_validate_default_overlay_network_name():
msg = "Default overlay network name does not reference a defined overlay network: foo"
validate_error_multikey(
{'dcos_overlay_network': json.dumps({
'vtep_subnet': '44.128.0.0/20',
'vtep_mac_oui': '70:B3:D5:00:00:00',
'overlays': [{
'name': 'bar',
'subnet': '1.1.1.0/24',
'prefix': 24
}],
}), 'dcos_overlay_network_default_name': 'foo'},
['dcos_overlay_network_default_name', 'dcos_overlay_network'],
msg)
def test_validate_check_config():
# No checks.
validate_success({'check_config': json.dumps({})})
# Valid node and cluster checks.
validate_success({
'check_config': json.dumps({
'cluster_checks': {
'cluster-check-1': {
'description': 'Cluster check 1',
'cmd': ['echo', 'cluster-check-1'],
'timeout': '1s',
},
},
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
'node-check-2': {
'description': 'Node check 2',
'cmd': ['echo', 'node-check-2'],
'timeout': '1s',
'roles': ['agent']
},
},
'prestart': ['node-check-1'],
'poststart': ['node-check-1', 'node-check-2'],
},
})
})
# Valid node checks only.
validate_success({
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
'node-check-2': {
'description': 'Node check 2',
'cmd': ['echo', 'node-check-2'],
'timeout': '1s',
'roles': ['agent']
},
},
'prestart': ['node-check-1'],
'poststart': ['node-check-1', 'node-check-2'],
},
})
})
# Valid cluster checks only.
validate_success({
'check_config': json.dumps({
'cluster_checks': {
'cluster-check-1': {
'description': 'Cluster check 1',
'cmd': ['echo', 'cluster-check-1'],
'timeout': '1s',
},
},
})
})
# Missing check definitions.
validate_error(
{'check_config': json.dumps({'cluster_checks': {}})},
'check_config',
"Key 'cluster_checks' error: Missing keys: Check name must be a nonzero length string with no whitespace",
)
validate_error(
{'check_config': json.dumps({'node_checks': {}})},
'check_config',
"Key 'node_checks' error: Missing keys: 'checks'",
)
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {},
},
})
},
'check_config',
(
"Key 'node_checks' error: Key 'checks' error: Missing keys: Check name must be a nonzero length string "
"with no whitespace"
),
)
# Invalid check names.
validate_error(
{
'check_config': json.dumps({
'cluster_checks': {
'cluster check 1': {
'description': 'Cluster check 1',
'cmd': ['echo', 'cluster-check-1'],
'timeout': '1s',
},
},
})
},
'check_config',
"Key 'cluster_checks' error: Missing keys: Check name must be a nonzero length string with no whitespace",
)
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node check 1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
},
'prestart': ['node-check-1'],
},
})
},
'check_config',
(
"Key 'node_checks' error: Key 'checks' error: Missing keys: Check name must be a nonzero length string "
"with no whitespace"
),
)
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
},
'prestart': ['node check 1'],
},
})
},
'check_config',
'Check name must be a nonzero length string with no whitespace',
)
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
},
'poststart': ['node check 1'],
},
})
},
'check_config',
'Check name must be a nonzero length string with no whitespace',
)
# Invalid timeouts.
validate_error(
{
'check_config': json.dumps({
'cluster_checks': {
'cluster-check-1': {
'description': 'Cluster check 1',
'cmd': ['echo', 'cluster-check-1'],
'timeout': '1second',
},
},
})
},
'check_config',
'Timeout must be a string containing an integer or float followed by a unit: ns, us, µs, ms, s, m, h',
)
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1 s',
},
},
'poststart': ['node-check-1'],
},
})
},
'check_config',
'Timeout must be a string containing an integer or float followed by a unit: ns, us, µs, ms, s, m, h',
)
# Missing check description.
validate_error(
{
'check_config': json.dumps({
'cluster_checks': {
'cluster-check-1': {
'cmd': ['echo', 'cluster-check-1'],
'timeout': '1s',
},
},
})
},
'check_config',
"Key 'cluster_checks' error: Key 'cluster-check-1' error: Missing keys: 'description'",
)
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
},
'poststart': ['node-check-1'],
},
})
},
'check_config',
"Key 'node_checks' error: Key 'checks' error: Key 'node-check-1' error: Missing keys: 'description'",
)
# Check cmd is wrong type.
validate_error(
{
'check_config': json.dumps({
'cluster_checks': {
'cluster-check-1': {
'description': 'Cluster check 1',
'cmd': 'echo cluster-check-1',
'timeout': '1s',
},
},
})
},
'check_config',
(
"Key 'cluster_checks' error: Key 'cluster-check-1' error: Key 'cmd' error: 'echo cluster-check-1' should "
"be instance of 'list'"
),
)
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'cmd': 'echo node-check-1',
'timeout': '1s',
},
},
'poststart': ['node-check-1'],
},
})
},
'check_config',
(
"Key 'node_checks' error: Key 'checks' error: Key 'node-check-1' error: Key 'cmd' error: "
"'echo node-check-1' should be instance of 'list'"
),
)
# Missing node prestart and poststart check lists.
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
},
},
})
},
'check_config',
'At least one of prestart or poststart must be defined in node_checks',
)
# Checks missing from both prestart and poststart.
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
'node-check-2': {
'description': 'Node check 2',
'cmd': ['echo', 'node-check-2'],
'timeout': '1s',
},
},
'poststart': ['node-check-1'],
},
})
},
'check_config',
'All node checks must be referenced in either prestart or poststart, or both',
)
# Checks referenced in prestart or poststart but not defined.
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
'node-check-2': {
'description': 'Node check 2',
'cmd': ['echo', 'node-check-2'],
'timeout': '1s',
},
},
'poststart': ['node-check-1', 'node-check-2', 'node-check-3'],
},
})
},
'check_config',
'All node checks must be referenced in either prestart or poststart, or both',
)
# Invalid node check role.
validate_error(
{
'check_config': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
'roles': ['master', 'foo'],
},
},
'poststart': ['node-check-1'],
},
})
},
'check_config',
'roles must be a list containing master or agent or both',
)
def test_validate_custom_checks():
check_config = json.dumps({
'cluster_checks': {
'cluster-check-1': {
'description': 'Cluster check 1',
'cmd': ['echo', 'cluster-check-1'],
'timeout': '1s',
},
},
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
'node-check-2': {
'description': 'Node check 2',
'cmd': ['echo', 'node-check-2'],
'timeout': '1s',
'roles': ['agent']
},
},
'prestart': ['node-check-1'],
'poststart': ['node-check-1', 'node-check-2'],
},
})
custom_checks = json.dumps({
'cluster_checks': {
'custom-cluster-check-1': {
'description': 'Custom cluster check 1',
'cmd': ['echo', 'custom-cluster-check-1'],
'timeout': '1s',
},
},
'node_checks': {
'checks': {
'custom-node-check-1': {
'description': 'Custom node check 1',
'cmd': ['echo', 'custom-node-check-1'],
'timeout': '1s',
},
},
'prestart': ['custom-node-check-1'],
'poststart': ['custom-node-check-1'],
}
})
# Empty and non-empty check_config and custom_checks.
validate_success({
'check_config': json.dumps({}),
'custom_checks': json.dumps({}),
})
validate_success({
'check_config': check_config,
'custom_checks': json.dumps({}),
})
validate_success({
'check_config': check_config,
'custom_checks': custom_checks,
})
validate_success({
'check_config': json.dumps({}),
'custom_checks': custom_checks,
})
# Invalid custom checks.
validate_error(
{
'custom_checks': json.dumps({
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
'node-check-2': {
'description': 'Node check 2',
'cmd': ['echo', 'node-check-2'],
'timeout': '1s',
},
},
'poststart': ['node-check-1', 'node-check-2', 'node-check-3'],
},
})
},
'custom_checks',
'All node checks must be referenced in either prestart or poststart, or both',
)
# Custom checks re-use check name used by builtin checks.
validate_error_multikey(
{
'check_config': check_config,
'custom_checks': json.dumps({
'cluster_checks': {
'cluster-check-1': {
'description': 'Cluster check 1',
'cmd': ['echo', 'cluster-check-1'],
'timeout': '1s',
},
},
'node_checks': {
'checks': {
'node-check-1': {
'description': 'Node check 1',
'cmd': ['echo', 'node-check-1'],
'timeout': '1s',
},
'node-check-2': {
'description': 'Node check 2',
'cmd': ['echo', 'node-check-2'],
'timeout': '1s',
},
},
'poststart': ['node-check-1', 'node-check-2'],
},
}),
},
['check_config', 'custom_checks'],
(
'Custom check names conflict with builtin checks. Reserved cluster check names: cluster-check-1. Reserved '
'node check names: node-check-1, node-check-2.'
),
)
| lingmann/dcos | gen/tests/test_validation.py | Python | apache-2.0 | 22,929 |
import json
from django.urls import reverse
from rest_framework.test import APITestCase
from cities.models import State
class StateViewTestCase(APITestCase):
def test_correct_fields_list(self):
"""Verify the correct serializer is being used"""
State.objects.create(code=1, name="State", abbr="st")
response = self.client.get(reverse("api:state-list"))
response_json = json.loads(response.content.decode("utf-8"))
expected = [{"name": "State", "abbr": "st"}]
self.assertEqual(1, len(response_json["results"]))
self.assertEqual(expected, response_json["results"])
| dirtycoder/pets | pets/api/tests/test_view_state.py | Python | mit | 628 |
# -*- coding: utf-8 -*-
"""
Optimization ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def optimize(cost,
optimizer,
global_step,
max_grad_norm=5.0):
""" Helper funciton to return the optimization op that will optimize
the network during training.
This function will obtain all trainable variables and clip their
gradients by their global norm at max_grad_norm.
    After that, the optimizer object passed will get the (possibly) clipped
gradients and apply the updates defining the backprop step that will
represent our training op.
Args:
cost: Tensor representing the cost obtained.
optimizer: tf.train.Optimizer object. Valid objects are
- GradientDescentOptimizer
- AdagradOptimizer
- AdamOptimizer
- RMSPropOptimizer
max_grad_norm: float, the maximum norm for the gradients before we
clip them.
Returns:
train_op: the operation that we will run to optimize the network.
grads: list of tensors, representing the gradients for each
trainable variable
tvars: list of Tensors, the trainable variables
"""
assert optimizer is not None
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(
tf.gradients(cost, tvars), max_grad_norm)
train_op = optimizer.apply_gradients(zip(grads, tvars),
global_step=global_step)
return train_op, grads, tvars
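# A minimal usage sketch (added for illustration, not part of the original
# module): assumes TensorFlow 1.x graph mode with a scalar `cost` tensor
# already built; the learning rate and optimizer choice below are arbitrary.
#
#   global_step = tf.Variable(0, trainable=False, name='global_step')
#   optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
#   train_op, grads, tvars = optimize(cost, optimizer, global_step,
#                                     max_grad_norm=5.0)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train_op)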
| giancds/attentive_lm | optimization_ops.py | Python | apache-2.0 | 1,569 |
from django.contrib import admin
from django import forms
from models import UserAccess, CharacterAccessRule, CorpAccessRule, AllianceAccessRule, StandingAccessRule
class UserAccessForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserAccessForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
if instance and instance.pk:
self.fields['object_id'].widget.attrs['readonly'] = True
def clean_user(self):
instance = getattr(self, 'instance', None)
if instance and instance.pk:
if self.cleaned_data['user'] != instance.user:
self.data['user'] = instance.user
raise forms.ValidationError("Cannot change once set")
else:
return instance.user
return self.cleaned_data['user']
def clean_character(self):
instance = getattr(self, 'instance', None)
if instance and instance.pk:
if self.cleaned_data['character'] != instance.character:
self.data['character'] = instance.character
raise forms.ValidationError("Cannot change once set")
else:
return instance.character
return self.cleaned_data['character']
def clean_content_type(self):
instance = getattr(self, 'instance', None)
if instance and instance.pk:
if self.cleaned_data['content_type'] != instance.content_type:
self.data['content_type'] = instance.content_type
raise forms.ValidationError("Cannot change once set")
else:
return instance.content_type
return self.cleaned_data['content_type']
def clean_object_id(self):
instance = getattr(self, 'instance', None)
if instance and instance.pk:
return instance.object_id
return self.cleaned_data['object_id']
@admin.register(UserAccess)
class UserAccessAdmin(admin.ModelAdmin):
form = UserAccessForm
admin.site.register(CharacterAccessRule)
admin.site.register(CorpAccessRule)
admin.site.register(AllianceAccessRule)
admin.site.register(StandingAccessRule)
| Adarnof/adarnauth | access/admin.py | Python | gpl-2.0 | 2,180 |
#!/usr/bin/python
# Copyright 2015 Intel corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a barebones file needed to fill a gap until Ansible 2.0. No
# error checking, no deletions, no updates. Idempotent creation only.
# If you look closely, you will see we aren't _really_ using the shade module;
# we just use it to slightly abstract the authentication model. As patches land
# in upstream shade we will be able to use more of the shade module. Until then,
# if we want to be 'stable' we really need to be using it as a passthrough.
import tempfile
import traceback
import shade
class SanityChecks(object):
@staticmethod
def keystone(cloud):
[tenant for tenant in cloud.keystone_client.tenants.list()]
@staticmethod
def glance(cloud):
        with tempfile.NamedTemporaryFile(suffix='qcow2') as image:
cloud.create_image("test", filename=image.name,
disk_format="qcow2", container_format="bare")
testid = cloud.get_image_id("test")
cloud.delete_image(testid)
@staticmethod
def cinder(cloud):
[volume for volume in cloud.cinder_client.volumes.list()]
@staticmethod
def swift(cloud):
[container for container in cloud.swift_client.list()]
def main():
module = AnsibleModule(
argument_spec=openstack_full_argument_spec(
password=dict(required=True, type='str'),
project=dict(required=True, type='str'),
role=dict(required=True, type='str'),
user=dict(required=True, type='str'),
service=dict(required=True, type='str'),
)
)
try:
changed = True
cloud = shade.operator_cloud(**module.params)
getattr(SanityChecks, module.params.pop("service"))(cloud)
module.exit_json(changed=changed)
except Exception:
module.exit_json(failed=True, changed=True,
msg=repr(traceback.format_exc()))
# import module snippets
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.openstack import * # noqa
if __name__ == '__main__':
main()
| coolsvap/kolla | docker/kolla-toolbox/kolla_sanity.py | Python | apache-2.0 | 2,652 |
from django.core import serializers
from django.shortcuts import render, redirect
from flow.models import *
from django.http import HttpResponse
from json import dumps
def index(request):
f = Flow.objects.all()
return render(request, 'flow/index.html', {'flows': f})
def show(request, id):
f = Flow.objects.get(id=id)
return render(request, 'flow/show.html', {'f': f})
def create(request):
if request.method == 'POST':
form = FlowForm(request.POST)
if form.is_valid():
f = form.save()
return redirect('/flow/%s/' % f.id)
else:
form = FlowForm()
return render(request, 'flow/create.html', {'form': form})
def json(request, id):
f = Flow.objects.get(id=id)
nodes = [t.render_to_json() for t in f.techniques.all()]
links = []
for t in f.techniques.all():
links.extend(t.render_links_to_json())
js = dumps({"nodes": nodes, "links": links})
return HttpResponse(js, content_type="application/json")
| coffenbacher/bjj | flow/views.py | Python | agpl-3.0 | 1,006 |
# coding=utf-8
__author__ = 'litao'
from sqlalchemy import create_engine
import tushare as ts
try:
df = ts.get_hist_data('600848')
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.insert(0,'code','600848')
#df.to_sql('hist_data',engine,if_exists='append')
df = ts.get_stock_basics()
#df.rename(columns={'$a': 'a', '$b': 'b'}, inplace=True)
df.rename(columns={'stockno':'股票代码','stockname':'股票名称','sshy':'所属杨业','sjgn':'涉及概念','brspj':'本日收盘价','syspj':'上月收盘价','ltg':'流通股','zgb':'总股本','jlr':'净利润','yysr':'营业收入','mgjzc':'每股净资产','mgsy':'每股收益','jlrzzl':'净利润增长率','mgxjl':'每股现金流','mggjj':'每股公积金','mgwfplr':'每股未分配利润','dtsyl':'动态市盈率','sjl':'市净率','ltsz':'流通市值','zsz':'总市值','jjsj':'解禁时间','mlv':'毛利率','jlv':'净利率','roe':'ROE','fzl':'负债率','importdate':'导入日期','sssj':'上市时间','cplx':'产品类型','ssdy':'所属地域','lrzy':'利润总额','tenholder':'十大持股人','gdrs':'股东人数'}, inplace=True)
print df
#df.to_csv('d:\\stock_basic.csv')
#df.to_sql('stock_basics',engine,if_exists='append')
except Exception,e:
print e
| nfsli926/stock | python/com/nfs/getStockBytushare.py | Python | apache-2.0 | 1,305 |
#coding:utf-8
'''
1. A simple service that monitors whether a server is running. The sample
   code found online was not very complete: one version had not been updated
   and its SvcDoRun was incomplete (see
   http://www.chinaunix.net/jh/55/558190.html), and the one on
   mail.python.org did not define the _svc_name_ variable (see
   http://mail.python.org/pipermail/python-list/2005-December/315190.html).
2. What this implementation does is very simple: it just writes the current
   time to the file "c:\\temp\\time.txt". Feel free to extend it.
3. Usage:
    service install    install the service
    service start      start the service
    service stop       stop the service
    service debug      run in debug mode
    service remove     remove the service
service.py
--- code begins
'''
import win32serviceutil
import win32service
import win32event
import win32evtlogutil
import time
class service(win32serviceutil.ServiceFramework):
_svc_name_ = "test_python"
_svc_display_name_ = "test_python"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
print 'service starts'
def SvcDoRun(self):
import servicemanager
#------------------------------------------------------
# Make entry in the event log that this service started
#------------------------------------------------------
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ''))
#-------------------------------------------------------------
# Set an amount of time to wait (in milliseconds) between runs
#-------------------------------------------------------------
self.timeout = 100
while 1:
#-------------------------------------------------------
# Wait for service stop signal, if I timeout, loop again
#-------------------------------------------------------
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
#
# Check to see if self.hWaitStop happened
#
if rc == win32event.WAIT_OBJECT_0:
#
# Stop signal encountered
#
break
else:
#
# Put your code here
#
#
f = open('c:\\temp\\time.txt', 'w', 0)
f.write(time.ctime(time.time()))
f.close()
print 'service in running'
time.sleep(1)
#
# Only return from SvcDoRun when you wish to stop
#
return
def SvcStop(self):
#---------------------------------------------------------------------
# Before we do anything, tell SCM we are starting the stop process.
#---------------------------------------------------------------------
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
#---------------------------------------------------------------------
# And set my event
#---------------------------------------------------------------------
win32event.SetEvent(self.hWaitStop)
print 'service ends'
return
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(service)
#--- end of code
| ptphp/PyLib | src/devs/winnt.py | Python | apache-2.0 | 3,864 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test, db, with_context
from nose.tools import assert_raises
from sqlalchemy.exc import IntegrityError
from pybossa.model.user import User
from pybossa.model.app import App
from pybossa.model.task import Task
from pybossa.model.task_run import TaskRun
from pybossa.model.category import Category
class TestModelTaskRun(Test):
@with_context
def test_task_run_errors(self):
"""Test TASK_RUN model errors."""
user = User(
email_addr="john.doe@example.com",
name="johndoe",
fullname="John Doe",
locale="en")
db.session.add(user)
db.session.commit()
user = db.session.query(User).first()
category = Category(name=u'cat', short_name=u'cat', description=u'cat')
app = App(name='Application', short_name='app', description='desc',
owner_id=user.id, category=category)
db.session.add(app)
db.session.commit()
task = Task(app_id=app.id)
db.session.add(task)
db.session.commit()
task_run = TaskRun(app_id=None, task_id=task.id)
db.session.add(task_run)
assert_raises(IntegrityError, db.session.commit)
db.session.rollback()
task_run = TaskRun(app_id=app.id, task_id=None)
db.session.add(task_run)
assert_raises(IntegrityError, db.session.commit)
db.session.rollback()
| stefanhahmann/pybossa | test/test_model/test_model_taskrun.py | Python | agpl-3.0 | 2,161 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A gRPC Interceptor that is responsible to augmenting request metadata.
This class is initialized in the GoogleAdsClient and passed into a grpc
intercept_channel whenever a new service is initialized. It intercepts requests
and updates the metadata in order to insert the developer token and
login-customer-id values.
"""
from grpc import UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
from .interceptor import Interceptor
class MetadataInterceptor(
Interceptor, UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
):
"""An interceptor that appends custom metadata to requests."""
def __init__(
self, developer_token, login_customer_id, linked_customer_id=None
):
"""Initialization method for this class.
Args:
developer_token: a str developer token.
login_customer_id: a str specifying a login customer ID.
linked_customer_id: a str specifying a linked customer ID.
"""
self.developer_token_meta = ("developer-token", developer_token)
self.login_customer_id_meta = (
("login-customer-id", login_customer_id)
if login_customer_id
else None
)
self.linked_customer_id_meta = (
("linked-customer-id", linked_customer_id)
if linked_customer_id
else None
)
def _update_client_call_details_metadata(
self, client_call_details, metadata
):
"""Updates the client call details with additional metadata.
Args:
client_call_details: An instance of grpc.ClientCallDetails.
metadata: Additional metadata defined by GoogleAdsClient.
Returns:
An new instance of grpc.ClientCallDetails with additional metadata
from the GoogleAdsClient.
"""
client_call_details = self.get_client_call_details_instance(
client_call_details.method,
client_call_details.timeout,
metadata,
client_call_details.credentials,
)
return client_call_details
def _intercept(self, continuation, client_call_details, request):
"""Generic interceptor used for Unary-Unary and Unary-Stream requests.
Args:
continuation: a function to continue the request process.
client_call_details: a grpc._interceptor._ClientCallDetails
instance containing request metadata.
request: a SearchGoogleAdsRequest or SearchGoogleAdsStreamRequest
message class instance.
Returns:
A grpc.Call/grpc.Future instance representing a service response.
"""
if client_call_details.metadata is None:
metadata = []
else:
metadata = list(client_call_details.metadata)
metadata.append(self.developer_token_meta)
if self.login_customer_id_meta:
metadata.append(self.login_customer_id_meta)
if self.linked_customer_id_meta:
metadata.append(self.linked_customer_id_meta)
client_call_details = self._update_client_call_details_metadata(
client_call_details, metadata
)
return continuation(client_call_details, request)
def intercept_unary_unary(self, continuation, client_call_details, request):
"""Intercepts and appends custom metadata for Unary-Unary requests.
Overrides abstract method defined in grpc.UnaryUnaryClientInterceptor.
Args:
continuation: a function to continue the request process.
client_call_details: a grpc._interceptor._ClientCallDetails
instance containing request metadata.
request: a SearchGoogleAdsRequest or SearchGoogleAdsStreamRequest
message class instance.
Returns:
A grpc.Call/grpc.Future instance representing a service response.
"""
return self._intercept(continuation, client_call_details, request)
def intercept_unary_stream(
self, continuation, client_call_details, request
):
"""Intercepts and appends custom metadata to Unary-Stream requests.
Overrides abstract method defined in grpc.UnaryStreamClientInterceptor.
Args:
continuation: a function to continue the request process.
client_call_details: a grpc._interceptor._ClientCallDetails
instance containing request metadata.
request: a SearchGoogleAdsRequest or SearchGoogleAdsStreamRequest
message class instance.
Returns:
A grpc.Call/grpc.Future instance representing a service response.
"""
return self._intercept(continuation, client_call_details, request)
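# A hedged usage sketch (added for illustration, not part of the original
# file): shows how an interceptor like this is typically attached to a gRPC
# channel. The endpoint, token and customer ID below are placeholders, and
# `channel_credentials` is assumed to have been created elsewhere.
#
#   import grpc
#   channel = grpc.secure_channel('googleads.googleapis.com:443',
#                                 channel_credentials)
#   interceptor = MetadataInterceptor('INSERT_DEVELOPER_TOKEN', '1234567890')
#   channel = grpc.intercept_channel(channel, interceptor)
#   # Stubs created from `channel` now carry the developer-token and
#   # login-customer-id metadata on every unary-unary/unary-stream call.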
| googleads/google-ads-python | google/ads/googleads/interceptors/metadata_interceptor.py | Python | apache-2.0 | 5,368 |
# Transcribing DNA into RNA
# rosalind.info/problems/rna
import sys
class rna:
def main(self, dna_seq):
dna_seq = list(dna_seq)
for idx, i in enumerate(dna_seq):
if i == 'T':
dna_seq[idx] = 'U'
print(''.join(dna_seq))
if __name__ == '__main__':
filename = sys.argv[1]
if not filename:
raise Exception('ERROR: File name should not be empty!')
with open(filename, 'r') as seq_file:
rna().main(seq_file.read())
| yuriyshapovalov/Prototypes | Rosalind/rna.py | Python | apache-2.0 | 513 |
#!/usr/bin/python3
# -*- coding: utf-8, vim: expandtab:ts=4 -*-
"""
trainer.py is a module of HunTag and is used to train maxent models
"""
import sys
from collections import Counter, defaultdict
import joblib
from scipy.sparse import csr_matrix
import numpy as np
from array import array
from sklearn.linear_model import LogisticRegression
# from sklearn.linear_model import SGDClassifier
# from sklearn.svm import SVC
# from sklearn.multiclass import OneVsRestClassifier
from .tools import BookKeeper, featurize_sentence, use_featurized_sentence, bind_features_to_indices, \
load_options_and_features
class Trainer:
def __init__(self, opts, source_fields=None, target_fields=None):
self.features, self.source_fields, self.target_fields, options = \
load_options_and_features(opts, source_fields, target_fields)
self._tag_field_name = options['gold_tag_field'] # One of the source fields
if options['train_params'] is None:
# Set clasifier algorithm here
parameters = {'solver': 'lbfgs', 'multi_class': 'auto', 'max_iter': 5000}
else:
parameters = options['train_params']
solver = LogisticRegression
# Possible alternative solvers:
# parameters = {'loss':'modified_huber', 'n_jobs': -1}
# solver = SGDClassifier
# ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’
# parameters = {'kernel': 'rbf', 'probability': True}
# solver = SVC
# ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’
# parameters = {'kernel': 'linear', 'probability': True}
# solver = OneVsRestClassifier(SVC(**parameters)) # XXX won't work because ** in parameters...
self._cutoff = options['cutoff']
self._parameters = parameters
self._model = solver(**parameters)
self._model_file_name = options['model_filename']
self._feat_counter_file_name = options['featcounter_filename']
self._label_counter_file_name = options['labelcounter_filename']
if options['inp_featurized']:
self._featurize_sentence_fun = use_featurized_sentence
else:
self._featurize_sentence_fun = featurize_sentence
self._tok_count = -1 # Index starts from 0
self._data_sizes = options['data_sizes']
self._rows = array(self._data_sizes['rows'])
self._cols = array(self._data_sizes['cols'])
self._data = array(self._data_sizes['data'])
self._labels = array(self._data_sizes['labels'])
self._sent_end = array(self._data_sizes['sent_end']) # Keep track of sentence boundaries
self._matrix = None
self._feat_counter = BookKeeper()
self._label_counter = BookKeeper()
self._feat_filter = lambda token_feats: token_feats
feat_filename = options.get('used_feats')
if feat_filename is not None:
with open(feat_filename, encoding='UTF-8') as fh:
used_feats = {line.strip() for line in fh}
self._feat_filter = lambda token_feats: [feat for feat in token_feats if feat in used_feats]
self._tag_field = 0 # Always the first field!
def save(self):
print('saving model...', end='', file=sys.stderr, flush=True)
joblib.dump(self._model, '{0}'.format(self._model_file_name), compress=3)
print('done\nsaving feature and label lists...', end='', file=sys.stderr, flush=True)
self._feat_counter.save(self._feat_counter_file_name)
self._label_counter.save(self._label_counter_file_name)
print('done', file=sys.stderr, flush=True)
def _update_sent_end(self, sent_ends, row_nums):
new_ends = array(self._data_sizes['sent_end'])
vbeg = 0
for end in sent_ends:
vend = -1
for i, e in enumerate(row_nums[vbeg:]):
if e <= end:
vend = vbeg + i
else:
break
if vend > 0:
new_ends.append(vend)
vbeg = vend + 1
return new_ends
def _convert_to_np_array(self):
rows_np = np.array(self._rows, dtype=self._data_sizes['rows_np'])
cols_np = np.array(self._cols, dtype=self._data_sizes['cols'])
data_np = np.array(self._data, dtype=self._data_sizes['data'])
labels_np = np.array(self._labels, dtype=self._data_sizes['labels'])
del self._rows
del self._cols
del self._data
del self._labels
self._rows = rows_np
self._cols = cols_np
self._data = data_np
self._labels = labels_np
def _make_sparse_array(self, row_num, col_num):
print('creating training problem...', end='', file=sys.stderr, flush=True)
matrix = csr_matrix((self._data, (self._rows, self._cols)), shape=(row_num, col_num),
dtype=self._data_sizes['data'])
del self._rows
del self._cols
del self._data
print('done!', file=sys.stderr, flush=True)
return matrix
def cutoff_feats(self):
self._tok_count += 1 # This actually was the token index which starts from 0...
self._convert_to_np_array()
col_num = self._feat_counter.num_of_names()
if self._cutoff < 2: # Keep all...
self._matrix = self._make_sparse_array(self._tok_count, col_num)
else:
            print('discarding features with fewer than {0} occurrences...'.format(self._cutoff), end='', file=sys.stderr,
flush=True)
to_delete = self._feat_counter.cutoff(self._cutoff)
print('done!\nreducing training events by {0}...'.format(len(to_delete)), end='', file=sys.stderr,
flush=True)
# ...that are not in featCounter anymore
indices_to_keep_np = np.fromiter((ind for ind, featNo in enumerate(self._cols) if featNo not in to_delete),
dtype=self._data_sizes['cols'])
del to_delete
# Reduce cols
cols_np_new = self._cols[indices_to_keep_np]
del self._cols
self._cols = cols_np_new
# Reduce data
data_np_new = self._data[indices_to_keep_np]
del self._data
self._data = data_np_new
# Reduce rows
rows_np_new = self._rows[indices_to_keep_np]
row_num_keep = np.unique(rows_np_new)
row_num = row_num_keep.shape[0]
col_num = indices_to_keep_np.max() + 1
del self._rows
self._rows = rows_np_new
del indices_to_keep_np
# Reduce labels
labels_np_new = self._labels[row_num_keep]
del self._labels
self._labels = labels_np_new
# Update sentence end markers
new_end = self._update_sent_end(self._sent_end, row_num_keep)
del self._sent_end
self._sent_end = new_end
del row_num_keep
print('done!', file=sys.stderr, flush=True)
matrix = self._make_sparse_array(row_num, col_num)
print('updating indices...', end='', file=sys.stderr, flush=True)
# Update rowNos
rows, _ = matrix.nonzero()
matrix_new = matrix[np.unique(rows), :]
del matrix
del rows
# Update featNos
_, cols = matrix_new.nonzero()
self._matrix = matrix_new[:, np.unique(cols)]
del matrix_new
del cols
print('done!', file=sys.stderr, flush=True)
def prepare_fields(self, field_names):
self._tag_field = field_names.get(self._tag_field_name) # Bind tag field separately as it has no feature
return bind_features_to_indices(self.features, {k: v for k, v in field_names.items()
if k != self._tag_field and v != self._tag_field})
def process_sentence(self, sen, features):
"""
Read input data in variable forms
:param sen: one token per elem
:param features: the features bound to columns
:return: dummy list of tokens which are list of features
"""
for label, *feats in self._featurize_sentence_fun(sen, features, self._feat_filter, self._tag_field):
self._tok_count += 1
self._add_context(feats, label, self._tok_count)
self._sent_end.append(self._tok_count)
return [[]] # Dummy
def _add_context(self, tok_feats, label, cur_tok):
rows_append = self._rows.append
cols_append = self._cols.append
data_append = self._data.append
# Features are sorted to ensure identical output no matter where the features are coming from
for featNumber in {self._feat_counter.get_no_train(feat) for feat in sorted(tok_feats)}:
rows_append(cur_tok)
cols_append(featNumber)
data_append(1)
self._labels.append(self._label_counter.get_no_train(label))
# Counting zero elements can be really slow...
def most_informative_features(self, output_stream=sys.stdout, n=-1, count_zero=False):
# Compute min(P(feature=value|label1), for any label1)/max(P(feature=value|label2), for any label2)
        # (using conditional probs via joint probabilities) as in NLTK (Bird et al. 2009):
# P(feature=value|label) = P(feature=value, label)/P(label)
# P(feature=value, label) = C(feature=value, label)/C(feature=value)
# P(label) = C(label)/sum_i(C(label_i))
#
# P(feature=value|label) = (C(feature=value, label)/C(feature=value))/(C(label)/sum_i(C(label_i))) =
# (C(feature=value, label)*sum_i(C(label_i)))/(C(feature=value)*C(label))
#
# min(P(feature=value|label1), for any label1)/max(P(feature=value|label2), for any label2) =
#
# min((C(feature=value, label1)*sum_i(C(label_i)))/(C(feature=value)*C(label1)), for any label1)/
# max((C(feature=value, label2)*sum_i(C(label_i)))/(C(feature=value)*C(label2)), for any label2) =
#
# (sum_i(C(label_i))/C(feature=value))*min(C(feature=value, label1)/C(label1)), for any label1)/
# (sum_i(C(label_i))/C(feature=value))*max(C(feature=value, label2)/C(label2)), for any label2) =
#
# min(C(feature=value, label1)/C(label1), for any label1)/
# max(C(feature=value, label2)/C(label2), for any label2)
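        # A small worked example (added for clarity; numbers are illustrative):
        # if a feature value occurs 8 times with label A and 2 times with
        # label B, and C(A)=100, C(B)=50, then P(f=v|A)=8/100=0.08 and
        # P(f=v|B)=2/50=0.04, so the reported ratio is 0.08:0.04 = 2.0:1.0.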
        matrix = self._matrix  # For easier handling
self._feat_counter.makeno_to_name()
self._label_counter.makeno_to_name()
featno_to_name = self._feat_counter.no_to_name
labelno_to_name = self._label_counter.no_to_name
labels = self._labels # indexed by token rows (row = token number, column = feature number)
feat_val_counts = defaultdict(Counter) # feat, val -> label: count
if count_zero:
# Every index (including zeros to consider negative correlation)
for feat in range(matrix.shape[1]):
for tok in range(matrix.shape[0]):
feat_val_counts[feat, matrix[tok, feat]][labels[tok]] += 1
else:
matrix = matrix.tocoo()
# Every nonzero index
for tok, feat, val in zip(matrix.row, matrix.col, matrix.data):
feat_val_counts[feat, val][labels[tok]] += 1
del matrix
# (C(label2), for any label2)
label_counts = Counter()
for k, v in zip(*np.unique(self._labels, return_counts=True)):
label_counts[k] = v
num_of_labels = len(label_counts)
maxprob = defaultdict(lambda: 0.0)
minprob = defaultdict(lambda: 1.0)
features = set()
# For every (feature, val) touple (that has nonzero count)
for feature, counts in feat_val_counts.items():
# For every label label...
features.add(feature)
for label, count in counts.items():
# prob can only be 0 if the nominator is 0, but this case is already filtered in the Counter...
prob = count/label_counts[label]
maxprob[feature] = max(prob, maxprob[feature])
minprob[feature] = min(prob, minprob[feature])
# Convert features to a list, & sort it by how informative features are.
"""
From NTLK docs:
For the purpose of this function, the
informativeness of a feature ``(fname,fval)`` is equal to the
highest value of P(fname=fval|label), for any label, divided by
the lowest value of P(fname=fval|label), for any label:
| max[ P(fname=fval|label1) / P(fname=fval|label2) ]
"""
print('"Feature name"=Value (True/False)', 'Sum of occurences', 'Counts per label', 'Probability per label',
'Max prob.:Min prob.=Ratio:1.0', sep='\t', file=output_stream) # Print header (legend)
# To avoid division by zero...
for feature in sorted(features, key=lambda feature_: minprob[feature_]/maxprob[feature_])[:n]:
sum_occurences = sum(feat_val_counts[feature].values())
if len(feat_val_counts[feature]) < num_of_labels:
ratio = 'INF'
else:
ratio = maxprob[feature]/minprob[feature]
# NLTK notation
# print('{0:50} = {1:} {2:6} : {3:-6} = {4} : 1.0'.format(featno_to_name(feature[0]), feature[1],
# maxprob[feature],
# minprob[feature], ratio))
# More detailed notation
print('"{0:50s}"={1}\t{2}\t{3}\t{4}\t{5:6}:{6:-6}={7}:1.0'.format(
featno_to_name[feature[0]],
bool(feature[1]),
sum_occurences,
'/'.join(('{0}:{1}'.format(labelno_to_name[l], c)
for l, c in feat_val_counts[feature].items())),
'/'.join(('{0}:{1:.8f}'.format(labelno_to_name[l], c/label_counts[l])
for l, c in feat_val_counts[feature].items())),
maxprob[feature], minprob[feature], ratio), file=output_stream)
def write_featurized_input(self, output_stream=sys.stdout):
self._feat_counter.makeno_to_name()
self._label_counter.makeno_to_name()
featno_to_name = self._feat_counter.no_to_name
labelno_to_name = self._label_counter.no_to_name
sent_end = self._sent_end
matrix = self._matrix.tocsr()
labels = self._labels
beg = 0
for end in sent_end:
for row in range(beg, end + 1):
print('{0}\t{1}'.format(labelno_to_name[labels[row]],
'\t'.join(featno_to_name[col].replace(':', 'colon')
for col in matrix[row, :].nonzero()[1])),
file=output_stream)
print(file=output_stream) # Sentence separator blank line
beg = end + 1
def train(self):
print('training with option(s) "{0}"...'.format(self._parameters), end='', file=sys.stderr, flush=True)
_ = self._model.fit(self._matrix, self._labels)
print('done', file=sys.stderr, flush=True)
| ppke-nlpg/HunTag3 | huntag/trainer.py | Python | lgpl-3.0 | 15,454 |
#!/usr/bin/env python
import click
import os
import shutil
import uuid
from nlppln.utils import create_dirs, out_file_name
@click.command()
@click.argument('in_file', type=click.Path(exists=True))
@click.option('--rename', type=click.Choice(['random', 'spaces', 'copy']),
default='spaces')
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
def command(in_file, rename, out_dir):
create_dirs(out_dir)
ext = os.path.splitext(in_file)[1].replace('.', '')
fname = os.path.basename(in_file)
if rename == 'spaces':
fname = fname.replace(' ', '-')
elif rename == 'random':
fname = '{}.{}'.format(uuid.uuid4(), ext)
fo = out_file_name(out_dir, fname)
shutil.copy2(in_file, fo)
if __name__ == '__main__':
command()
| WhatWorksWhenForWhom/nlppln | nlppln/commands/copy_and_rename.py | Python | apache-2.0 | 798 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2008 Albert Astals Cid <aacid@kde.org>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4 import QtXml
app = QtGui.QApplication(sys.argv)
if len(sys.argv) != 5:
print "Error: You have to specify the sound theme file to check, the folder containing the graphical themes, the folder where to look for the sound files and the path to ignore in the file tag of the sound object"
print " E.g: For lt it would be something like 'python soundthemecheker.py /home/kdeunstable/l10n-kde4/lt/data/kdegames/ktuberling/lt.soundtheme /home/kdeunstable/kdegames/ktuberling/pics /home/kdeunstable/l10n-kde4/lt/data/kdegames/ktuberling/ lt'"
sys.exit(1)
soundThemePath = sys.argv[1]
graphicalThemesPath = sys.argv[2]
soundsPath = sys.argv[3]
ignorePath = sys.argv[4]
print "Processing " + soundThemePath + " " + graphicalThemesPath
soundThemeFile = QtCore.QFile(soundThemePath)
if soundThemeFile.exists():
if (soundThemeFile.open(QtCore.QIODevice.ReadOnly)):
doc = QtXml.QDomDocument()
doc.setContent(soundThemeFile.readAll())
root = doc.documentElement()
if (root.tagName() == "language"):
soundList = [];
soundTag = root.firstChildElement("sound");
while (not soundTag.isNull()):
name = soundTag.attribute("name")
contains = soundList.count(name)
if (contains == 0):
soundList.append(name)
fpath = soundTag.attribute("file")
if (fpath.startsWith(ignorePath)):
fpath = fpath.mid(len(ignorePath))
if (not QtCore.QFile.exists(soundsPath + fpath)):
print "Error: Sound file for " + name + " not found"
else:
print QtCore.QString("Error: The name %1 is used more than once in the sound theme file").arg(name)
soundTag = soundTag.nextSiblingElement("sound");
graphicalThemesDir = QtCore.QDir(graphicalThemesPath)
allSoundsList = []
for graphicalThemePath in graphicalThemesDir.entryList(QtCore.QStringList("*.theme")):
graphicalThemeFile = QtCore.QFile(graphicalThemesPath + "/" + graphicalThemePath)
if graphicalThemeFile.exists():
if (graphicalThemeFile.open(QtCore.QIODevice.ReadOnly)):
doc = QtXml.QDomDocument()
doc.setContent(graphicalThemeFile.readAll())
root = doc.documentElement()
if (root.tagName() == "playground"):
objectTag = root.firstChildElement("object");
while (not objectTag.isNull()):
sound = objectTag.attribute("sound")
contains = allSoundsList.count(sound)
if (sound == ""):
print "The sound of " + objectTag.attribute("name") + " in " + graphicalThemeFile.fileName() + " is empty"
if (contains == 0):
allSoundsList.append(sound)
objectTag = objectTag.nextSiblingElement("object");
else:
print "Error: The graphical theme file should begin with the playground tag " + graphicalThemeFile.fileName()
else:
print QtCore.QString("Error: Could not open %1 for reading").arg(graphicalThemePath)
else:
print QtCore.QString("Error: File %1 does not exist").arg(graphicalThemePath)
for sound in soundList:
if (allSoundsList.count(sound) == 1):
allSoundsList.remove(sound)
else:
print "Error: The sound theme defines " + sound + " that is not used in any graphical theme"
if (len(allSoundsList) > 0):
print "The following sounds used in the graphical themes are not defined in the sound theme:"
for sound in allSoundsList:
print "\t" + sound
else:
print "Error: The sound theme file should begin with the language tag"
soundThemeFile.close();
else:
print QtCore.QString("Error: Could not open %1 for reading").arg(path)
else:
print QtCore.QString("Error: File %1 does not exist").arg(soundThemePath)
sys.exit(0)
| jsj2008/kdegames | ktuberling/soundthemecheker.py | Python | gpl-2.0 | 4,415 |
import squeakspace.common.util as ut
import squeakspace.common.util_http as ht
import squeakspace.proxy.server.db_sqlite3 as db
import squeakspace.common.squeak_ex as ex
import config
def post_handler(environ):
query = ht.parse_post_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
url = ht.get_required(query, 'url')
real_node_name = ht.get_required(query, 'real_node_name')
fingerprint = ht.get_optional(query, 'fingerprint')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
db.set_node_addr(c, user_id, session_id, node_name, url, real_node_name, fingerprint)
db.commit(conn)
raise ht.ok_json({'status' : 'ok'})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def get_handler(environ):
query = ht.parse_get_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
addr = db.read_node_addr(c, user_id, session_id, node_name)
raise ht.ok_json({'status' : 'ok', 'addr' : addr})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def delete_handler(environ):
query = ht.parse_post_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
db.delete_node_addr(c, user_id, session_id, node_name)
db.commit(conn)
raise ht.ok_json({'status' : 'ok'})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def main_handler(environ):
ht.dispatch_on_method(environ, {
'POST' : post_handler,
'GET' : get_handler,
'DELETE' : delete_handler})
def application(environ, start_response):
return ht.respond_with_handler(environ, start_response, main_handler)
| eek6/squeakspace | www/proxy/scripts/local/node_addr.py | Python | gpl-3.0 | 2,510 |