repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string, 15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|---|
skiselev/upm | examples/python/aeotecsdg2.py | 7 | 3146 | #!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ozw as sensorObj
def main():
# This function lets you run code on exit
def exitHandler():
print("Turning switch off and sleeping for 5 seconds...")
sensor.off()
time.sleep(5)
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
defaultDev = "/dev/ttyACM0"
if (len(sys.argv) > 1):
defaultDev = sys.argv[1]
print("Using device", defaultDev)
# Instantiate an Aeotec Smart Dimmer Gen2 instance, on device node
# 9. You will almost certainly need to change this to reflect your
# own network. Use the ozwdump example to see what nodes are
# available.
sensor = sensorObj.AeotecSDG2(9)
# The first thing to do is create options, then lock them when done.
sensor.optionsCreate()
sensor.optionsLock()
# Next, initialize it.
print("Initializing, this may take awhile depending on your ZWave network")
sensor.init(defaultDev)
print("Initialization complete")
# turn light on
print("Turning switch on, then sleeping for 5 secs")
    sensor.on()
    time.sleep(5)
    print("Querying data...")
    dim = False
while (True):
# put on a light show...
if (dim):
sensor.setLevel(25)
else:
sensor.on()
        dim = not dim
sensor.update()
print("Current Level:", end=' ')
print(sensor.getLevel())
print("Volts:", end=' ')
print(sensor.getVolts(), end=' ')
print("volts")
print("Energy Consumption:", end=' ')
print(sensor.getEnergy(), end=' ')
print("kWh")
print("Watts:", end=' ')
print(sensor.getWatts())
print("Current:", end=' ')
print(sensor.getCurrent(), end=' ')
print("amps")
print()
time.sleep(5)
if __name__ == '__main__':
main()
| mit | 8,280,930,253,337,730,000 | 4,775,731,541,504,007,000 | 30.148515 | 79 | 0.655435 | false |
geotagx/geotagx-pybossa-archive | pybossa/auth/task.py | 1 | 1535 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask.ext.login import current_user
import pybossa.model as model
from pybossa.core import db
def create(task=None):
if not current_user.is_anonymous():
app = db.session.query(model.App).filter_by(id=task.app_id).one()
if app.owner_id == current_user.id or current_user.admin is True:
return True
else:
return False
else:
return False
def read(task=None):
return True
def update(task):
if not current_user.is_anonymous():
app = db.session.query(model.App).filter_by(id=task.app_id).one()
if app.owner_id == current_user.id or current_user.admin is True:
return True
else:
return False
else:
return False
def delete(task):
return update(task)
| agpl-3.0 | 632,532,869,763,057,300 | 2,688,696,324,854,364,000 | 29.098039 | 77 | 0.683388 | false |
MasterGowen/moonrain | moonrain/accounts/models.py | 1 | 2939 | from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from ..projects.models import Project
class UserManager(BaseUserManager):
def create_user(self, email, username, password=None):
if not email:
            raise ValueError('An email address must be provided')
user = self.model(
email=UserManager.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, username, password):
user = self.create_user(email,
password=password,
username=username)
user.is_admin = True
user.save(using=self._db)
return user
class User(AbstractBaseUser):
'''
    User
'''
email = models.EmailField(
        verbose_name='Email address',
max_length=32,
unique=True,
db_index=True,
)
username = models.CharField(
        verbose_name='Username',
blank=False,
max_length=32,
unique=True,
)
avatar = models.ImageField(
        verbose_name='Avatar',
upload_to='images/%Y/%m',
blank=True,
)
first_name = models.CharField(
        verbose_name='First name',
max_length=16,
blank=True,
)
last_name = models.CharField(
        verbose_name='Last name',
max_length=32,
blank=True,
)
department = models.CharField(
        verbose_name='Department',
max_length=255,
blank=True,
)
is_admin = models.BooleanField(
        verbose_name='Is administrator?',
default=False,
)
is_superuser = models.BooleanField(
        verbose_name='Is superuser?',
default=False,
)
    projects = models.ManyToManyField(Project, verbose_name='Projects',
                                      blank=True,
                                      help_text='Projects the user participates in',)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
objects = UserManager()
def get_full_name(self):
return '%s %s' % (self.last_name,
self.first_name,)
def get_short_name(self):
return self.username
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.is_admin
class Meta:
        verbose_name = ('User')
        verbose_name_plural = ('Users')
| gpl-2.0 | 5,536,759,598,815,103,000 | 3,585,794,387,274,161,700 | 23.070796 | 93 | 0.573005 | false |
weety/rt-thread | tools/rt_studio.py | 6 | 34261 | import os
import re
from string import Template
import rtconfig
import shutil
# version
MODULE_VER_NUM = 1
cproject_temp = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?fileVersion 4.0.0?><cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<storageModule moduleId="org.eclipse.cdt.core.settings">
<cconfiguration id="ilg.gnuarmeclipse.managedbuild.cross.config.elf.debug.553091094">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="ilg.gnuarmeclipse.managedbuild.cross.config.elf.debug.553091094" moduleId="org.eclipse.cdt.core.settings" name="Debug">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<configuration artifactName="rtthread" buildArtefactType="org.eclipse.cdt.build.core.buildArtefactType.exe" buildProperties="org.eclipse.cdt.build.core.buildArtefactType=org.eclipse.cdt.build.core.buildArtefactType.exe,org.eclipse.cdt.build.core.buildType=org.eclipse.cdt.build.core.buildType.debug" cleanCommand="${cross_rm} -rf" description="" id="ilg.gnuarmeclipse.managedbuild.cross.config.elf.debug.553091094" name="Debug" parent="ilg.gnuarmeclipse.managedbuild.cross.config.elf.debug">
<folderInfo id="ilg.gnuarmeclipse.managedbuild.cross.config.elf.debug.553091094." name="/" resourcePath="">
<toolChain id="ilg.gnuarmeclipse.managedbuild.cross.toolchain.elf.debug.1201710416" name="ARM Cross GCC" superClass="ilg.gnuarmeclipse.managedbuild.cross.toolchain.elf.debug">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.addtools.createflash.251260409" name="Create flash image" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.addtools.createflash" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.addtools.createlisting.1365878149" name="Create extended listing" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.addtools.createlisting" useByScannerDiscovery="false"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.addtools.printsize.709136944" name="Print size" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.addtools.printsize" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.level.1986446770" name="Optimization Level" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.level" useByScannerDiscovery="true" value="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.level.none" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.messagelength.1312975261" name="Message length (-fmessage-length=0)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.messagelength" useByScannerDiscovery="true" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.signedchar.1538128212" name="'char' is signed (-fsigned-char)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.signedchar" useByScannerDiscovery="true" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.functionsections.2136804218" name="Function sections (-ffunction-sections)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.functionsections" useByScannerDiscovery="true" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.datasections.244767666" name="Data sections (-fdata-sections)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.optimization.datasections" useByScannerDiscovery="true" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.debugging.level.1055848773" name="Debug level" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.debugging.level" useByScannerDiscovery="true" value="ilg.gnuarmeclipse.managedbuild.cross.option.debugging.level.default" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.debugging.format.501941135" name="Debug format" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.debugging.format" useByScannerDiscovery="true" value="ilg.gnuarmeclipse.managedbuild.cross.option.debugging.format.dwarf2" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.toolchain.name.1696308067" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.toolchain.name" useByScannerDiscovery="false" value="GNU Tools for ARM Embedded Processors" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.architecture.1558403188" name="Architecture" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.architecture" useByScannerDiscovery="false" value="ilg.gnuarmeclipse.managedbuild.cross.option.architecture.arm" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.family.749415257" name="ARM family" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.family" useByScannerDiscovery="false" value="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.mcpu.cortex-m4" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.instructionset.2114153533" name="Instruction set" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.instructionset" useByScannerDiscovery="false" value="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.instructionset.thumb" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.prefix.1600865811" name="Prefix" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.prefix" useByScannerDiscovery="false" value="arm-none-eabi-" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.c.1109963929" name="C compiler" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.c" useByScannerDiscovery="false" value="gcc" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.cpp.1040883831" name="C++ compiler" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.cpp" useByScannerDiscovery="false" value="g++" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.ar.1678200391" name="Archiver" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.ar" useByScannerDiscovery="false" value="ar" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.objcopy.1171840296" name="Hex/Bin converter" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.objcopy" useByScannerDiscovery="false" value="objcopy" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.objdump.342604837" name="Listing generator" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.objdump" useByScannerDiscovery="false" value="objdump" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.size.898269225" name="Size command" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.size" useByScannerDiscovery="false" value="size" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.make.2016398076" name="Build command" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.make" useByScannerDiscovery="false" value="make" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.command.rm.1606171496" name="Remove command" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.command.rm" useByScannerDiscovery="false" value="rm" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.toolchain.id.540792084" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.toolchain.id" useByScannerDiscovery="false" value="1287942917" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.architecture.430121817" name="Architecture" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.architecture" useByScannerDiscovery="false" value="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.arch.none" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.fpu.abi.966735324" name="Float ABI" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.fpu.abi" useByScannerDiscovery="true" value="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.fpu.abi.hard" valueType="enumerated"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.warnings.allwarn.1381561249" name="Enable all common warnings (-Wall)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.warnings.allwarn" useByScannerDiscovery="true" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.target.other.2041717463" name="Other target flags" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.target.other" useByScannerDiscovery="true" value="" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.fpu.unit.1463655269" name="FPU Type" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.fpu.unit" useByScannerDiscovery="true" value="ilg.gnuarmeclipse.managedbuild.cross.option.arm.target.fpu.unit.fpv4spd16" valueType="enumerated"/>
<targetPlatform archList="all" binaryParser="org.eclipse.cdt.core.ELF" id="ilg.gnuarmeclipse.managedbuild.cross.targetPlatform.1798638225" isAbstract="false" osList="all" superClass="ilg.gnuarmeclipse.managedbuild.cross.targetPlatform"/>
<builder buildPath="${workspace_loc:/${ProjName}/Debug" cleanBuildTarget="clean2" id="ilg.gnuarmeclipse.managedbuild.cross.builder.1736709688" keepEnvironmentInBuildfile="false" managedBuildOn="true" name="Gnu Make Builder" parallelBuildOn="true" parallelizationNumber="optimal" superClass="ilg.gnuarmeclipse.managedbuild.cross.builder"/>
<tool commandLinePattern="${COMMAND} ${FLAGS} -c ${OUTPUT_FLAG} ${OUTPUT_PREFIX}${OUTPUT} ${INPUTS}" id="ilg.gnuarmeclipse.managedbuild.cross.tool.assembler.1810966071" name="GNU ARM Cross Assembler" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.assembler">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.usepreprocessor.1072524326" name="Use preprocessor" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.usepreprocessor" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.include.paths.161242639" name="Include paths (-I)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.include.paths" useByScannerDiscovery="true"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.defs.1521934876" name="Defined symbols (-D)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.defs" useByScannerDiscovery="true"/>
<option IS_BUILTIN_EMPTY="false" IS_VALUE_EMPTY="false" id="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.flags.1325367962" name="Assembler flags" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.flags" useByScannerDiscovery="false" valueType="stringList">
<listOptionValue builtIn="false" value="-mimplicit-it=thumb"/>
</option>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.other.647856572" name="Other assembler flags" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.assembler.other" useByScannerDiscovery="false" value="a_misc_flag" valueType="string"/>
<inputType id="ilg.gnuarmeclipse.managedbuild.cross.tool.assembler.input.1843333483" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.assembler.input"/>
</tool>
<tool commandLinePattern="${COMMAND} ${FLAGS} -c ${OUTPUT_FLAG} ${OUTPUT_PREFIX}${OUTPUT} ${INPUTS}" id="ilg.gnuarmeclipse.managedbuild.cross.tool.c.compiler.1570350559" name="GNU ARM Cross C Compiler" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.c.compiler">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.include.paths.634882052" name="Include paths (-I)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.include.paths" useByScannerDiscovery="true"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.defs.100549972" name="Defined symbols (-D)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.defs" useByScannerDiscovery="true"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.other.2133065240" name="Other compiler flags" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.other" useByScannerDiscovery="true" value="c_misc_flag" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.include.files.714348818" name="Include files (-include)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.compiler.include.files" useByScannerDiscovery="true"/>
<inputType id="ilg.gnuarmeclipse.managedbuild.cross.tool.c.compiler.input.992053063" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.c.compiler.input"/>
</tool>
<tool commandLinePattern="${COMMAND} ${FLAGS} ${OUTPUT_FLAG} ${OUTPUT_PREFIX}${OUTPUT} ${INPUTS}" id="ilg.gnuarmeclipse.managedbuild.cross.tool.c.linker.869072473" name="Cross ARM C Linker" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.c.linker">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.gcsections.1167322178" name="Remove unused sections (-Xlinker --gc-sections)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.gcsections" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.nostart.351692886" name="Do not use standard start files (-nostartfiles)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.nostart" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.nostdlibs.1009243715" name="No startup or default libs (-nostdlib)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.nostdlibs" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.nodeflibs.2016026082" name="Do not use default libraries (-nodefaultlibs)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.nodeflibs" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.usenewlibnano.923990336" name="Use newlib-nano (--specs=nano.specs)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.usenewlibnano" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option defaultValue="true" id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.shared.548869459" name="Shared (-shared)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.shared" useByScannerDiscovery="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.scriptfile.1818777301" name="Script files (-T)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.scriptfile" useByScannerDiscovery="false"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.libs.1135656995" name="Libraries (-l)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.libs" useByScannerDiscovery="false"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.paths.36884122" name="Library search path (-L)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.paths" useByScannerDiscovery="false"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.other.396049466" name="Other linker flags" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.other" useByScannerDiscovery="false" value="c_link_misc_flag" valueType="string"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.cref.1645737861" name="Cross reference (-Xlinker --cref)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.c.linker.cref" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<inputType id="ilg.gnuarmeclipse.managedbuild.cross.tool.c.linker.input.334732222" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.c.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool commandLinePattern="${COMMAND} ${FLAGS} ${OUTPUT_FLAG} ${OUTPUT_PREFIX}${OUTPUT} ${INPUTS}" id="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.linker.1601059928" name="GNU ARM Cross C++ Linker" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.linker">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.gcsections.437759352" name="Remove unused sections (-Xlinker --gc-sections)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.gcsections" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.scriptfile.1101974459" name="Script files (-T)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.scriptfile" useByScannerDiscovery="false"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.cref.2007675975" name="Cross reference (-Xlinker --cref)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.cref" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.usenewlibnano.2105838438" name="Use newlib-nano (--specs=nano.specs)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.usenewlibnano" useByScannerDiscovery="false" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.libs.934137837" name="Libraries (-l)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.libs" useByScannerDiscovery="false"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.nostart.2118356996" name="Do not use standard start files (-nostartfiles)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.nostart" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.nodeflibs.1427884346" name="Do not use default libraries (-nodefaultlibs)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.nodeflibs" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.nostdlibs.1433863653" name="No startup or default libs (-nostdlib)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.nostdlibs" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.printgcsections.1387745410" name="Print removed sections (-Xlinker --print-gc-sections)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.printgcsections" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.strip.1230158061" name="Omit all symbol information (-s)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.strip" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.printmap.1307581821" name="Print link map (-Xlinker --print-map)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.printmap" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.useprintffloat.960778920" name="Use float with nano printf (-u _printf_float)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.useprintffloat" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.usescanffloat.637205035" name="Use float with nano scanf (-u _scanf_float)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.usescanffloat" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.usenewlibnosys.1948314201" name="Do not use syscalls (--specs=nosys.specs)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.usenewlibnosys" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.verbose.273162112" name="Verbose (-v)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.verbose" useByScannerDiscovery="false" value="false" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.paths.1399535143" name="Library search path (-L)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.paths" useByScannerDiscovery="false"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.other.882307902" name="Other linker flags" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.linker.other" useByScannerDiscovery="false" value="cpp_link_misc_flag" valueType="string"/>
<inputType id="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.linker.input.262373798" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.linker.input">
<additionalInput kind="additionalinputdependency" paths="$(USER_OBJS)"/>
<additionalInput kind="additionalinput" paths="$(LIBS)"/>
</inputType>
</tool>
<tool id="ilg.gnuarmeclipse.managedbuild.cross.tool.archiver.506412204" name="GNU ARM Cross Archiver" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.archiver"/>
<tool id="ilg.gnuarmeclipse.managedbuild.cross.tool.createflash.1461589245" name="GNU ARM Cross Create Flash Image" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.createflash">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.createflash.choice.1937707052" name="Output file format (-O)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.createflash.choice" useByScannerDiscovery="false" value="ilg.gnuarmeclipse.managedbuild.cross.option.createflash.choice.binary" valueType="enumerated"/>
</tool>
<tool id="ilg.gnuarmeclipse.managedbuild.cross.tool.createlisting.82359725" name="GNU ARM Cross Create Listing" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.createlisting">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.source.601724476" name="Display source (--source|-S)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.source" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.allheaders.692505279" name="Display all headers (--all-headers|-x)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.allheaders" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.demangle.97345172" name="Demangle names (--demangle|-C)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.demangle" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.linenumbers.1342893377" name="Display line numbers (--line-numbers|-l)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.linenumbers" value="true" valueType="boolean"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.wide.1533725981" name="Wide lines (--wide|-w)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.createlisting.wide" value="true" valueType="boolean"/>
</tool>
<tool id="ilg.gnuarmeclipse.managedbuild.cross.tool.printsize.1073550295" name="GNU ARM Cross Print Size" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.printsize">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.printsize.format.946451386" name="Size format" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.printsize.format" useByScannerDiscovery="false"/>
</tool>
<tool commandLinePattern="${COMMAND} ${FLAGS} -c ${OUTPUT_FLAG} ${OUTPUT_PREFIX}${OUTPUT} ${INPUTS}" id="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.compiler.1302177015" name="GNU ARM Cross C++ Compiler" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.compiler">
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.defs.704468062" name="Defined symbols (-D)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.defs" useByScannerDiscovery="true"/>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.include.paths.302877723" name="Include paths (-I)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.include.paths" useByScannerDiscovery="true"/>
<option IS_BUILTIN_EMPTY="false" IS_VALUE_EMPTY="false" id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.include.files.343249373" name="Include files (-include)" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.include.files" useByScannerDiscovery="true" valueType="includeFiles">
<listOptionValue builtIn="false" value=""${workspace_loc:/${ProjName}/rtconfig_preinc.h}""/>
</option>
<option id="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.other.465079095" name="Other compiler flags" superClass="ilg.gnuarmeclipse.managedbuild.cross.option.cpp.compiler.other" useByScannerDiscovery="true" value="cpp_misc_flag" valueType="string"/>
<inputType id="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.compiler.input.45918001" superClass="ilg.gnuarmeclipse.managedbuild.cross.tool.cpp.compiler.input"/>
</tool>
</toolChain>
</folderInfo>
<sourceEntries>
<entry excluding="|" flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name=""/>
</sourceEntries>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
<project id="qemu-vexpress-a9.ilg.gnuarmeclipse.managedbuild.cross.target.elf.860020518" name="Executable" projectType="ilg.gnuarmeclipse.managedbuild.cross.target.elf"/>
</storageModule>
<storageModule moduleId="scannerConfiguration">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
<scannerConfigBuildInfo instanceId="ilg.gnuarmeclipse.managedbuild.cross.config.elf.debug.553091094;ilg.gnuarmeclipse.managedbuild.cross.config.elf.debug.553091094.;ilg.gnuarmeclipse.managedbuild.cross.tool.c.compiler.1570350559;ilg.gnuarmeclipse.managedbuild.cross.tool.c.compiler.input.992053063">
<autodiscovery enabled="true" problemReportingEnabled="true" selectedProfileId=""/>
</scannerConfigBuildInfo>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.LanguageSettingsProviders"/>
<storageModule moduleId="refreshScope" versionNumber="2">
<configuration configurationName="Debug">
<resource resourceType="PROJECT" workspacePath="/f429_tmp"/>
</configuration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.make.core.buildtargets"/>
<storageModule moduleId="org.eclipse.cdt.internal.ui.text.commentOwnerProjectMappings">
<doc-comment-owner id="org.eclipse.cdt.ui.doxygen">
<path value=""/>
</doc-comment-owner>
</storageModule>
</cproject>"""
project_temp = """<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>__project_name_flag__</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
<triggers>clean,full,incremental,</triggers>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
<triggers>full,incremental,</triggers>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.cdt.core.cnature</nature>
<nature>org.rt-thread.studio.rttnature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
</natures>
</projectDescription>"""
projcfg_ini_temp = """#RT-Thread Studio Project Configuration
#Sat Jan 16 15:18:32 CST 2021
project_type=rtt
chip_name=${chip_name}
cpu_name=None
target_freq=
clock_source=
dvendor_name=
rx_pin_name=
rtt_path=
source_freq=
csp_path=
sub_series_name=
selected_rtt_version=latest
cfg_version=v3.0
tool_chain=gcc
uart_name=
tx_pin_name=
rtt_nano_path=
output_project_path=
hardware_adapter=J-Link
project_name=${project_name}"""
eclipse_core_runtime_temp = """content-types/enabled=true
content-types/org.eclipse.cdt.core.asmSource/file-extensions=s
eclipse.preferences.version=1"""
makefile_targets_temp = """clean2:
\t-$(RM) $(CC_DEPS)$(C++_DEPS)$(C_UPPER_DEPS)$(CXX_DEPS)$(SECONDARY_FLASH)$(SECONDARY_SIZE)$(ASM_DEPS)$(S_UPPER_DEPS)$(C_DEPS)$(CPP_DEPS)
\t-$(RM) $(OBJS) *.elf
\t-@echo ' '
*.elf: $(wildcard ../linkscripts/*/*.lds) $(wildcard ../linkscripts/*/*/*.lds)"""
def get_mcu_info(uvproj_file_path):
if os.path.exists(uvproj_file_path):
with open(uvproj_file_path, mode='r') as f:
data = f.read()
result = re.search("<Device>(.*)</Device>", data)
if result:
return result.group(1)
else:
return "unknown"
else:
return "unknown"
def gen_makefile_targets(output_file_path):
try:
w_str = makefile_targets_temp
dir_name = os.path.dirname(output_file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(output_file_path, 'w') as f:
f.write(w_str)
return True
except Exception as e:
print(e)
return False
def gen_org_eclipse_core_runtime_prefs(output_file_path):
try:
w_str = eclipse_core_runtime_temp
dir_name = os.path.dirname(output_file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(output_file_path, 'w') as f:
f.write(w_str)
return True
except Exception as e:
print(e)
return False
def gen_cproject_file(output_file_path):
template_file_path = os.path.join(os.path.dirname(output_file_path), "template.cproject")
if os.path.exists(template_file_path):
try:
shutil.copy(template_file_path, output_file_path)
except Exception as e:
print(e)
return True
else:
CFLAGS = rtconfig.CFLAGS
AFLAGS = rtconfig.AFLAGS
LFLAGS = rtconfig.LFLAGS
if 'CXXFLAGS' in dir(rtconfig):
CXXFLAGS = rtconfig.CXXFLAGS
else:
CXXFLAGS = ""
if "-T" in LFLAGS:
items = str(LFLAGS).split()
t_index = items.index("-T")
items[t_index] = ""
items[t_index + 1] = ""
LFLAGS = " ".join(items)
try:
w_str = cproject_temp
if "a_misc_flag" in w_str:
w_str = w_str.replace("a_misc_flag", AFLAGS)
if "c_misc_flag" in w_str:
w_str = w_str.replace("c_misc_flag", CFLAGS)
if "cpp_misc_flag" in w_str:
w_str = w_str.replace("cpp_misc_flag", CXXFLAGS)
if "c_link_misc_flag" in w_str:
w_str = w_str.replace("c_link_misc_flag", LFLAGS)
if "cpp_link_misc_flag" in w_str:
w_str = w_str.replace("cpp_link_misc_flag", LFLAGS)
dir_name = os.path.dirname(output_file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(output_file_path, 'w') as f:
f.write(w_str)
return True
except Exception as e:
return False
def gen_project_file(output_file_path):
try:
w_str = project_temp
dir_name = os.path.dirname(output_file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(output_file_path, 'w') as f:
f.write(w_str)
return True
except Exception as e:
return False
def gen_projcfg_ini_file(chip_name, project_name, output_file_path):
try:
projcfg_file_tmp = Template(projcfg_ini_temp)
w_str = projcfg_file_tmp.substitute(project_name=project_name,
chip_name=(chip_name))
dir_name = os.path.dirname(output_file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(output_file_path, 'w') as f:
f.write(w_str)
return True
except Exception as e:
return False
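# Illustrative usage sketch (added for clarity; not part of the original tool).
# The generator helpers above each emit one RT-Thread Studio/Eclipse project
# file. The chip name, project name and output paths below are assumptions for
# the example only; gen_cproject_file() additionally pulls compiler flags from
# the rtconfig module imported at the top of this file.
def _example_generate_project_skeleton(project_dir="."):
    gen_projcfg_ini_file("STM32F407", "demo_project",
                         os.path.join(project_dir, ".settings", "projcfg.ini"))
    gen_project_file(os.path.join(project_dir, ".project"))
    gen_cproject_file(os.path.join(project_dir, ".cproject"))
    gen_org_eclipse_core_runtime_prefs(
        os.path.join(project_dir, ".settings", "org.eclipse.core.runtime.prefs"))
    gen_makefile_targets(os.path.join(project_dir, "Debug", "makefile.targets"))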
| apache-2.0 | 448,295,796,708,379,500 | 3,715,553,524,788,498,000 | 96.609687 | 495 | 0.757042 | false |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/pip-7.1.0-py3.4.egg/pip/locations.py | 59 | 6362 | """Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import getpass
import os
import os.path
import site
import sys
from distutils import sysconfig
from distutils.command.install import install, SCHEME_KEYS # noqa
from pip.compat import WINDOWS
from pip.utils import appdirs
# CA Bundle Locations
CA_BUNDLE_PATHS = [
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenBSD
"/etc/ssl/cert.pem",
# FreeBSD/DragonFly
"/usr/local/share/certs/ca-root-nss.crt",
# Homebrew on OSX
"/usr/local/etc/openssl/cert.pem",
]
# Attempt to locate a CA Bundle that we can pass into requests, we have a list
# of possible ones from various systems. If we cannot find one then we'll set
# this to None so that we default to whatever requests is setup to handle.
#
# Note to Downstream: If you wish to disable this autodetection and simply use
# whatever requests does (likely you've already patched
# requests.certs.where()) then simply edit this line so
# that it reads ``CA_BUNDLE_PATH = None``.
CA_BUNDLE_PATH = next((x for x in CA_BUNDLE_PATHS if os.path.exists(x)), None)
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
with open(filepath, 'w') as marker_fp:
marker_fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
"""
Return True if we're running inside a virtualenv, False otherwise.
"""
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
return False
def virtualenv_no_global():
"""
Return True if in a venv and no system site packages.
"""
# this mirrors the logic in virtualenv.py for locating the
# no-global-site-packages.txt file
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True
def __get_username():
""" Returns the effective username of the current process. """
if WINDOWS:
return getpass.getuser()
import pwd
return pwd.getpwuid(os.geteuid()).pw_name
if running_under_virtualenv():
src_prefix = os.path.join(sys.prefix, 'src')
else:
# FIXME: keep src in cwd for now (it is not a temporary folder)
try:
src_prefix = os.path.join(os.getcwd(), 'src')
except OSError:
# In case the current working directory has been renamed or deleted
sys.exit(
"The folder you are executing pip from can no longer be found."
)
# under Mac OS X + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_site = site.USER_SITE
user_dir = os.path.expanduser('~')
if WINDOWS:
bin_py = os.path.join(sys.prefix, 'Scripts')
bin_user = os.path.join(user_site, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.ini'
legacy_storage_dir = os.path.join(user_dir, 'pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
else:
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.conf'
legacy_storage_dir = os.path.join(user_dir, '.pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
# Forcing to use /usr/local/bin for standard Mac OS X framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
site_config_files = [
os.path.join(path, config_basename)
for path in appdirs.site_config_dirs('pip')
]
def distutils_scheme(dist_name, user=False, home=None, root=None,
isolated=False):
"""
Return a distutils install scheme
"""
from distutils.dist import Distribution
scheme = {}
if isolated:
extra_dist_args = {"script_args": ["--no-user-cfg"]}
else:
extra_dist_args = {}
dist_args = {'name': dist_name}
dist_args.update(extra_dist_args)
d = Distribution(dist_args)
d.parse_config_files()
i = d.get_command_obj('install', create=True)
# NOTE: setting user or home has the side-effect of creating the home dir
# or user base for installations during finalize_options()
# ideally, we'd prefer a scheme class that has no side-effects.
i.user = user or i.user
if user:
i.prefix = ""
i.home = home or i.home
i.root = root or i.root
i.finalize_options()
for key in SCHEME_KEYS:
scheme[key] = getattr(i, 'install_' + key)
if i.install_lib is not None:
# install_lib takes precedence over purelib and platlib
scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
if running_under_virtualenv():
scheme['headers'] = os.path.join(
sys.prefix,
'include',
'site',
'python' + sys.version[:3],
dist_name,
)
if root is not None:
scheme["headers"] = os.path.join(
root,
os.path.abspath(scheme["headers"])[1:],
)
return scheme
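# Illustrative sketch (not part of pip itself): how the install scheme above
# might be inspected. "example-dist" is a hypothetical distribution name used
# only for demonstration; the printed paths depend on the interpreter and on
# whether a virtualenv is active.
if __name__ == "__main__":
    for _key, _path in sorted(distutils_scheme("example-dist").items()):
        print("%s: %s" % (_key, _path))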
| mit | -428,240,074,871,780,540 | -7,492,360,554,614,116,000 | 28.590698 | 78 | 0.645237 | false |
dfranco/shinken | test/test_db_mysql.py | 19 | 1856 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
try:
from shinken.db_mysql import DBMysql
except ImportError:
# Oups this server do not have mysql installed, skip this test
DBMysql = None
class TestConfig(ShinkenTest):
# setUp is inherited from ShinkenTest
def create_db(self):
self.db = DBMysql(host='localhost', user='root', password='root', database='merlin', character_set='utf8')
def test_connect_database(self):
if not DBMysql:
return
self.create_db()
try:
self.db.connect_database()
except Exception: # arg, no database here? sic!
pass
def test_execute_query(self):
if not DBMysql:
return
self.create_db()
try:
self.db.connect_database()
q = "DELETE FROM service WHERE instance_id = '0'"
self.db.execute_query(q)
except Exception:
pass
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | 4,690,696,538,929,323,000 | -5,008,938,127,483,577,000 | 28.935484 | 114 | 0.663793 | false |
lmEshoo/st2contrib | packs/dripstat/sensors/dripstat_alert_sensor.py | 12 | 2855 | import eventlet
import requests
from datetime import datetime
from st2reactor.sensor.base import PollingSensor
__all__ = [
'DripstatAlertSensor'
]
BASE_URL = 'https://api.dripstat.com/api/v1'
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
class DripstatAlertSensor(PollingSensor):
def __init__(self, sensor_service, config=None, poll_interval=30):
super(DripstatAlertSensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._trigger_ref = 'dripstat.alert'
self._log = self._sensor_service.get_logger(__name__)
def setup(self):
self._api_key = self._config['api_key']
self._applications = self._api_request(endpoint='/apps')
def poll(self):
for application in self._applications:
params = {'appId': application['id']}
alerts = self._api_request(endpoint='/activeAlerts', params=params)
for alert in alerts:
last_alert_timestamp = self._get_last_alert_timestamp(application['name'])
epoch = int(alert['startedAt']) / 1000
if epoch > last_alert_timestamp:
self._set_last_alert_timestamp(application['name'], epoch)
self._dispatch_trigger_for_alert(application=application['name'], alert=alert,
epoch=epoch)
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _api_request(self, endpoint, params={}):
url = BASE_URL + endpoint
default_params = {'clientId': self._api_key}
params.update(default_params)
response = requests.get(url, params=params)
return response.json()
def _dispatch_trigger_for_alert(self, application, alert, epoch):
trigger = self._trigger_ref
payload = {
'app_name': application,
'alert_type': alert['name'],
'started_at': epoch,
'started_at_iso8601': datetime.fromtimestamp(epoch).isoformat(),
'jvm_host': alert['jvmHost']
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
def _get_last_alert_timestamp(self, app):
last_alert_timestamp = self._sensor_service.get_value("%s.last_alert_timestamp" % app)
if last_alert_timestamp:
return int(last_alert_timestamp)
else:
return 0
def _set_last_alert_timestamp(self, app, timestamp):
self._sensor_service.set_value(name='%s.last_alert_timestamp' % app, value=str(timestamp))
| apache-2.0 | -65,196,950,188,027,336 | 7,089,836,343,868,483,000 | 32.988095 | 98 | 0.587391 | false |
agaveapi/SC17-container-tutorial | content/images/jupyter/examples/setvars.py | 1 | 2421 | # Here we define some utility commands to simplify interaction with the shell.
# You don't need to read or understand this, but it's here in case you want to.
import re
import os
def repvar(v):
"""
repvar() is short for "Replace Variables." The idea is that this
function looks for strings of the form $VAR or ${VAR} or even
$(CMD) in the input string and replaces them, either with
    the contents of os.environ[VAR] or os.popen(CMD), mimicking the
    behavior of bash. If a backslash precedes the $, then the backslash
will be removed but the string will not be evaluated. Thus:
${HOME} becomes "/home/user"
    $HOME becomes "/home/user"
$(echo Hello) becomes "Hello"
\$HOME becomes $HOME
"""
epos = 0
buf = ''
for g in re.finditer(r'\$((\w+)|\{([^}]*)\}|\(([^())]*)\))|(\\+\$)',v):
if g:
i = 2
while g.group(i) == None:
i += 1
p = g.start(0)
buf += v[epos:p]
epos = p + len(g.group(0))
if i == 4:
fh = os.popen(g.group(i),"r")
c = repvar(fh.read())
fh.close()
elif i == 5:
c = '$'
else:
if not g.group(i) in os.environ:
raise Exception("no such environment variable: "+g.group(i))
c = repvar(os.environ[g.group(i)])
buf += c
else:
break
buf += v[epos:]
return buf.strip()
def setvar(e):
"""
setvar() emulates the ability of BASH to set environment variables.
Thus, NAME=VALUE will set os.environ["NAME"]="VALUE". Bash-style
comments will be stripped, and bash-line continuations will be processed.
"""
e = re.sub(r'#[^\r\n]*','',e)
e = re.sub(r'\\\n\s*','',e)
for m in re.finditer(r'(?m)(\w+)=(.*)',e):
k = m.group(1)
v = repvar(m.group(2))
print(k+"="+v)
os.environ[k]=v
def readfile(f):
"""
Reads in a file. repvar() will be applied to the file name.
"""
n = repvar(f)
print("Reading file `"+n+"'")
fh = open(n)
c = fh.read()
fh.close()
return c
def writefile(f,c):
"""
Writes out a file. repvar() will be applied both to the file name
and the file contents.
"""
n = repvar(f)
print("Writing file `"+n+"'")
fh = open(n,"w")
fh.write(repvar(c))
fh.close()
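# Minimal usage sketch (added for illustration; "GREETING" and "MESSAGE" are
# hypothetical variables, not part of the original helpers). setvar() expands
# the right-hand side with repvar() and stores it in os.environ, so later
# repvar() calls can reference it with $VAR, ${VAR} or $(CMD) syntax.
if __name__ == "__main__":
    setvar("GREETING=Hello")
    setvar("MESSAGE=$GREETING from $(echo the shell)")
    print(repvar("${MESSAGE}!"))  # prints: Hello from the shell!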
| bsd-3-clause | 8,702,782,141,510,393,000 | -3,238,516,749,568,402,400 | 31.28 | 80 | 0.523337 | false |
jaredweiss/nupic | tests/integration/nupic/algorithms/temporal_memory_performance_test.py | 9 | 4753 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import unittest
import numpy
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.research.temporal_memory import TemporalMemory
from nupic.research.TP import TP
from nupic.research.TP10X2 import TP10X2
# ==============================
# Tests
# ==============================
class TemporalMemoryPerformanceTest(unittest.TestCase):
def setUp(self):
self.tm = TemporalMemory(columnDimensions=[2048],
cellsPerColumn=32,
initialPermanence=0.5,
connectedPermanence=0.8,
minThreshold=10,
maxNewSynapseCount=12,
permanenceIncrement=0.1,
permanenceDecrement=0.05,
activationThreshold=15)
self.tp = TP(numberOfCols=2048,
cellsPerColumn=32,
initialPerm=0.5,
connectedPerm=0.8,
minThreshold=10,
newSynapseCount=12,
permanenceInc=0.1,
permanenceDec=0.05,
activationThreshold=15,
globalDecay=0, burnIn=1,
checkSynapseConsistency=False,
pamLength=1)
self.tp10x2 = TP10X2(numberOfCols=2048,
cellsPerColumn=32,
initialPerm=0.5,
connectedPerm=0.8,
minThreshold=10,
newSynapseCount=12,
permanenceInc=0.1,
permanenceDec=0.05,
activationThreshold=15,
globalDecay=0, burnIn=1,
checkSynapseConsistency=False,
pamLength=1)
self.patternMachine = PatternMachine(2048, 40, num=100)
self.sequenceMachine = SequenceMachine(self.patternMachine)
def testSingleSequence(self):
print "Test: Single sequence"
sequence = self.sequenceMachine.generateFromNumbers(range(50))
times = self._feedAll(sequence)
self.assertTrue(times[0] < times[1])
self.assertTrue(times[2] < times[1])
self.assertTrue(times[2] < times[0])
# ==============================
# Helper functions
# ==============================
def _feedAll(self, sequence, learn=True, num=1):
repeatedSequence = sequence * num
times = []
def tmComputeFn(pattern, instance):
instance.compute(pattern, learn)
def tpComputeFn(pattern, instance):
array = self._patternToNumpyArray(pattern)
instance.compute(array, enableLearn=learn, computeInfOutput=True)
elapsed = self._feedOne(repeatedSequence, self.tm, tmComputeFn)
times.append(elapsed)
print "TM:\t{0}s".format(elapsed)
elapsed = self._feedOne(repeatedSequence, self.tp, tpComputeFn)
times.append(elapsed)
print "TP:\t{0}s".format(elapsed)
elapsed = self._feedOne(repeatedSequence, self.tp10x2, tpComputeFn)
times.append(elapsed)
print "TP10X2:\t{0}s".format(elapsed)
return times
@staticmethod
def _feedOne(sequence, instance, computeFn):
start = time.clock()
for pattern in sequence:
if pattern == None:
instance.reset()
else:
computeFn(pattern, instance)
elapsed = time.clock() - start
return elapsed
@staticmethod
def _patternToNumpyArray(pattern):
array = numpy.zeros(2048, dtype='int32')
array[list(pattern)] = 1
return array
# ==============================
# Main
# ==============================
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 7,763,459,353,349,979,000 | -7,057,086,492,469,743,000 | 30.269737 | 72 | 0.578161 | false |
ltilve/chromium | tools/telemetry/telemetry/core/browser_finder.py | 3 | 5942 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds browsers that can be controlled by telemetry."""
import logging
import operator
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.backends.chrome import cros_browser_finder
from telemetry.core.backends.chrome import desktop_browser_finder
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.core.backends.remote import trybot_browser_finder
from telemetry.core.backends.webdriver import webdriver_desktop_browser_finder
from telemetry.core import browser_finder_exceptions
from telemetry.core import device_finder
from telemetry import decorators
BROWSER_FINDERS = [
desktop_browser_finder,
android_browser_finder,
cros_browser_finder,
ios_browser_finder,
trybot_browser_finder,
webdriver_desktop_browser_finder,
]
def FindAllBrowserTypes(options):
return reduce(operator.add,
[bf.FindAllBrowserTypes(options) for bf in BROWSER_FINDERS])
@decorators.Cache
def FindBrowser(options):
"""Finds the best PossibleBrowser object given a BrowserOptions object.
Args:
A BrowserOptions object.
Returns:
A PossibleBrowser object.
Raises:
BrowserFinderException: Options improperly set, or an error occurred.
"""
if options.browser_type == 'exact' and options.browser_executable == None:
raise browser_finder_exceptions.BrowserFinderException(
'--browser=exact requires --browser-executable to be set.')
if options.browser_type != 'exact' and options.browser_executable != None:
raise browser_finder_exceptions.BrowserFinderException(
'--browser-executable requires --browser=exact.')
if options.browser_type == 'cros-chrome' and options.cros_remote == None:
raise browser_finder_exceptions.BrowserFinderException(
'browser_type=cros-chrome requires cros_remote be set.')
if (options.browser_type != 'cros-chrome' and
options.browser_type != 'cros-chrome-guest' and
options.cros_remote != None):
raise browser_finder_exceptions.BrowserFinderException(
'--remote requires --browser=cros-chrome or cros-chrome-guest.')
devices = device_finder.GetDevicesMatchingOptions(options)
browsers = []
default_browsers = []
for device in devices:
for finder in BROWSER_FINDERS:
if(options.browser_type and options.browser_type != 'any' and
options.browser_type not in finder.FindAllBrowserTypes(options)):
continue
curr_browsers = finder.FindAllAvailableBrowsers(options, device)
new_default_browser = finder.SelectDefaultBrowser(curr_browsers)
if new_default_browser:
default_browsers.append(new_default_browser)
browsers.extend(curr_browsers)
if options.browser_type == None:
if default_browsers:
default_browser = sorted(default_browsers,
key=lambda b: b.last_modification_time())[-1]
logging.warning('--browser omitted. Using most recent local build: %s' %
default_browser.browser_type)
default_browser.UpdateExecutableIfNeeded()
return default_browser
if len(browsers) == 1:
logging.warning('--browser omitted. Using only available browser: %s' %
browsers[0].browser_type)
browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
raise browser_finder_exceptions.BrowserTypeRequiredException(
'--browser must be specified. Available browsers:\n%s' %
'\n'.join(sorted(set([b.browser_type for b in browsers]))))
if options.browser_type == 'any':
types = FindAllBrowserTypes(options)
def CompareBrowsersOnTypePriority(x, y):
x_idx = types.index(x.browser_type)
y_idx = types.index(y.browser_type)
return x_idx - y_idx
browsers.sort(CompareBrowsersOnTypePriority)
if len(browsers) >= 1:
browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
else:
return None
matching_browsers = [b for b in browsers
if b.browser_type == options.browser_type and b.SupportsOptions(options)]
chosen_browser = None
if len(matching_browsers) == 1:
chosen_browser = matching_browsers[0]
elif len(matching_browsers) > 1:
logging.warning('Multiple browsers of the same type found: %s' % (
repr(matching_browsers)))
chosen_browser = sorted(matching_browsers,
key=lambda b: b.last_modification_time())[-1]
if chosen_browser:
logging.info('Chose browser: %s' % (repr(chosen_browser)))
chosen_browser.UpdateExecutableIfNeeded()
return chosen_browser
@decorators.Cache
def GetAllAvailableBrowsers(options, device):
"""Returns a list of available browsers on the device.
Args:
options: A BrowserOptions object.
device: The target device, which can be None.
Returns:
A list of browser instances.
Raises:
BrowserFinderException: Options are improperly set, or an error occurred.
"""
if not device:
return []
possible_browsers = []
for browser_finder in BROWSER_FINDERS:
possible_browsers.extend(
browser_finder.FindAllAvailableBrowsers(options, device))
return possible_browsers
@decorators.Cache
def GetAllAvailableBrowserTypes(options):
"""Returns a list of available browser types.
Args:
options: A BrowserOptions object.
Returns:
A list of browser type strings.
Raises:
BrowserFinderException: Options are improperly set, or an error occurred.
"""
devices = device_finder.GetDevicesMatchingOptions(options)
possible_browsers = []
for device in devices:
possible_browsers.extend(GetAllAvailableBrowsers(options, device))
type_list = set([browser.browser_type for browser in possible_browsers])
type_list = list(type_list)
type_list.sort()
return type_list
| bsd-3-clause | 1,524,279,948,425,038,800 | -745,429,777,875,846,800 | 33.346821 | 79 | 0.715921 | false |
GitHublong/hue | desktop/core/ext-py/Django-1.6.10/django/utils/importlib.py | 105 | 1384 | # Taken from Python 2.7 with permission from/by the original author.
import sys
from django.utils import six
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
if six.PY3:
from importlib import import_module
else:
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
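# Example usage (editorial addition; module names are hypothetical):
#
#     from django.utils.importlib import import_module
#     settings = import_module('myproject.settings')        # absolute import
#     models = import_module('.models', package='myapp')    # relative import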
| apache-2.0 | 6,276,170,284,156,545,000 | 8,887,496,523,728,815,000 | 32.756098 | 82 | 0.58237 | false |
CTSRD-SOAAP/chromium-42.0.2311.135 | build/android/gyp/javac.py | 2 | 8917 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import optparse
import os
import shutil
import re
import sys
import textwrap
from util import build_utils
from util import md5_check
import jar
sys.path.append(build_utils.COLORAMA_ROOT)
import colorama
def ColorJavacOutput(output):
fileline_prefix = r'(?P<fileline>(?P<file>[-.\w/\\]+.java):(?P<line>[0-9]+):)'
warning_re = re.compile(
fileline_prefix + r'(?P<full_message> warning: (?P<message>.*))$')
error_re = re.compile(
fileline_prefix + r'(?P<full_message> (?P<message>.*))$')
marker_re = re.compile(r'\s*(?P<marker>\^)\s*$')
warning_color = ['full_message', colorama.Fore.YELLOW + colorama.Style.DIM]
error_color = ['full_message', colorama.Fore.MAGENTA + colorama.Style.BRIGHT]
marker_color = ['marker', colorama.Fore.BLUE + colorama.Style.BRIGHT]
def Colorize(line, regex, color):
match = regex.match(line)
start = match.start(color[0])
end = match.end(color[0])
return (line[:start]
+ color[1] + line[start:end]
+ colorama.Fore.RESET + colorama.Style.RESET_ALL
+ line[end:])
def ApplyColor(line):
if warning_re.match(line):
line = Colorize(line, warning_re, warning_color)
elif error_re.match(line):
line = Colorize(line, error_re, error_color)
elif marker_re.match(line):
line = Colorize(line, marker_re, marker_color)
return line
return '\n'.join(map(ApplyColor, output.split('\n')))
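# Illustrative behaviour (editorial; the path and message are made up): a javac
# line such as "src/Foo.java:42: warning: [deprecation] frob() is deprecated"
# matches warning_re, so its " warning: ..." portion is dimmed yellow; plain
# "file:line: message" errors are highlighted magenta, and the "^" caret lines
# javac prints under a diagnostic are colored blue.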
def DoJavac(
classpath, classes_dir, chromium_code, java_files):
"""Runs javac.
Builds |java_files| with the provided |classpath| and puts the generated
.class files into |classes_dir|. If |chromium_code| is true, extra lint
checking will be enabled.
"""
jar_inputs = []
for path in classpath:
if os.path.exists(path + '.TOC'):
jar_inputs.append(path + '.TOC')
else:
jar_inputs.append(path)
javac_args = [
'-g',
# Chromium only allows UTF8 source files. Being explicit avoids
# javac pulling a default encoding from the user's environment.
'-encoding', 'UTF-8',
'-source', '1.7',
'-target', '1.7',
'-classpath', ':'.join(classpath),
'-d', classes_dir]
if chromium_code:
javac_args.extend(['-Xlint:unchecked', '-Xlint:deprecation'])
else:
# XDignore.symbol.file makes javac compile against rt.jar instead of
# ct.sym. This means that using a java internal package/class will not
# trigger a compile warning or error.
javac_args.extend(['-XDignore.symbol.file'])
javac_cmd = ['javac'] + javac_args + java_files
def Compile():
build_utils.CheckOutput(
javac_cmd,
print_stdout=chromium_code,
stderr_filter=ColorJavacOutput)
record_path = os.path.join(classes_dir, 'javac.md5.stamp')
md5_check.CallAndRecordIfStale(
Compile,
record_path=record_path,
input_paths=java_files + jar_inputs,
input_strings=javac_cmd)
_MAX_MANIFEST_LINE_LEN = 72
def CreateManifest(manifest_path, classpath, main_class=None,
manifest_entries=None):
"""Creates a manifest file with the given parameters.
  This generates a manifest file that complies with the spec found at
http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html#JAR_Manifest
Args:
manifest_path: The path to the manifest file that should be created.
classpath: The JAR files that should be listed on the manifest file's
classpath.
main_class: If present, the class containing the main() function.
manifest_entries: If present, a list of (key, value) pairs to add to
the manifest.
"""
output = ['Manifest-Version: 1.0']
if main_class:
output.append('Main-Class: %s' % main_class)
if manifest_entries:
for k, v in manifest_entries:
output.append('%s: %s' % (k, v))
if classpath:
sanitized_paths = []
for path in classpath:
sanitized_paths.append(os.path.basename(path.strip('"')))
output.append('Class-Path: %s' % ' '.join(sanitized_paths))
output.append('Created-By: ')
output.append('')
wrapper = textwrap.TextWrapper(break_long_words=True,
drop_whitespace=False,
subsequent_indent=' ',
width=_MAX_MANIFEST_LINE_LEN - 2)
output = '\r\n'.join(w for l in output for w in wrapper.wrap(l))
with open(manifest_path, 'w') as f:
f.write(output)
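# Illustrative output (editorial; jar names and class are hypothetical):
# CreateManifest('MANIFEST.MF', ['a.jar', 'lib/b.jar'], main_class='org.x.Main')
# should write, roughly:
#
#   Manifest-Version: 1.0
#   Main-Class: org.x.Main
#   Class-Path: a.jar b.jar
#   Created-By: 
#
# with CRLF line endings and any line over 72 characters wrapped onto a
# continuation line starting with a single space.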
def main(argv):
colorama.init()
argv = build_utils.ExpandFileArgs(argv)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option(
'--src-gendirs',
help='Directories containing generated java files.')
parser.add_option(
'--java-srcjars',
action='append',
default=[],
help='List of srcjars to include in compilation.')
parser.add_option(
'--classpath',
action='append',
help='Classpath for javac. If this is specified multiple times, they '
'will all be appended to construct the classpath.')
parser.add_option(
'--javac-includes',
help='A list of file patterns. If provided, only java files that match'
'one of the patterns will be compiled.')
parser.add_option(
'--jar-excluded-classes',
default='',
help='List of .class file patterns to exclude from the jar.')
parser.add_option(
'--chromium-code',
type='int',
help='Whether code being compiled should be built with stricter '
'warnings for chromium code.')
parser.add_option(
'--classes-dir',
help='Directory for compiled .class files.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option(
'--main-class',
help='The class containing the main method.')
parser.add_option(
'--manifest-entry',
action='append',
help='Key:value pairs to add to the .jar manifest.')
parser.add_option('--stamp', help='Path to touch on success.')
options, args = parser.parse_args(argv)
if options.main_class and not options.jar_path:
parser.error('--main-class requires --jar-path')
classpath = []
for arg in options.classpath:
classpath += build_utils.ParseGypList(arg)
java_srcjars = []
for arg in options.java_srcjars:
java_srcjars += build_utils.ParseGypList(arg)
java_files = args
if options.src_gendirs:
src_gendirs = build_utils.ParseGypList(options.src_gendirs)
java_files += build_utils.FindInDirectories(src_gendirs, '*.java')
input_files = classpath + java_srcjars + java_files
with build_utils.TempDir() as temp_dir:
classes_dir = os.path.join(temp_dir, 'classes')
os.makedirs(classes_dir)
if java_srcjars:
java_dir = os.path.join(temp_dir, 'java')
os.makedirs(java_dir)
for srcjar in java_srcjars:
build_utils.ExtractAll(srcjar, path=java_dir, pattern='*.java')
java_files += build_utils.FindInDirectory(java_dir, '*.java')
if options.javac_includes:
javac_includes = build_utils.ParseGypList(options.javac_includes)
filtered_java_files = []
for f in java_files:
for include in javac_includes:
if fnmatch.fnmatch(f, include):
filtered_java_files.append(f)
break
java_files = filtered_java_files
DoJavac(
classpath,
classes_dir,
options.chromium_code,
java_files)
if options.jar_path:
if options.main_class or options.manifest_entry:
if options.manifest_entry:
entries = map(lambda e: e.split(":"), options.manifest_entry)
else:
entries = []
manifest_file = os.path.join(temp_dir, 'manifest')
CreateManifest(manifest_file, classpath, options.main_class, entries)
else:
manifest_file = None
jar.JarDirectory(classes_dir,
build_utils.ParseGypList(options.jar_excluded_classes),
options.jar_path,
manifest_file=manifest_file)
if options.classes_dir:
# Delete the old classes directory. This ensures that all .class files in
# the output are actually from the input .java files. For example, if a
# .java file is deleted or an inner class is removed, the classes
# directory should not contain the corresponding old .class file after
# running this action.
build_utils.DeleteDirectory(options.classes_dir)
shutil.copytree(classes_dir, options.classes_dir)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
input_files + build_utils.GetPythonDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -2,459,847,862,707,260,400 | 3,152,819,765,344,665,600 | 30.846429 | 81 | 0.647527 | false |
heromod/migrid | mig/shared/functionality/migadmin.py | 1 | 14406 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# migadmin - admin control panel with daemon status monitor
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""MiG administrators page with daemon status and configuration"""
import os
import subprocess
import shared.returnvalues as returnvalues
from shared.certreq import build_certreqitem_object, list_cert_reqs, \
get_cert_req, delete_cert_req, accept_cert_req
from shared.defaults import default_pager_entries
from shared.fileio import send_message_to_grid_script
from shared.findtype import is_admin
from shared.functional import validate_input_and_cert
from shared.html import html_post_helper, themed_styles
from shared.init import initialize_main_variables, find_entry
grid_actions = {'reloadconfig': 'RELOADCONFIG',
'showqueued': 'JOBQUEUEINFO',
'showexecuting': 'EXECUTINGQUEUEINFO',
'showdone': 'DONEQUEUEINFO',
'dropqueued': 'DROPQUEUED',
'dropexecuting': 'DROPEXECUTING',
'dropdone': 'DROPDONE',
}
certreq_actions = ['addcertreq', 'delcertreq']
def signature():
"""Signature of the main function"""
defaults = {'action': [''], 'req_id': [], 'job_id': [], 'lines': [20]}
return ['html_form', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
action = accepted['action'][-1]
req_list = accepted['req_id']
job_list = accepted['job_id']
lines = int(accepted['lines'][-1])
meta = '''<meta http-equiv="refresh" content="%s" />
''' % configuration.sleep_secs
style = themed_styles(configuration)
script = '''
<script type="text/javascript" src="/images/js/jquery.js"></script>
<script type="text/javascript" src="/images/js/jquery.tablesorter.js"></script>
<script type="text/javascript" src="/images/js/jquery.tablesorter.pager.js">
</script>
<script type="text/javascript" src="/images/js/jquery.tablesorter.widgets.js"></script>
<script type="text/javascript" src="/images/js/jquery-ui.js"></script>
<script type="text/javascript" src="/images/js/jquery.confirm.js"></script>
<script type="text/javascript" >
$(document).ready(function() {
// init confirmation dialog
$( "#confirm_dialog" ).dialog(
// see http://jqueryui.com/docs/dialog/ for options
{ autoOpen: false,
modal: true, closeOnEscape: true,
width: 500,
buttons: {
"Cancel": function() { $( "#" + name ).dialog("close"); }
}
});
// table initially sorted by col. 9 (created)
var sortOrder = [[9,0]];
$("#certreqtable").tablesorter({widgets: ["zebra", "saveSort"],
sortList:sortOrder
})
.tablesorterPager({ container: $("#pager"),
size: %s
});
}
);
</script>
''' % default_pager_entries
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = '%s administration panel' % configuration.short_title
title_entry['meta'] = meta
title_entry['style'] = style
title_entry['javascript'] = script
output_objects.append({'object_type': 'html_form',
'text':'''
<div id="confirm_dialog" title="Confirm" style="background:#fff;">
<div id="confirm_text"><!-- filled by js --></div>
<textarea cols="40" rows="4" id="confirm_input"
style="display:none;"></textarea>
</div>
''' })
if not is_admin(client_id, configuration, logger):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'You must be an admin to access this control panel.'})
return (output_objects, returnvalues.CLIENT_ERROR)
html = ''
if action and not action in grid_actions.keys() + certreq_actions:
output_objects.append({'object_type': 'error_text', 'text'
: 'Invalid action: %s' % action})
return (output_objects, returnvalues.SYSTEM_ERROR)
if action in grid_actions:
msg = "%s" % grid_actions[action]
if job_list:
msg += ' %s' % ' '.join(job_list)
msg += '\n'
if not send_message_to_grid_script(msg, logger, configuration):
output_objects.append(
{'object_type': 'error_text', 'text'
: '''Error sending %s message to grid_script.''' % action
})
status = returnvalues.SYSTEM_ERROR
elif action in certreq_actions:
if action == "addcertreq":
for req_id in req_list:
if accept_cert_req(req_id, configuration):
output_objects.append(
{'object_type': 'text', 'text':
'Accepted certificate request %s' % req_id})
else:
output_objects.append(
{'object_type': 'error_text', 'text':
'Accept certificate request failed - details in log'
})
elif action == "delcertreq":
for req_id in req_list:
if delete_cert_req(req_id, configuration):
output_objects.append(
{'object_type': 'text', 'text':
'Deleted certificate request %s' % req_id})
else:
output_objects.append(
{'object_type': 'error_text', 'text':
'Delete certificate request failed - details in log'
})
show, drop = '', ''
general = """
<h1>Server Status</h1>
<p class='importanttext'>
This page automatically refreshes every %s seconds.
</p>
<p>
You can see the current grid daemon status and server logs below. The buttons
provide access to e.g. managing the grid job queues.
</p>
<form method='get' action='migadmin.py'>
<input type='hidden' name='action' value='' />
<input type='submit' value='Show last log lines' />
<input type='text' size='2' name='lines' value='%s' />
</form>
<br />
<form method='get' action='migadmin.py'>
<input type='hidden' name='lines' value='%s' />
<input type='hidden' name='action' value='reloadconfig' />
<input type='submit' value='Reload Configuration' />
</form>
<br />
""" % (configuration.sleep_secs, lines, lines)
show += """
<form method='get' action='migadmin.py'>
<input type='hidden' name='lines' value='%s' />
<input type='submit' value='Log Jobs' />
<select name='action'>
""" % lines
drop += """
<form method='get' action='migadmin.py'>
<input type='hidden' name='lines' value='%s' />
<input type='submit' value='Drop Job' />
<select name='action'>
""" % lines
for queue in ['queued', 'executing', 'done']:
selected = ''
if action.find(queue) != -1:
selected = 'selected'
show += "<option %s value='show%s'>%s</option>" % (selected, queue,
queue)
drop += "<option %s value='drop%s'>%s</option>" % (selected, queue,
queue)
show += """
</select>
</form>
<br />
"""
drop += """
</select>
<input type='text' size='20' name='job_id' value='' />
</form>
<br />
"""
html += general
html += show
html += drop
daemons = """
<div id='daemonstatus'>
"""
daemon_names = ['grid_script.py', 'grid_monitor.py', 'grid_sshmux.py']
# No need to run im_notify unless any im notify protocols are enabled
if [i for i in configuration.notify_protocols if i != 'email']:
daemon_names.append('grid_imnotify.py')
if configuration.site_enable_sftp:
daemon_names.append('grid_sftp.py')
if configuration.site_enable_davs:
daemon_names.append('grid_webdavs.py')
if configuration.site_enable_ftps:
daemon_names.append('grid_ftps.py')
if configuration.site_enable_openid:
daemon_names.append('grid_openid.py')
for proc in daemon_names:
pgrep_proc = subprocess.Popen(['pgrep', '-f', proc],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pgrep_proc.wait()
ps_out = pgrep_proc.stdout.read().strip()
if pgrep_proc.returncode == 0:
daemons += "<div class='status_online'>%s running (pid %s)</div>" \
% (proc, ps_out)
else:
daemons += "<div class='status_offline'>%s not running!</div>" % \
proc
daemons += """</div>
<br />
"""
html += daemons
output_objects.append({'object_type': 'header', 'text'
: 'Pending Certificate Requests'})
(status, ret) = list_cert_reqs(configuration)
if not status:
logger.error("%s: failed for '%s': %s" % (op_name,
client_id, ret))
output_objects.append({'object_type': 'error_text', 'text'
: ret})
return (output_objects, returnvalues.SYSTEM_ERROR)
certreqs = []
for req_id in ret:
(load_status, req_dict) = get_cert_req(req_id, configuration)
if not load_status:
logger.error("%s: load failed for '%s': %s" % \
(op_name, req_id, req_dict))
output_objects.append({'object_type': 'error_text', 'text'
: 'Could not read details for "%s"' % \
req_id})
return (output_objects, returnvalues.SYSTEM_ERROR)
req_item = build_certreqitem_object(configuration, req_dict)
js_name = 'create%s' % req_id
helper = html_post_helper(js_name, 'migadmin.py',
{'action': 'addcertreq', 'req_id': req_id})
output_objects.append({'object_type': 'html_form', 'text': helper})
req_item['addcertreqlink'] = {
'object_type': 'link', 'destination':
"javascript: confirmDialog(%s, '%s');" % \
(js_name, 'Really accept %s?' % req_id),
'class': 'addlink', 'title': 'Accept %s' % req_id, 'text': ''}
js_name = 'delete%s' % req_id
helper = html_post_helper(js_name, 'migadmin.py',
{'action': 'delcertreq', 'req_id': req_id})
output_objects.append({'object_type': 'html_form', 'text': helper})
req_item['delcertreqlink'] = {
'object_type': 'link', 'destination':
"javascript: confirmDialog(%s, '%s');" % \
(js_name, 'Really remove %s?' % req_id),
'class': 'removelink', 'title': 'Remove %s' % req_id, 'text': ''}
certreqs.append(req_item)
output_objects.append({'object_type': 'table_pager', 'entry_name':
'pending certificate requests',
'default_entries': default_pager_entries})
output_objects.append({'object_type': 'certreqs',
'certreqs': certreqs})
log_path_list = []
if os.path.isabs(configuration.logfile):
log_path_list.append(configuration.logfile)
else:
log_path_list.append(os.path.join(configuration.log_dir,
configuration.logfile))
for log_path in log_path_list:
html += '''
<h1>%s</h1>
<textarea rows=%s cols=200 readonly="readonly">
''' % (log_path, lines)
try:
logger.debug("loading %d lines from %s" % (lines, log_path))
log_fd = open(log_path, 'r')
log_fd.seek(0, os.SEEK_END)
size = log_fd.tell()
pos = log_fd.tell()
log_lines = []
step_size = 100
# locate last X lines
while pos > 0 and len(log_lines) < lines:
offset = min(lines * step_size, size)
logger.debug("seek to offset %d from end of %s" % (offset,
log_path))
log_fd.seek(-offset, os.SEEK_END)
pos = log_fd.tell()
log_lines = log_fd.readlines()
step_size *= 2
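            # Editorial note: each pass seeks lines * step_size bytes back
            # from the end (doubling step_size) until at least `lines` lines
            # have been read or the start of the file is reached; only the
            # last `lines` entries are appended to the page below.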
logger.debug("reading %d lines from %s" % (lines, log_path))
html += ''.join(log_lines[-lines:])
log_fd.close()
except Exception, exc:
logger.error("reading %d lines from %s: %s" % (lines, log_path,
exc))
output_objects.append({'object_type': 'error_text', 'text'
: 'Error reading log (%s)' % exc})
return (output_objects, returnvalues.SYSTEM_ERROR)
html += '''</textarea>
'''
output_objects.append({'object_type': 'html_form', 'text'
: html})
return (output_objects, returnvalues.OK)
| gpl-2.0 | -3,987,997,243,724,843,500 | -3,767,373,464,236,254,700 | 38.253406 | 87 | 0.543801 | false |
DucQuang1/youtube-dl | youtube_dl/extractor/youku.py | 49 | 7678 | # coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..utils import ExtractorError
from ..compat import (
compat_urllib_parse,
compat_ord,
compat_urllib_request,
)
class YoukuIE(InfoExtractor):
IE_NAME = 'youku'
IE_DESC = '优酷'
_VALID_URL = r'''(?x)
(?:
http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
youku:)
(?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
'''
_TESTS = [{
'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
'md5': '5f3af4192eabacc4501508d54a8cabd7',
'info_dict': {
'id': 'XMTc1ODE5Njcy_part1',
'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
'ext': 'flv'
}
}, {
'url': 'http://player.youku.com/player.php/sid/XNDgyMDQ2NTQw/v.swf',
'only_matching': True,
}, {
'url': 'http://v.youku.com/v_show/id_XODgxNjg1Mzk2_ev_1.html',
'info_dict': {
'id': 'XODgxNjg1Mzk2',
'title': '武媚娘传奇 85',
},
'playlist_count': 11,
}, {
'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html',
'info_dict': {
'id': 'XMTI1OTczNDM5Mg',
'title': '花千骨 04',
},
'playlist_count': 13,
'skip': 'Available in China only',
}]
def construct_video_urls(self, data1, data2):
# get sid, token
def yk_t(s1, s2):
ls = list(range(256))
t = 0
for i in range(256):
t = (t + ls[i] + compat_ord(s1[i % len(s1)])) % 256
ls[i], ls[t] = ls[t], ls[i]
s = bytearray()
x, y = 0, 0
for i in range(len(s2)):
y = (y + 1) % 256
x = (x + ls[y]) % 256
ls[x], ls[y] = ls[y], ls[x]
s.append(compat_ord(s2[i]) ^ ls[(ls[x] + ls[y]) % 256])
return bytes(s)
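        # Editorial note: yk_t appears to be a plain RC4 keystream (key
        # scheduling plus PRGA) that XOR-decrypts its second argument; here it
        # unwraps the base64 'ep' blob into the "<sid>_<token>" pair used to
        # sign the flv path requests built further below.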
sid, token = yk_t(
b'becaf9be', base64.b64decode(data2['ep'].encode('ascii'))
).decode('ascii').split('_')
# get oip
oip = data2['ip']
# get fileid
string_ls = list(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890')
shuffled_string_ls = []
seed = data1['seed']
N = len(string_ls)
for ii in range(N):
seed = (seed * 0xd3 + 0x754f) % 0x10000
idx = seed * len(string_ls) // 0x10000
shuffled_string_ls.append(string_ls[idx])
del string_ls[idx]
fileid_dict = {}
for format in data1['streamtypes']:
streamfileid = [
int(i) for i in data1['streamfileids'][format].strip('*').split('*')]
fileid = ''.join(
[shuffled_string_ls[i] for i in streamfileid])
fileid_dict[format] = fileid[:8] + '%s' + fileid[10:]
def get_fileid(format, n):
fileid = fileid_dict[format] % hex(int(n))[2:].upper().zfill(2)
return fileid
# get ep
def generate_ep(format, n):
fileid = get_fileid(format, n)
ep_t = yk_t(
b'bf7e5f01',
('%s_%s_%s' % (sid, fileid, token)).encode('ascii')
)
ep = base64.b64encode(ep_t).decode('ascii')
return ep
# generate video_urls
video_urls_dict = {}
for format in data1['streamtypes']:
video_urls = []
for dt in data1['segs'][format]:
n = str(int(dt['no']))
param = {
'K': dt['k'],
'hd': self.get_hd(format),
'myp': 0,
'ts': dt['seconds'],
'ypp': 0,
'ctype': 12,
'ev': 1,
'token': token,
'oip': oip,
'ep': generate_ep(format, n)
}
video_url = \
'http://k.youku.com/player/getFlvPath/' + \
'sid/' + sid + \
'_' + str(int(n) + 1).zfill(2) + \
'/st/' + self.parse_ext_l(format) + \
'/fileid/' + get_fileid(format, n) + '?' + \
compat_urllib_parse.urlencode(param)
video_urls.append(video_url)
video_urls_dict[format] = video_urls
return video_urls_dict
def get_hd(self, fm):
hd_id_dict = {
'flv': '0',
'mp4': '1',
'hd2': '2',
'hd3': '3',
'3gp': '0',
'3gphd': '1'
}
return hd_id_dict[fm]
def parse_ext_l(self, fm):
ext_dict = {
'flv': 'flv',
'mp4': 'mp4',
'hd2': 'flv',
'hd3': 'flv',
'3gp': 'flv',
'3gphd': 'mp4'
}
return ext_dict[fm]
def get_format_name(self, fm):
_dict = {
'3gp': 'h6',
'3gphd': 'h5',
'flv': 'h4',
'mp4': 'h3',
'hd2': 'h2',
'hd3': 'h1'
}
return _dict[fm]
def _real_extract(self, url):
video_id = self._match_id(url)
def retrieve_data(req_url, note):
req = compat_urllib_request.Request(req_url)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
if cn_verification_proxy:
req.add_header('Ytdl-request-proxy', cn_verification_proxy)
raw_data = self._download_json(req, video_id, note=note)
return raw_data['data'][0]
# request basic data
data1 = retrieve_data(
'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id,
'Downloading JSON metadata 1')
data2 = retrieve_data(
'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id,
'Downloading JSON metadata 2')
error_code = data1.get('error_code')
if error_code:
error = data1.get('error')
if error is not None and '因版权原因无法观看此视频' in error:
raise ExtractorError(
'Youku said: Sorry, this video is available in China only', expected=True)
else:
msg = 'Youku server reported error %i' % error_code
if error is not None:
msg += ': ' + error
raise ExtractorError(msg)
title = data1['title']
# generate video_urls_dict
video_urls_dict = self.construct_video_urls(data1, data2)
# construct info
entries = [{
'id': '%s_part%d' % (video_id, i + 1),
'title': title,
'formats': [],
# some formats are not available for all parts, we have to detect
# which one has all
} for i in range(max(len(v) for v in data1['segs'].values()))]
for fm in data1['streamtypes']:
video_urls = video_urls_dict[fm]
for video_url, seg, entry in zip(video_urls, data1['segs'][fm], entries):
entry['formats'].append({
'url': video_url,
'format_id': self.get_format_name(fm),
'ext': self.parse_ext_l(fm),
'filesize': int(seg['size']),
})
return {
'_type': 'multi_video',
'id': video_id,
'title': title,
'entries': entries,
}
| unlicense | 6,010,049,894,195,143,000 | -1,673,757,147,842,040,800 | 31.305085 | 94 | 0.452387 | false |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/tensorboard/lib/python/http_util_test.py | 21 | 6517 | # -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests HTTP utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gzip
import six
from werkzeug import test as wtest
from werkzeug import wrappers
from tensorflow.python.platform import test
from tensorflow.tensorboard.lib.python import http_util
class RespondTest(test.TestCase):
def testHelloWorld(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '<b>hello world</b>', 'text/html')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.response[0], six.b('<b>hello world</b>'))
def testHeadRequest_doesNotWrite(self):
builder = wtest.EnvironBuilder(method='HEAD')
env = builder.get_environ()
request = wrappers.Request(env)
r = http_util.Respond(request, '<b>hello world</b>', 'text/html')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.response[0], six.b(''))
def testPlainText_appendsUtf8ToContentType(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, 'hello', 'text/plain')
h = r.headers
self.assertEqual(h.get('Content-Type'), 'text/plain; charset=utf-8')
def testContentLength_isInBytes(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '爱', 'text/plain')
self.assertEqual(r.headers.get('Content-Length'), '3')
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '爱'.encode('utf-8'), 'text/plain')
self.assertEqual(r.headers.get('Content-Length'), '3')
def testResponseCharsetTranscoding(self):
bean = '要依法治国是赞美那些谁是公义的和惩罚恶人。 - 韩非'
# input is unicode string, output is gbk string
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, bean, 'text/plain; charset=gbk')
self.assertEqual(r.response[0], bean.encode('gbk'))
# input is utf-8 string, output is gbk string
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, bean.encode('utf-8'), 'text/plain; charset=gbk')
self.assertEqual(r.response[0], bean.encode('gbk'))
# input is object with unicode strings, output is gbk json
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, {'red': bean}, 'application/json; charset=gbk')
self.assertEqual(r.response[0], b'{"red": "' + bean.encode('gbk') + b'"}')
# input is object with utf-8 strings, output is gbk json
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(
q, {'red': bean.encode('utf-8')}, 'application/json; charset=gbk')
self.assertEqual(r.response[0], b'{"red": "' + bean.encode('gbk') + b'"}')
# input is object with gbk strings, output is gbk json
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(
q, {'red': bean.encode('gbk')},
'application/json; charset=gbk',
encoding='gbk')
self.assertEqual(r.response[0], b'{"red": "' + bean.encode('gbk') + b'"}')
def testAcceptGzip_compressesResponse(self):
fall_of_hyperion_canto1_stanza1 = '\n'.join([
'Fanatics have their dreams, wherewith they weave',
'A paradise for a sect; the savage too',
'From forth the loftiest fashion of his sleep',
'Guesses at Heaven; pity these have not',
'Trac\'d upon vellum or wild Indian leaf',
'The shadows of melodious utterance.',
'But bare of laurel they live, dream, and die;',
'For Poesy alone can tell her dreams,',
'With the fine spell of words alone can save',
'Imagination from the sable charm',
'And dumb enchantment. Who alive can say,',
'\'Thou art no Poet may\'st not tell thy dreams?\'',
'Since every man whose soul is not a clod',
'Hath visions, and would speak, if he had loved',
'And been well nurtured in his mother tongue.',
'Whether the dream now purpos\'d to rehearse',
'Be poet\'s or fanatic\'s will be known',
'When this warm scribe my hand is in the grave.',
])
e1 = wtest.EnvironBuilder(headers={'Accept-Encoding': '*'}).get_environ()
any_encoding = wrappers.Request(e1)
r = http_util.Respond(
any_encoding, fall_of_hyperion_canto1_stanza1, 'text/plain')
self.assertEqual(r.headers.get('Content-Encoding'), 'gzip')
self.assertEqual(
_gunzip(r.response[0]), fall_of_hyperion_canto1_stanza1.encode('utf-8'))
e2 = wtest.EnvironBuilder(headers={'Accept-Encoding': 'gzip'}).get_environ()
gzip_encoding = wrappers.Request(e2)
r = http_util.Respond(
gzip_encoding, fall_of_hyperion_canto1_stanza1, 'text/plain')
self.assertEqual(r.headers.get('Content-Encoding'), 'gzip')
self.assertEqual(
_gunzip(r.response[0]), fall_of_hyperion_canto1_stanza1.encode('utf-8'))
r = http_util.Respond(
any_encoding, fall_of_hyperion_canto1_stanza1, 'image/png')
self.assertEqual(
r.response[0], fall_of_hyperion_canto1_stanza1.encode('utf-8'))
def testJson_getsAutoSerialized(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, [1, 2, 3], 'application/json')
self.assertEqual(r.response[0], b'[1, 2, 3]')
def testExpires_setsCruiseControl(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '<b>hello world</b>', 'text/html', expires=60)
self.assertEqual(r.headers.get('Cache-Control'), 'private, max-age=60')
def _gunzip(bs):
return gzip.GzipFile('', 'rb', 9, six.BytesIO(bs)).read()
if __name__ == '__main__':
test.main()
| apache-2.0 | -7,569,588,846,147,872,000 | -8,558,554,457,962,095,000 | 40.455128 | 80 | 0.662904 | false |
MadCat34/Sick-Beard | lib/requests/status_codes.py | 56 | 3029 | # -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('resume_incomplete', 'resume'),
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
}
codes = LookupDict(name='status_codes')
for (code, titles) in list(_codes.items()):
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
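# Illustrative usage (editorial, not part of the original module): after the
# loop above, codes.ok, codes.okay and codes.OK all resolve to 200, while
# codes.not_found and codes.NOT_FOUND resolve to 404.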
| gpl-3.0 | -3,069,077,825,971,343,000 | -8,493,272,386,036,985,000 | 34.22093 | 89 | 0.583691 | false |
gurneyalex/OpenUpgrade | addons/report_intrastat/report_intrastat.py | 39 | 5615 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.sql import drop_view_if_exists
from openerp.addons.decimal_precision import decimal_precision as dp
class res_country(osv.osv):
_name = 'res.country'
_inherit = 'res.country'
_columns = {
'intrastat': fields.boolean('Intrastat member'),
}
_defaults = {
'intrastat': lambda *a: False,
}
class report_intrastat_code(osv.osv):
_name = "report.intrastat.code"
_description = "Intrastat code"
_columns = {
'name': fields.char('Intrastat Code', size=16),
'description': fields.char('Description', size=64),
}
class product_template(osv.osv):
_name = "product.template"
_inherit = "product.template"
_columns = {
'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat code'),
}
class report_intrastat(osv.osv):
_name = "report.intrastat"
_description = "Intrastat report"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
'supply_units':fields.float('Supply Units', readonly=True),
'ref':fields.char('Source document',size=64, readonly=True),
'code': fields.char('Country code', size=2, readonly=True),
'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat code', readonly=True),
'weight': fields.float('Weight', readonly=True),
'value': fields.float('Value', readonly=True, digits_compute=dp.get_precision('Account')),
'type': fields.selection([('import', 'Import'), ('export', 'Export')], 'Type'),
'currency_id': fields.many2one('res.currency', "Currency", readonly=True),
}
def init(self, cr):
drop_view_if_exists(cr, 'report_intrastat')
cr.execute("""
create or replace view report_intrastat as (
select
to_char(inv.create_date, 'YYYY') as name,
to_char(inv.create_date, 'MM') as month,
min(inv_line.id) as id,
intrastat.id as intrastat_id,
upper(inv_country.code) as code,
sum(case when inv_line.price_unit is not null
then inv_line.price_unit * inv_line.quantity
else 0
end) as value,
sum(
case when uom.category_id != puom.category_id then (pt.weight_net * inv_line.quantity)
else (pt.weight_net * inv_line.quantity * uom.factor) end
) as weight,
sum(
case when uom.category_id != puom.category_id then inv_line.quantity
else (inv_line.quantity * uom.factor) end
) as supply_units,
inv.currency_id as currency_id,
inv.number as ref,
case when inv.type in ('out_invoice','in_refund')
then 'export'
else 'import'
end as type
from
account_invoice inv
left join account_invoice_line inv_line on inv_line.invoice_id=inv.id
left join (product_template pt
left join product_product pp on (pp.product_tmpl_id = pt.id))
on (inv_line.product_id = pp.id)
left join product_uom uom on uom.id=inv_line.uos_id
left join product_uom puom on puom.id = pt.uom_id
left join report_intrastat_code intrastat on pt.intrastat_id = intrastat.id
left join (res_partner inv_address
left join res_country inv_country on (inv_country.id = inv_address.country_id))
on (inv_address.id = inv.partner_id)
where
inv.state in ('open','paid')
and inv_line.product_id is not null
and inv_country.intrastat=true
group by to_char(inv.create_date, 'YYYY'), to_char(inv.create_date, 'MM'),intrastat.id,inv.type,pt.intrastat_id, inv_country.code,inv.number, inv.currency_id
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,139,317,874,646,911,500 | -718,959,603,225,556,700 | 44.650407 | 174 | 0.552093 | false |
Tilo15/PhotoFiddle2 | PF2/Tools/HueEqualiser.py | 1 | 5526 | import cv2
import numpy
import Tool
class HueEqualiser(Tool.Tool):
def on_init(self):
self.id = "hueequaliser"
self.name = "Hue Equaliser"
self.icon_path = "ui/PF2_Icons/HueEqualiser.png"
self.properties = [
Tool.Property("header", "Hue Equaliser", "Header", None, has_toggle=False, has_button=False),
Tool.Property("bleed", "Hue Bleed", "Slider", 0.5, max=2.0, min=0.01),
Tool.Property("neighbour_bleed", "Neighbour Bleed", "Slider", 0.25, max=2.0, min=0.0),
# Red
Tool.Property("header_red", "Red", "Header", None, has_toggle=False, has_button=False),
Tool.Property("red_value", "Value", "Slider", 0, max=50, min=-50),
Tool.Property("red_saturation", "Saturation", "Slider", 0, max=50, min=-50),
# Yellow
Tool.Property("header_yellow", "Yellow", "Header", None, has_toggle=False, has_button=False),
Tool.Property("yellow_value", "Value", "Slider", 0, max=50, min=-50),
Tool.Property("yellow_saturation", "Saturation", "Slider", 0, max=50, min=-50),
# Green
Tool.Property("header_green", "Green", "Header", None, has_toggle=False, has_button=False),
Tool.Property("green_value", "Value", "Slider", 0, max=50, min=-50),
Tool.Property("green_saturation", "Saturation", "Slider", 0, max=50, min=-50),
# Cyan
Tool.Property("header_cyan", "Cyan", "Header", None, has_toggle=False, has_button=False),
Tool.Property("cyan_value", "Value", "Slider", 0, max=50, min=-50),
Tool.Property("cyan_saturation", "Saturation", "Slider", 0, max=50, min=-50),
# Blue
Tool.Property("header_blue", "Blue", "Header", None, has_toggle=False, has_button=False),
Tool.Property("blue_value", "Value", "Slider", 0, max=50, min=-50),
Tool.Property("blue_saturation", "Saturation", "Slider", 0, max=50, min=-50),
# Violet
Tool.Property("header_violet", "Violet", "Header", None, has_toggle=False, has_button=False),
Tool.Property("violet_value", "Value", "Slider", 0, max=50, min=-50),
Tool.Property("violet_saturation", "Saturation", "Slider", 0, max=50, min=-50),
]
def on_update(self, image):
hues = {
"red": 0,
"yellow": 60,
"green": 120,
"cyan": 180,
"blue": 240,
"violet": 300,
"_red": 360,
}
out = image
if(not self.is_default()):
bleed = self.props["bleed"].get_value()
neighbour_bleed = self.props["neighbour_bleed"].get_value()
out = out.astype(numpy.float32)
# Convert to HSV colorspace
out = cv2.cvtColor(out, cv2.COLOR_BGR2HSV)
# Bits per pixel
bpp = float(str(image.dtype).replace("uint", "").replace("float", ""))
# Pixel value range
np = float(2 ** bpp - 1)
imhue = out[0:, 0:, 0]
imsat = out[0:, 0:, 1]
imval = out[0:, 0:, 2]
for hue in hues:
hsat = self.props["%s_saturation" % hue.replace('_', '')].get_value()
hval = self.props["%s_value" % hue.replace('_', '')].get_value()
isHue = self._is_hue(imhue, hues[hue], (3.5/bleed))
isHue = self._neighbour_bleed(isHue, neighbour_bleed)
imsat = imsat + ((hsat / 10000) * 255) * isHue
imval = imval + ((hval / 1000) * np) * isHue
# Clip any values out of bounds
imval[imval < 0.0] = 0.0
imval[imval > np] = np
imsat[imsat < 0.0] = 0.0
imsat[imsat > 1.0] = 1.0
out[0:, 0:, 1] = imsat
out[0:, 0:, 2] = imval
# Convert back to BGR colorspace
out = cv2.cvtColor(out, cv2.COLOR_HSV2BGR)
out = out.astype(image.dtype)
return out
def _is_hue(self, image, hue_value, bleed_value = 3.5):
mif = hue_value - 30
mir = hue_value + 30
if (mir > 360):
mir = 360
if (mif < 0):
mif = 0
bleed = float(360 / bleed_value)
icopy = image.copy()
print(bleed, mif, mir)
if(mif != 0):
icopy[icopy < mif - bleed] = 0.0
icopy[icopy > mir + bleed] = 0.0
icopy[(icopy < mif) * (icopy != 0.0)] = (((mif - (icopy[(icopy < mif) * (icopy != 0.0)]))/360.0) / (bleed/360.0)) * -1 + 1
icopy[(icopy > mir) * (icopy != 0.0)] = ((((icopy[(icopy > mir) * (icopy != 0.0)]) - mir)/360.0) / (bleed/360.0)) * -1 + 1
icopy[(icopy >= mif) * (icopy <= mir)] = 1.0
if(mif == 0):
icopy[icopy > mir + bleed] = 0.0
icopy[(icopy > mir) * (icopy != 0.0)] = ((((icopy[(icopy > mir) * (icopy != 0.0)]) - mir) / 360.0) / (bleed/360.0)) * -1 + 1
return icopy
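    # Illustrative behaviour (editorial): with the default bleed_value of 3.5,
    # _is_hue(hues, 120) returns a per-pixel weight of 1.0 for hues within
    # +/-30 degrees of green (90-150), fading linearly to 0.0 across the
    # surrounding bleed band and staying 0.0 elsewhere.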
def _neighbour_bleed(self, map, bleed):
strength = bleed*30
if (strength > 0):
height, width = map.shape[:2]
size = (height * width)
mul = numpy.math.sqrt(size) / 1064.416 # numpy.math.sqrt(1132982.0)
map = map*255
blur_size = abs(2 * round((round(strength * mul) + 1) / 2) - 1)
im = cv2.blur(map, (int(blur_size), int(blur_size)))
return im/255.0
        return map
| gpl-3.0 | 7,670,447,236,447,794,000 | 6,945,207,761,187,526,000 | 35.361842 | 136 | 0.500181 | false |
OCA/sale-workflow | sale_product_set/wizard/product_set_add.py | 1 | 3428 | # Copyright 2015 Anybox S.A.S
# Copyright 2016-2018 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import models, fields, api, exceptions, _
import odoo.addons.decimal_precision as dp
class ProductSetAdd(models.TransientModel):
_name = 'product.set.add'
_rec_name = 'product_set_id'
_description = "Wizard model to add product set into a quotation"
order_id = fields.Many2one(
'sale.order', 'Sale Order', required=True,
default=lambda self: self.env.context.get('active_id'),
ondelete='cascade'
)
partner_id = fields.Many2one(
related='order_id.partner_id',
ondelete='cascade'
)
product_set_id = fields.Many2one(
'product.set', 'Product set',
required=True,
ondelete='cascade'
)
quantity = fields.Float(
digits=dp.get_precision('Product Unit of Measure'), required=True,
default=1)
skip_existing_products = fields.Boolean(
default=False,
help='Enable this to not add new lines '
'for products already included in SO lines.'
)
def _check_partner(self):
if self.product_set_id.partner_id:
if self.product_set_id.partner_id != self.order_id.partner_id:
raise exceptions.ValidationError(_(
"Select a product set assigned to "
"the same partner of the order."
))
@api.multi
def add_set(self):
""" Add product set, multiplied by quantity in sale order line """
self._check_partner()
order_lines = self._prepare_order_lines()
if order_lines:
self.order_id.write({
"order_line": order_lines
})
return order_lines
def _prepare_order_lines(self):
max_sequence = self._get_max_sequence()
order_lines = []
for set_line in self._get_lines():
order_lines.append(
(0, 0,
self.prepare_sale_order_line_data(
set_line, max_sequence=max_sequence))
)
return order_lines
def _get_max_sequence(self):
max_sequence = 0
if self.order_id.order_line:
max_sequence = max([
line.sequence for line in self.order_id.order_line
])
return max_sequence
def _get_lines(self):
# hook here to take control on used lines
so_product_ids = self.order_id.order_line.mapped('product_id').ids
for set_line in self.product_set_id.set_line_ids:
if (self.skip_existing_products
and set_line.product_id.id in so_product_ids):
continue
yield set_line
@api.multi
def prepare_sale_order_line_data(self, set_line,
max_sequence=0):
self.ensure_one()
sale_line = self.env['sale.order.line'].new({
'order_id': self.order_id.id,
'product_id': set_line.product_id.id,
'product_uom_qty': set_line.quantity * self.quantity,
'product_uom': set_line.product_id.uom_id.id,
'sequence': max_sequence + set_line.sequence,
'discount': set_line.discount,
})
sale_line.product_id_change()
line_values = sale_line._convert_to_write(sale_line._cache)
return line_values
| agpl-3.0 | -3,324,732,059,555,974,000 | -5,287,528,074,547,344,000 | 34.340206 | 74 | 0.574096 | false |
MjAbuz/django-social-auth | setup.py | 3 | 2193 | # -*- coding: utf-8 -*-
"""Setup file for easy installation"""
from os.path import join, dirname
from setuptools import setup
version = __import__('social_auth').__version__
LONG_DESCRIPTION = """
Django Social Auth is an easy to setup social authentication/registration
mechanism for Django projects.
Crafted using base code from django-twitter-oauth_ and django-openid-auth_,
implements a common interface to define new authentication providers from
third parties.
"""
def long_description():
"""Return long description from README.rst if it's present
because it doesn't get installed."""
try:
return open(join(dirname(__file__), 'README.rst')).read()
except IOError:
return LONG_DESCRIPTION
setup(name='django-social-auth',
version=version,
author='Matías Aguirre',
author_email='matiasaguirre@gmail.com',
description='Django social authentication made simple.',
license='BSD',
keywords='django, openid, oauth, social auth, application',
url='https://github.com/omab/django-social-auth',
packages=['social_auth',
'social_auth.management',
'social_auth.management.commands',
'social_auth.backends',
'social_auth.backends.contrib',
'social_auth.backends.pipeline',
'social_auth.migrations',
'social_auth.tests',
'social_auth.db'],
package_data={'social_auth': ['locale/*/LC_MESSAGES/*']},
long_description=long_description(),
install_requires=['django>=1.2.5',
'oauth2>=1.5.167',
'python_openid>=2.2'],
classifiers=['Framework :: Django',
'Development Status :: 4 - Beta',
'Topic :: Internet',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'],
zip_safe=False)
| bsd-3-clause | -4,006,601,077,219,413,500 | -9,831,906,009,847,710 | 36.152542 | 75 | 0.587591 | false |
xfumihiro/powerline | powerline/lint/markedjson/error.py | 33 | 6948 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import re
from powerline.lib.encoding import get_preferred_output_encoding
NON_PRINTABLE_STR = (
'[^'
# ASCII control characters: 0x00-0x19
+ '\t\n' # Tab, newline: allowed ASCII control characters
+ '\x20-\x7E' # ASCII printable characters
# Unicode control characters: 0x7F-0x9F
+ '\u0085' # Allowed unicode control character: next line character
+ '\u00A0-\uD7FF'
# Surrogate escapes: 0xD800-0xDFFF
+ '\uE000-\uFFFD'
+ ((
'\uD800-\uDFFF'
) if sys.maxunicode < 0x10FFFF else (
'\U00010000-\U0010FFFF'
))
+ ']'
+ ((
# Paired surrogate escapes: allowed in UCS-2 builds as the only way to
# represent characters above 0xFFFF. Only paired variant is allowed.
'|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]'
+ '|[\uD800-\uDBFF](?![\uDC00-\uDFFF])'
) if sys.maxunicode < 0x10FFFF else (
''
))
)
NON_PRINTABLE_RE = re.compile(NON_PRINTABLE_STR)
def repl(s):
return '<x%04x>' % ord(s.group())
def strtrans(s):
return NON_PRINTABLE_RE.sub(repl, s.replace('\t', '>---'))
class Mark:
def __init__(self, name, line, column, buffer, pointer, old_mark=None, merged_marks=None):
self.name = name
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
self.old_mark = old_mark
self.merged_marks = merged_marks or []
def copy(self):
return Mark(self.name, self.line, self.column, self.buffer, self.pointer, self.old_mark, self.merged_marks[:])
def get_snippet(self, indent=4, max_length=75):
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start - 1] not in '\0\n':
start -= 1
if self.pointer - start > max_length / 2 - 1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in '\0\n':
end += 1
if end - self.pointer > max_length / 2 - 1:
tail = ' ... '
end -= 5
break
snippet = [self.buffer[start:self.pointer], self.buffer[self.pointer], self.buffer[self.pointer + 1:end]]
snippet = [strtrans(s) for s in snippet]
return (
' ' * indent + head + ''.join(snippet) + tail + '\n'
+ ' ' * (indent + len(head) + len(snippet[0])) + '^'
)
def advance_string(self, diff):
ret = self.copy()
# FIXME Currently does not work properly with escaped strings.
ret.column += diff
ret.pointer += diff
return ret
def set_old_mark(self, old_mark):
if self is old_mark:
return
checked_marks = set([id(self)])
older_mark = old_mark
while True:
if id(older_mark) in checked_marks:
raise ValueError('Trying to set recursive marks')
checked_marks.add(id(older_mark))
older_mark = older_mark.old_mark
if not older_mark:
break
self.old_mark = old_mark
def set_merged_mark(self, merged_mark):
self.merged_marks.append(merged_mark)
def to_string(self, indent=0, head_text='in ', add_snippet=True):
mark = self
where = ''
processed_marks = set()
while mark:
indentstr = ' ' * indent
where += ('%s %s"%s", line %d, column %d' % (
indentstr, head_text, mark.name, mark.line + 1, mark.column + 1))
if add_snippet:
snippet = mark.get_snippet(indent=(indent + 4))
if snippet:
where += ':\n' + snippet
if mark.merged_marks:
where += '\n' + indentstr + ' with additionally merged\n'
where += mark.merged_marks[0].to_string(indent + 4, head_text='', add_snippet=False)
for mmark in mark.merged_marks[1:]:
where += '\n' + indentstr + ' and\n'
where += mmark.to_string(indent + 4, head_text='', add_snippet=False)
if add_snippet:
processed_marks.add(id(mark))
if mark.old_mark:
where += '\n' + indentstr + ' which replaced value\n'
indent += 4
mark = mark.old_mark
if id(mark) in processed_marks:
raise ValueError('Trying to dump recursive mark')
return where
if sys.version_info < (3,):
def __str__(self):
return self.to_string().encode('utf-8')
def __unicode__(self):
return self.to_string()
else:
def __str__(self):
return self.to_string()
def __eq__(self, other):
return self is other or (
self.name == other.name
and self.line == other.line
and self.column == other.column
)
if sys.version_info < (3,):
def echoerr(**kwargs):
stream = kwargs.pop('stream', sys.stderr)
stream.write('\n')
stream.write((format_error(**kwargs) + '\n').encode(get_preferred_output_encoding()))
else:
def echoerr(**kwargs):
stream = kwargs.pop('stream', sys.stderr)
stream.write('\n')
stream.write(format_error(**kwargs) + '\n')
def format_error(context=None, context_mark=None, problem=None, problem_mark=None, note=None, indent=0):
lines = []
indentstr = ' ' * indent
if context is not None:
lines.append(indentstr + context)
if (
context_mark is not None
and (
problem is None or problem_mark is None
or context_mark != problem_mark
)
):
lines.append(context_mark.to_string(indent=indent))
if problem is not None:
lines.append(indentstr + problem)
if problem_mark is not None:
lines.append(problem_mark.to_string(indent=indent))
if note is not None:
lines.append(indentstr + note)
return '\n'.join(lines)
class MarkedError(Exception):
def __init__(self, context=None, context_mark=None, problem=None, problem_mark=None, note=None):
Exception.__init__(self, format_error(context, context_mark, problem, problem_mark, note))
class EchoErr(object):
__slots__ = ('echoerr', 'logger', 'indent')
def __init__(self, echoerr, logger, indent=0):
self.echoerr = echoerr
self.logger = logger
self.indent = indent
def __call__(self, **kwargs):
kwargs = kwargs.copy()
kwargs.setdefault('indent', self.indent)
self.echoerr(**kwargs)
class DelayedEchoErr(EchoErr):
__slots__ = ('echoerr', 'logger', 'errs', 'message', 'separator_message', 'indent', 'indent_shift')
def __init__(self, echoerr, message='', separator_message=''):
super(DelayedEchoErr, self).__init__(echoerr, echoerr.logger)
self.errs = [[]]
self.message = message
self.separator_message = separator_message
self.indent_shift = (4 if message or separator_message else 0)
self.indent = echoerr.indent + self.indent_shift
def __call__(self, **kwargs):
kwargs = kwargs.copy()
kwargs['indent'] = kwargs.get('indent', 0) + self.indent
self.errs[-1].append(kwargs)
def next_variant(self):
self.errs.append([])
def echo_all(self):
if self.message:
self.echoerr(problem=self.message, indent=(self.indent - self.indent_shift))
for variant in self.errs:
if not variant:
continue
if self.separator_message and variant is not self.errs[0]:
self.echoerr(problem=self.separator_message, indent=(self.indent - self.indent_shift))
for kwargs in variant:
self.echoerr(**kwargs)
def __nonzero__(self):
return not not self.errs
__bool__ = __nonzero__
| mit | 5,395,849,162,543,872,000 | 2,535,862,800,401,325,600 | 27.829876 | 112 | 0.653857 | false |
pbrazdil/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py | 119 | 12052 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import unittest2 as unittest
from webkitpy.common.net import resultsjsonparser_unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
from webkitpy.port.base import Port
from webkitpy.tool.commands.rebaselineserver import TestConfig, RebaselineServer
from webkitpy.tool.servers import rebaselineserver
class RebaselineTestTest(unittest.TestCase):
def test_text_rebaseline_update(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_new(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_no_op_1(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/win/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_no_op_2(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.checksum',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' No current baselines to move',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' Moved text-expected.txt',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_only_images(self):
self._assertRebaseline(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
),
results_files=(
'fast/image-actual.png',
'fast/image-actual.checksum',
),
test_name='fast/image.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/image...',
' Moving current mac baselines to mac-leopard',
' Moved image-expected.checksum',
' Moved image-expected.png',
' Updating baselines for mac',
' Updated image-expected.checksum',
' Updated image-expected.png',
])
def test_text_rebaseline_move_already_exist(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac-leopard/fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=False,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' Already had baselines in mac-leopard, could not move existing mac ones',
])
def test_image_rebaseline(self):
self._assertRebaseline(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
),
results_files=(
'fast/image-actual.png',
'fast/image-actual.checksum',
),
test_name='fast/image.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/image...',
' Updating baselines for mac',
' Updated image-expected.checksum',
' Updated image-expected.png',
])
def test_gather_baselines(self):
example_json = resultsjsonparser_unittest.ResultsJSONParserTest._example_full_results_json
results_json = json.loads(strip_json_wrapper(example_json))
server = RebaselineServer()
server._test_config = get_test_config()
server._gather_baselines(results_json)
self.assertEqual(results_json['tests']['svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html']['state'], 'needs_rebaseline')
self.assertNotIn('prototype-chocolate.html', results_json['tests'])
def _assertRebaseline(self, test_files, results_files, test_name, baseline_target, baseline_move_to, expected_success, expected_log):
log = []
test_config = get_test_config(test_files, results_files)
success = rebaselineserver._rebaseline_test(
test_name,
baseline_target,
baseline_move_to,
test_config,
log=lambda l: log.append(l))
self.assertEqual(expected_log, log)
self.assertEqual(expected_success, success)
class GetActualResultFilesTest(unittest.TestCase):
def test(self):
test_config = get_test_config(result_files=(
'fast/text-actual.txt',
'fast2/text-actual.txt',
'fast/text2-actual.txt',
'fast/text-notactual.txt',
))
self.assertItemsEqual(
('text-actual.txt',),
rebaselineserver._get_actual_result_files(
'fast/text.html', test_config))
class GetBaselinesTest(unittest.TestCase):
def test_no_baselines(self):
self._assertBaselines(
test_files=(),
test_name='fast/missing.html',
expected_baselines={})
def test_text_baselines(self):
self._assertBaselines(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
test_name='fast/text.html',
expected_baselines={
'mac': {'.txt': True},
'base': {'.txt': False},
})
def test_image_and_text_baselines(self):
self._assertBaselines(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
'platform/win/fast/image-expected.png',
'platform/win/fast/image-expected.checksum',
),
test_name='fast/image.html',
expected_baselines={
'base': {'.txt': True},
'mac': {'.checksum': True, '.png': True},
'win': {'.checksum': False, '.png': False},
})
def test_extra_baselines(self):
self._assertBaselines(
test_files=(
'fast/text-expected.txt',
'platform/nosuchplatform/fast/text-expected.txt',
),
test_name='fast/text.html',
expected_baselines={'base': {'.txt': True}})
def _assertBaselines(self, test_files, test_name, expected_baselines):
actual_baselines = rebaselineserver.get_test_baselines(test_name, get_test_config(test_files))
self.assertEqual(expected_baselines, actual_baselines)
def get_test_config(test_files=[], result_files=[]):
# We could grab this from port.layout_tests_dir(), but instantiating a fully mocked port is a pain.
layout_tests_directory = "/mock-checkout/LayoutTests"
results_directory = '/WebKitBuild/Debug/layout-test-results'
host = MockHost()
for file in test_files:
host.filesystem.write_binary_file(host.filesystem.join(layout_tests_directory, file), '')
for file in result_files:
host.filesystem.write_binary_file(host.filesystem.join(results_directory, file), '')
class TestMacPort(Port):
port_name = "mac"
return TestConfig(
TestMacPort(host, 'mac'),
layout_tests_directory,
results_directory,
('mac', 'mac-leopard', 'win', 'linux'),
host.filesystem,
host.scm())
| bsd-3-clause | -7,588,825,257,014,014,000 | -7,290,359,188,354,096,000 | 37.752412 | 149 | 0.569781 | false |
rjschof/gem5 | src/arch/x86/isa/insts/simd128/integer/data_conversion/convert_gpr_integer_to_floating_point.py | 91 | 2989 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CVTSI2SS_XMM_R {
mov2fp ufp1, regm, destSize=dsz, srcSize=dsz
cvti2f xmml, ufp1, srcSize=dsz, destSize=4, ext=Scalar
};
def macroop CVTSI2SS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvti2f xmml, ufp1, srcSize=dsz, destSize=4, ext=Scalar
};
def macroop CVTSI2SS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvti2f xmml, ufp1, srcSize=dsz, destSize=4, ext=Scalar
};
def macroop CVTSI2SD_XMM_R {
mov2fp ufp1, regm, destSize=dsz, srcSize=dsz
cvti2f xmml, ufp1, srcSize=dsz, destSize=8, ext=Scalar
};
def macroop CVTSI2SD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvti2f xmml, ufp1, srcSize=dsz, destSize=8, ext=Scalar
};
def macroop CVTSI2SD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvti2f xmml, ufp1, srcSize=dsz, destSize=8, ext=Scalar
};
'''
| bsd-3-clause | 4,771,962,581,865,475,000 | -2,263,172,238,871,944,200 | 41.7 | 72 | 0.763466 | false |
dungvtdev/upsbayescpm | bayespy/inference/vmp/nodes/tests/test_beta.py | 3 | 2667 | ################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `beta` module.
"""
import numpy as np
from scipy import special
from bayespy.nodes import Beta
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestBeta(TestCase):
"""
Unit tests for Beta node
"""
def test_init(self):
"""
Test the creation of beta nodes.
"""
# Some simple initializations
p = Beta([1.5, 4.2])
# Check that plates are correct
p = Beta([2, 3], plates=(4,3))
self.assertEqual(p.plates,
(4,3))
p = Beta(np.ones((4,3,2)))
self.assertEqual(p.plates,
(4,3))
# Parent not a vector
self.assertRaises(ValueError,
Beta,
4)
# Parent vector has wrong shape
self.assertRaises(ValueError,
Beta,
[4])
self.assertRaises(ValueError,
Beta,
[4,4,4])
# Parent vector has invalid values
self.assertRaises(ValueError,
Beta,
[-2,3])
# Plates inconsistent
self.assertRaises(ValueError,
Beta,
np.ones((4,2)),
plates=(3,))
# Explicit plates too small
self.assertRaises(ValueError,
Beta,
np.ones((4,2)),
plates=(1,))
pass
def test_moments(self):
"""
Test the moments of beta nodes.
"""
p = Beta([2, 3])
u = p._message_to_child()
self.assertAllClose(u[0],
special.psi([2,3]) - special.psi(2+3))
pass
def test_random(self):
"""
Test random sampling of beta nodes.
"""
p = Beta([1e20, 3e20])
x = p.random()
self.assertAllClose(x,
0.25)
p = Beta([[1e20, 3e20],
[1e20, 1e20]])
x = p.random()
self.assertAllClose(x,
[0.25, 0.5])
p = Beta([1e20, 3e20], plates=(3,))
x = p.random()
self.assertAllClose(x,
[0.25, 0.25, 0.25])
pass
| mit | 1,190,224,102,601,117,200 | -32,946,991,061,025,020 | 23.245455 | 80 | 0.403075 | false |
marguslaak/django-xadmin | xadmin/plugins/refresh.py | 28 | 1272 | # coding=utf-8
from django.template import loader
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
REFRESH_VAR = '_refresh'
class RefreshPlugin(BaseAdminPlugin):
refresh_times = []
# Media
def get_media(self, media):
if self.refresh_times and self.request.GET.get(REFRESH_VAR):
media = media + self.vendor('xadmin.plugin.refresh.js')
return media
# Block Views
def block_top_toolbar(self, context, nodes):
if self.refresh_times:
current_refresh = self.request.GET.get(REFRESH_VAR)
context.update({
'has_refresh': bool(current_refresh),
'clean_refresh_url': self.admin_view.get_query_string(remove=(REFRESH_VAR,)),
'current_refresh': current_refresh,
'refresh_times': [{
'time': r,
'url': self.admin_view.get_query_string({REFRESH_VAR: r}),
'selected': str(r) == current_refresh,
} for r in self.refresh_times],
})
nodes.append(loader.render_to_string('xadmin/blocks/model_list.top_toolbar.refresh.html', context_instance=context))
site.register_plugin(RefreshPlugin, ListAdminView)
| bsd-3-clause | -3,724,624,949,143,850,500 | -8,045,101,120,230,539,000 | 33.378378 | 128 | 0.610063 | false |
SiccarPoint/landlab | landlab/components/nonlinear_diffusion/examples/drive_perron.py | 6 | 2924 | from __future__ import print_function
import numpy
from landlab import RasterModelGrid, CLOSED_BOUNDARY
from landlab import ModelParameterDictionary
from landlab.components.nonlinear_diffusion.Perron_nl_diffuse import PerronNLDiffuse
import pylab
import time
inputs = ModelParameterDictionary('./drive_perron_params.txt')
nrows = inputs.read_int('nrows')
ncols = inputs.read_int('ncols')
dx = inputs.read_float('dx')
dt = inputs.read_float('dt')
time_to_run = inputs.read_float('run_time')
# nt needs defining
uplift = inputs.read_float('uplift_rate')
init_elev = inputs.read_float('init_elev')
mg = RasterModelGrid(nrows, ncols, dx)
#mg.set_inactive_boundaries(False, False, False, False)
# mg.set_inactive_boundaries(True,True,True,True)
mg.set_looped_boundaries(True, True)
#create the fields in the grid
mg.add_zeros('topographic__elevation', at='node')
z = mg.zeros(at='node') + init_elev
mg['node'][ 'topographic__elevation'] = z + numpy.random.rand(len(z))/1000.
# Now add a step to diffuse out:
# mg.at_node['topographic__elevation'][mg.active_nodes[:(mg.active_nodes.shape[0]//2.)]]
# += 0.05 #half block uplift
# pylab.figure(1)
# pylab.close()
#elev = mg['node']['topographic__elevation']
#elev_r = mg.node_vector_to_raster(elev)
# pylab.figure(1)
#im = pylab.imshow(elev_r, cmap=pylab.cm.RdBu)
# pylab.show()
# Display a message
print('Running ...')
start_time = time.time()
# instantiate the component:
diffusion_component = PerronNLDiffuse(mg, './drive_perron_params.txt')
# perform the loop:
elapsed_time = 0. # total time in simulation
while elapsed_time < time_to_run:
print(elapsed_time)
if elapsed_time + dt < time_to_run:
diffusion_component.input_timestep(dt)
mg.at_node['topographic__elevation'][mg.core_nodes] += uplift * dt
# mg.at_node['topographic__elevation'][mg.active_nodes[:(mg.active_nodes.shape[0]//2.)]] += uplift*dt #half block uplift
# mg.at_node['topographic__elevation'][mg.active_nodes] += (numpy.arange(len(mg.active_nodes))) #nodes are tagged with their ID
# pylab.figure(1)
# pylab.close()
#elev = mg['node']['topographic__elevation']
#elev_r = mg.node_vector_to_raster(elev)
# pylab.figure(1)
#im = pylab.imshow(elev_r, cmap=pylab.cm.RdBu)
# pylab.show()
mg = diffusion_component.diffuse(mg, elapsed_time)
elapsed_time += dt
#Finalize and plot
elev = mg['node']['topographic__elevation']
elev_r = mg.node_vector_to_raster(elev)
# Clear previous plots
pylab.figure(1)
pylab.close()
# Plot topography
pylab.figure(1)
im = pylab.imshow(elev_r, cmap=pylab.cm.RdBu) # display a colored image
print(elev_r)
pylab.colorbar(im)
pylab.title('Topography')
pylab.figure(2)
# display a colored image
im = pylab.plot(dx * numpy.arange(nrows), elev_r[:, int(ncols // 2)])
pylab.title('Vertical cross section')
pylab.show()
print('Done.')
print(('Total run time = ' + str(time.time() - start_time) + ' seconds.'))
| mit | 5,134,502,431,338,678,000 | -5,601,569,863,589,633,000 | 30.44086 | 131 | 0.70383 | false |
wolfelee/luokr.com | www.luokr.com/app/ctrls/admin/posts.py | 1 | 10035 | #coding=utf-8
from admin import admin, AdminCtrl
class Admin_PostsCtrl(AdminCtrl):
@admin
def get(self):
pager = {}
pager['qnty'] = min(int(self.input('qnty', 10)), 50)
pager['page'] = max(int(self.input('page', 1)), 1)
pager['list'] = 0;
cur_posts = self.dbase('posts').cursor()
cur_users = self.dbase('users').cursor()
cur_posts.execute('select * from posts order by post_id desc limit ? offset ?', (pager['qnty'], (pager['page']-1)*pager['qnty'], ))
posts = cur_posts.fetchall()
psers = {}
if posts:
pager['list'] = len(posts)
cur_users.execute('select * from users where user_id in (' + ','.join(str(i['user_id']) for i in posts) + ')')
psers = self.utils().array_keyto(cur_users.fetchall(), 'user_id')
cur_posts.close()
cur_users.close()
self.render('admin/posts.html', pager = pager, posts = posts, psers = psers)
class Admin_PostHiddenCtrl(AdminCtrl):
@admin
def post(self):
try:
post_id = self.input('post_id')
con = self.dbase('posts')
cur = con.cursor()
cur.execute('update posts set post_stat = 0 where post_id = ?', (post_id, ))
con.commit()
cur.close()
self.flash(1)
except:
self.flash(0)
class Admin_PostCreateCtrl(AdminCtrl):
@admin
def get(self):
cur = self.dbase('terms').cursor()
cur.execute('select * from terms order by term_id desc, term_refc desc limit 9')
terms = cur.fetchall()
cur.close()
mode = self.input('mode', None)
self.render('admin/post-create.html', mode = mode, terms = terms)
@admin
def post(self):
try:
user = self.current_user
post_type = self.input('post_type', 'blog')
post_title = self.input('post_title')
post_descp = self.input('post_descp')
post_author = self.input('post_author')
post_source = self.input('post_source')
post_summary = self.input('post_summary')
post_content = self.input('post_content')
post_rank = self.input('post_rank')
post_stat = self.input('post_stat', 0)
post_ptms = int(self.timer().mktime(self.timer().strptime(self.input('post_ptms'), '%Y-%m-%d %H:%M:%S')))
post_ctms = self.stime()
post_utms = post_ctms
term_list = []
for term_name in self.input('term_list').split(' '):
if term_name == '':
continue
term_list.append(term_name)
if len(term_list) > 10:
self.flash(0, {'msg': '标签数量限制不能超过 10 个'})
return
con_posts = self.dbase('posts')
cur_posts = con_posts.cursor()
con_terms = self.dbase('terms')
cur_terms = con_terms.cursor()
term_imap = {}
term_ctms = self.stime()
for term_name in term_list:
cur_terms.execute('select term_id from terms where term_name = ?', (term_name ,))
term_id = cur_terms.fetchone()
if term_id:
term_id = term_id['term_id']
else:
cur_terms.execute('insert or ignore into terms (term_name, term_ctms) values (?, ?)', (term_name , term_ctms, ))
if cur_terms.lastrowid:
term_id = cur_terms.lastrowid
if term_id:
term_imap[term_id] = term_name
cur_posts.execute('insert into posts (user_id, post_type, post_title, post_descp, post_author, post_source, post_summary, post_content,post_stat, post_rank, post_ptms, post_ctms, post_utms) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', \
(user['user_id'], post_type, post_title, post_descp, post_author, post_source, post_summary, post_content, post_stat, post_rank, post_ptms, post_ctms, post_utms ,))
post_id = cur_posts.lastrowid
if term_imap:
for term_id in term_imap:
cur_posts.execute('insert or ignore into post_terms (post_id, term_id) values (' + str(post_id) + ',' + str(term_id) + ')')
if term_imap:
cur_terms.execute('update terms set term_refc = term_refc + 1 where term_id in (' + ','.join([str(i) for i in term_imap.keys()]) + ')')
con_posts.commit()
cur_posts.close()
con_terms.commit()
con_terms.close()
self.model('alogs').add(self.dbase('alogs'), '新增文章:' + str(post_id), user_ip = self.request.remote_ip, user_id = user['user_id'], user_name = user['user_name'])
self.flash(1, {'url': '/admin/post?post_id=' + str(post_id)})
except:
self.flash(0)
class Admin_PostCtrl(AdminCtrl):
@admin
def get(self):
post_id = self.input('post_id')
con_posts = self.dbase('posts')
cur_posts = con_posts.cursor()
cur_posts.execute('select * from posts where post_id = ?', (post_id, ))
post = cur_posts.fetchone()
if not post:
cur_posts.close()
return self.send_error(404)
mode = self.input('mode', None)
con_terms = self.dbase('terms')
cur_terms = con_terms.cursor()
cur_terms.execute('select * from terms order by term_id desc, term_refc desc limit 9')
terms = cur_terms.fetchall()
ptids = {}
ptags = {}
cur_posts.execute('select post_id,term_id from post_terms where post_id = ?', (post_id, ))
ptids = cur_posts.fetchall()
if ptids:
cur_terms.execute('select * from terms where term_id in (' + ','.join(str(i['term_id']) for i in ptids) + ')')
ptags = cur_terms.fetchall()
if ptags:
ptids = self.utils().array_group(ptids, 'post_id')
ptags = self.utils().array_keyto(ptags, 'term_id')
cur_posts.close()
cur_terms.close()
self.render('admin/post.html', mode = mode, post = post, terms = terms, ptids = ptids, ptags = ptags)
@admin
def post(self):
try:
user = self.current_user
post_id = self.input('post_id')
post_title = self.input('post_title')
post_descp = self.input('post_descp')
post_author = self.input('post_author')
post_source = self.input('post_source')
post_summary = self.input('post_summary')
post_content = self.input('post_content')
post_rank = self.input('post_rank')
post_stat = self.input('post_stat', 0)
post_ptms = int(self.timer().mktime(self.timer().strptime(self.input('post_ptms'), '%Y-%m-%d %H:%M:%S')))
post_utms = self.stime()
term_list = []
for term_name in self.input('term_list').split(' '):
if term_name == '':
continue
term_list.append(term_name)
if len(term_list) > 10:
self.flash(0, {'msg': '标签数量限制不能超过 10 个'})
return
con_posts = self.dbase('posts')
cur_posts = con_posts.cursor()
con_terms = self.dbase('terms')
cur_terms = con_terms.cursor()
cur_posts.execute('select * from posts where post_id = ?', (post_id, ))
post = cur_posts.fetchone()
if not post:
cur_posts.close()
cur_terms.close()
self.flash(0, '没有指定文章ID')
return
term_imap = {}
term_ctms = self.stime()
for term_name in term_list:
cur_terms.execute('select term_id from terms where term_name = ?', (term_name ,))
term_id = cur_terms.fetchone()
if term_id:
term_id = term_id['term_id']
else:
cur_terms.execute('insert or ignore into terms (term_name, term_ctms) values (?, ?)', (term_name , term_ctms, ))
if cur_terms.lastrowid:
term_id = cur_terms.lastrowid
if term_id:
term_imap[term_id] = term_name
cur_posts.execute('select term_id from post_terms where post_id = ?', (post_id, ))
post_tids = cur_posts.fetchall()
cur_posts.execute('update posts set user_id=?,post_title=?,post_descp=?,post_author=?,post_source=?,post_summary=?,post_content=?,post_stat=?,post_rank=?,post_ptms=?,post_utms=? where post_id=?', \
(user['user_id'], post_title, post_descp, post_author, post_source, post_summary, post_content, post_stat, post_rank, post_ptms, post_utms, post_id,))
cur_posts.execute('delete from post_terms where post_id = ?', (post_id,))
if term_imap:
for term_id in term_imap:
cur_posts.execute('insert or ignore into post_terms (post_id, term_id) values (' + str(post_id) + ',' + str(term_id) + ')')
if post_tids:
cur_terms.execute('update terms set term_refc = term_refc - 1 where term_id in (' + ','.join([str(i['term_id']) for i in post_tids]) + ')')
if term_imap:
cur_terms.execute('update terms set term_refc = term_refc + 1 where term_id in (' + ','.join([str(i) for i in term_imap.keys()]) + ')')
con_posts.commit()
cur_posts.close()
con_terms.commit()
cur_terms.close()
self.model('alogs').add(self.dbase('alogs'), '更新文章:' + str(post_id), user_ip = self.request.remote_ip, user_id = user['user_id'], user_name = user['user_name'])
self.flash(1)
except:
self.flash(0)
| bsd-3-clause | 78,995,998,920,044,750 | 5,998,117,502,487,296,000 | 38.519841 | 252 | 0.520835 | false |
Telestream/telestream-cloud-python-sdk | telestream_cloud_notifications_sdk/test/test_params.py | 1 | 1740 | # coding: utf-8
"""
Notifications API
Notifications # noqa: E501
The version of the OpenAPI document: 2.1.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_notifications
from telestream_cloud_notifications.models.params import Params # noqa: E501
from telestream_cloud_notifications.rest import ApiException
class TestParams(unittest.TestCase):
"""Params unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Params
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_notifications.models.params.Params() # noqa: E501
if include_optional :
return Params(
addresses = [
'0'
],
url = '0',
method = 'GET',
retries = 56,
content_type = 'application/json',
topic_arn = '0',
role_arn = '0',
topic_endpoint = '0',
access_key = '0',
project_id = '0',
topic_name = '0'
)
else :
return Params(
)
def testParams(self):
"""Test Params"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit | -8,916,539,634,186,547,000 | 6,295,483,481,270,100,000 | 25.769231 | 85 | 0.556322 | false |
ppqm/fitting | fitter/fit.py | 1 | 9239 |
import sklearn
import sklearn.model_selection
import time
import itertools
import functools
import multiprocessing as mp
import os
import subprocess
import time
import copy
import json
import numpy as np
import pandas as pd
from numpy.linalg import norm
from scipy.optimize import minimize
import rmsd
import joblib
import mndo
cachedir = '.pycache'
memory = joblib.Memory(cachedir, verbose=0)
def get_penalty(calc_properties, refs_properties, property_weights, keys=None):
penalty = 0.0
n = 0
return penalty
@memory.cache
def load_data():
reference = "../dataset-qm9/reference.csv"
reference = pd.read_csv(reference)
filenames = reference["name"]
# energies = reference["binding energy"]
atoms_list = []
coord_list = []
charges = []
titles = []
for filename in filenames:
titles.append(filename)
charges.append(0)
filename = "../dataset-qm9/xyz/" + filename + ".xyz"
atoms, coord = rmsd.get_coordinates_xyz(filename)
atoms_list.append(atoms)
coord_list.append(coord)
offset = 10+100
to_offset = 110+100
atoms_list = atoms_list[offset:to_offset]
coord_list = coord_list[offset:to_offset]
charges = charges[offset:to_offset]
titles = titles[offset:to_offset]
reference = reference[offset:to_offset]
return atoms_list, coord_list, charges, titles, reference
def minimize_parameters(mols_atoms, mols_coords, reference_properties, start_parameters,
n_procs=1,
method="PM3",
ignore_keys=['DD2','DD3','PO1','PO2','PO3','PO9','HYF','CORE','EISOL','FN1','FN2','FN3','GSCAL','BETAS','ZS']):
"""
"""
n_mols = len(mols_atoms)
# Select header
header = """{:} 1SCF MULLIK PRECISE charge={{:}} iparok=1 jprint=5
nextmol=-1
TITLE {{:}}"""
header = header.format(method)
filename = "_tmp_optimizer"
inputtxt = mndo.get_inputs(mols_atoms, mols_coords, np.zeros(n_mols), range(n_mols), header=header)
with open(filename, 'w') as f:
f.write(inputtxt)
# Select atom parameters to optimize
atoms = [np.unique(atom) for atom in mols_atoms]
atoms = list(itertools.chain(*atoms))
atoms = np.unique(atoms)
parameters_values = []
parameters_keys = []
parameters = {}
# Select parameters
for atom in atoms:
atom_params = start_parameters[atom]
current = {}
for key in atom_params:
if key in ignore_keys: continue
value = atom_params[key]
current[key] = value
parameters_values.append(value)
parameters_keys.append([atom, key])
parameters[atom] = current
# Define penalty func
def penalty(params, debug=True):
for param, key in zip(params, parameters_keys):
parameters[key[0]][key[1]] = param
mndo.set_params(parameters)
properties_list = mndo.calculate(filename)
calc_energies = np.array([properties["energy"] for properties in properties_list])
diff = reference_properties - calc_energies
idxs = np.argwhere(np.isnan(diff))
diff[idxs] = 700.0
error = np.abs(diff)
error = error.mean()
if debug:
print("penalty: {:10.2f}".format(error))
return error
def penalty_properties(properties_list):
calc_energies = np.array([properties["energy"] for properties in properties_list])
diff = reference_properties - calc_energies
idxs = np.argwhere(np.isnan(diff))
diff[idxs] = 700.0
error = np.abs(diff)
error = error.mean()
return error
def jacobian(params, dh=10**-5, debug=False):
# TODO Parallelt
grad = []
for i, p in enumerate(params):
dparams = copy.deepcopy(params)
dparams[i] += dh
forward = penalty(dparams, debug=False)
dparams[i] -= (2.0 * dh)
backward = penalty(dparams, debug=False)
de = forward - backward
grad.append(de/(2.0 * dh))
grad = np.array(grad)
if debug:
nm = np.linalg.norm(grad)
print("penalty grad: {:10.2f}".format(nm))
return grad
def jacobian_parallel(params, dh=10**-5, procs=1):
"""
"""
for param, key in zip(params, parameters_keys):
parameters[key[0]][key[1]] = param
params_grad = mndo.numerical_jacobian(inputtxt, parameters, n_procs=procs, dh=dh)
grad = []
for atom, key in parameters_keys:
forward_mols, backward_mols = params_grad[atom][key]
penalty_forward = penalty_properties(forward_mols)
penalty_backward = penalty_properties(backward_mols)
de = penalty_forward - penalty_backward
grad.append(de/(2.0 * dh))
grad = np.array(grad)
return grad
start_error = penalty(parameters_values)
# check grad
dh = 10**-5
t = time.time()
grad = jacobian(parameters_values, dh=dh)
nm = np.linalg.norm(grad)
secs = time.time() - t
print("penalty grad: {:10.2f} time: {:10.2f}".format(nm, secs))
t = time.time()
grad = jacobian_parallel(parameters_values, procs=2, dh=dh)
nm = np.linalg.norm(grad)
secs = time.time() - t
print("penalty grad: {:10.2f} time: {:10.2f}".format(nm, secs))
quit()
res = minimize(penalty, parameters_values,
method="L-BFGS-B",
jac=jacobian,
options={"maxiter": 1000, "disp": True})
parameters_values = res.x
error = penalty(parameters_values)
for param, key in zip(parameters_values, parameters_keys):
parameters[key[0]][key[1]] = param
end_parameters = parameters
return end_parameters, error
def learning_curve(
mols_atoms,
mols_coords,
reference_properties,
start_parameters):
fold_five = sklearn.model_selection.KFold(n_splits=5, random_state=42, shuffle=True)
n_items = len(mols_atoms)
X = list(range(n_items))
score = []
for train_idxs, test_idxs in fold_five.split(X):
train_atoms = [mols_atoms[i] for i in train_idxs]
train_coords = [mols_coords[i] for i in train_idxs]
train_properties = reference_properties[train_idxs]
test_atoms = [mols_atoms[i] for i in test_idxs]
test_coords = [mols_coords[i] for i in test_idxs]
test_properties = reference_properties[test_idxs]
train_parameters, train_error = minimize_parameters(train_atoms, train_coords, train_properties, start_parameters)
print(train_parameters)
quit()
return
def main():
import argparse
import sys
description = """"""
parser = argparse.ArgumentParser(
usage='%(prog)s [options]',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-f', '--format', action='store', help='', metavar='fmt')
parser.add_argument('-s', '--settings', action='store', help='', metavar='json')
parser.add_argument('-p', '--parameters', action='store', help='', metavar='json')
parser.add_argument('-o', '--results_parameters', action='store', help='', metavar='json')
parser.add_argument('--methods', action='store', help='', metavar='str')
args = parser.parse_args()
mols_atoms, mols_coords, mols_charges, titles, reference = load_data()
ref_energies = reference.iloc[:,1].tolist()
ref_energies = np.array(ref_energies)
with open(args.parameters, 'r') as f:
start_params = f.read()
start_params = json.loads(start_params)
# end_params = minimize_parameters(mols_atoms, mols_coords, ref_energies, start_params)
end_params = learning_curve(mols_atoms, mols_coords, ref_energies, start_params)
print(end_params)
quit()
# TODO select reference
# TODO prepare input file
filename = "_tmp_optimizer"
txt = mndo.get_inputs(atoms_list, coord_list, charges, titles)
f = open(filename, 'w')
f.write(txt)
f.close()
# TODO prepare parameters
parameters = np.array([
-99.,
-77.,
2.,
-32.,
3.,
])
parameter_keys = [
["O", "USS"],
["O", "UPP"],
["O", "ZP"],
["O", "BETAP"],
["O", "ALP"],
]
parameter_dict = {}
parameter_dict["O"] = {}
# TODO calculate penalty
# properties_list = mndo.calculate(filename)
def penalty(params):
for param, key in zip(params, parameter_keys):
parameter_dict[key[0]][key[1]] = param
mndo.set_params(parameter_dict)
properties_list = mndo.calculate(filename)
calc_energies = np.array([properties["energy"] for properties in properties_list])
diff = ref_energies - calc_energies
idxs = np.argwhere(np.isnan(diff))
diff[idxs] = 700.0
error = diff.mean()
return error
print(penalty(parameters))
status = minimize(penalty, parameters,
method="L-BFGS-B",
options={"maxiter": 1000, "disp": True})
print()
print(status)
# TODO optimize
return
if __name__ == "__main__":
main()
| cc0-1.0 | -4,455,821,785,314,852,000 | -2,231,139,079,880,124,200 | 22.997403 | 122 | 0.603312 | false |
TheWardoctor/Wardoctors-repo | script.module.nanscrapers/lib/nanscrapers/scraperplugins/streamthis.py | 6 | 3297 | import re
import requests
import difflib
import xbmc
from ..scraper import Scraper
from ..common import clean_title
User_Agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0'
headers = {'User-Agent':User_Agent}
class streamthis(Scraper):
domains = ['streamthis.tv']
name = "streamthis"
sources = []
def __init__(self):
self.base_link = 'http://streamthis.tv'
self.search_link = '/index.php?menu=search&query='
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_url = self.base_link+self.search_link+title.replace(' ','+')
html = requests.get(start_url,headers=headers).content
match = re.compile('<div class="col xs12 s6 m3 l2 animated bounceInUp">.+?<a href="(.+?)".+?<p class="smallttl"> (.+?)</p>.+?<i class="fa fa-calendar-o" aria-hidden="true"></i> (.+?)</div>',re.DOTALL).findall(html)
for url,name,year in match:
if clean_title(name) in clean_title(title):
if year == show_year:
html2 = requests.get(url,headers=headers).content
ep_match = re.compile('<a class="collection-item black-text".+?href="(.+?)".+?<b>(.+?)</b>').findall(html2)
for url2,episodes in ep_match:
if len(season)==1:
season ='0'+season
if len(episode)==1:
episode ='0'+episode
ep_check = 'S'+season+'E'+episode
if ep_check == episodes:
self.get_sources(url2)
return self.sources
except:
pass
return []
def scrape_movie(self, title, year, imdb, debrid = False):
try:
start_url = self.base_link+self.search_link+title.replace(' ','+')
html = requests.get(start_url,headers=headers).content
match = re.compile('<div class="col xs12 s6 m3 l2 animated bounceInUp">.+?<a href="(.+?)".+?<p class="smallttl"> (.+?)</p>.+?<i class="fa fa-calendar-o" aria-hidden="true"></i> (.+?)</div>',re.DOTALL).findall(html)
for url,name,movie_year in match:
if clean_title(name) in clean_title(title):
if year == movie_year:
self.get_sources(url)
return self.sources
except:
pass
return[]
def get_sources(self,url2):
try:
print url2
html = requests.get(url2,headers=headers).content
match = re.findall('<a class="collection-item black-text" href="(.+?)" target="_blank"><img src=".+?"> (.+?)</a>',html)
for link,name in match:
if name.lower() == 'full hd 1080p':
pass
else:
self.sources.append({'source': name, 'quality': 'SD', 'scraper': self.name, 'url': link,'direct': False})
except:
pass
#streamthis().scrape_episode('the blacklist','2013','2017','2','4','','')
#streamthis().scrape_movie('moana','2016','')
| apache-2.0 | 7,497,778,827,890,978,000 | 9,092,365,726,253,694,000 | 44.164384 | 226 | 0.509857 | false |
konstruktoid/ansible-upstream | lib/ansible/plugins/action/ce_template.py | 95 | 3891 | #
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import glob
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.module_utils._text import to_text
from ansible.plugins.action.ce import ActionModule as _ActionModule
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
try:
self._handle_template()
except (ValueError, AttributeError) as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, __backup__ key may not be in results.
self._write_backup(task_vars['inventory_hostname'], result['__backup__'])
if '__backup__' in result:
del result['__backup__']
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
def _handle_template(self):
src = self._task.args.get('src')
if not src:
raise ValueError('missing required arguments: src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
return
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 | -4,894,397,403,003,611,000 | 91,454,542,149,417,280 | 36.413462 | 88 | 0.623747 | false |
Fat-Zer/FreeCAD_sf_master | src/Mod/Show/mTempoVis.py | 22 | 24926 | #/***************************************************************************
# * Copyright (c) Victor Titov (DeepSOIC) *
# * (vv.titov@gmail.com) 2016 *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ***************************************************************************/
# This module is named mTempoVis because Show.TempoVis exposes the class as its member, which would shadow a module named TempoVis.py.
from . import Containers
from . import TVStack
import FreeCAD as App
if App.GuiUp:
import FreeCADGui as Gui
Wrn = lambda msg: App.Console.PrintWarning(msg + "\n")
Err = lambda msg: App.Console.PrintError(msg + "\n")
Log = lambda msg: App.Console.PrintLog(msg + "\n")
from copy import copy
S_EMPTY = 0 # TV is initialized, but no changes were done through it
S_ACTIVE = 1 # TV has something to be undone
S_RESTORED = 2 # TV has been restored
S_INTERNAL = 3 # TV instance is being used by another TV instance as a redo data storage
def _printTraceback(err):
import sys
if err is sys.exc_info()[1]:
import traceback
tb = traceback.format_exc()
Log(tb)
class MAINSTACK(object):
'''it's just a default value definition for TV constructor'''
pass
class JUST_SAVE(object):
'''it's just a default value meaning "save current scene value but don't modify anything"'''
pass
class TempoVis(object):
'''TempoVis - helper object to save visibilities of objects before doing
some GUI editing, hiding or showing relevant stuff during edit, and
then restoring all visibilities after editing.
Constructors:
TempoVis(document, stack = MAINSTACK, **kwargs): creates a new TempoVis.
document: required. Objects not belonging to the document can't be modified via TempoVis.
stack: optional. Which stack to insert this new TV into. Can be:
a TVStack instance (then, the new TV is added to the top of the stack),
MAINSTACK special value (a global stack for the document will be used), or
      None (then, the TV is not in any stack, and can be manually inserted into one if desired).
Any additional keyword args are assigned as attributes. You can use it to immediately set a tag, for example.'''
document = None
stack = None # reference to stack this TV is in
data = None # dict. key = ("class_id","key"), value = instance of SceneDetail
data_requested = None #same as data, but stores (wanted) values passed to modify()
state = S_EMPTY
tag = '' #stores any user-defined string for identification purposes
def _init_attrs(self):
'''initialize member variables to empty values (needed because we can't use mutable initial values when initializing member variables in class definition)'''
self.data = {}
self.data_requested = {}
#<core interface>
def __init__(self, document, stack = MAINSTACK, **kwargs):
self._init_attrs()
self.document = document
if stack is MAINSTACK:
stack = TVStack.mainStack(document)
if stack is None:
pass
else:
stack.insert(self)
for key,val in kwargs.items():
setattr(self, key, val)
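    # Usage sketch (illustrative only; `obj` stands for any DocumentObject, and
    # the import reflects how Show.TempoVis re-exports this class):
    #
    #   from Show import TempoVis
    #   tv = TempoVis(App.ActiveDocument, tag= 'MyWorkbench.MyTask')
    #   tv.hide(obj)        # every change done through tv is recorded...
    #   # ... do the interactive work ...
    #   tv.restore()        # ...and undone here (or when tv is garbage-collected)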
def __del__(self):
if self.state == S_ACTIVE:
self.restore(ultimate= True)
def has(self, detail):
'''has(self, detail): returns True if this TV has this detail value saved.
example: tv.has(VProperty(obj, "Visibility"))'''
return detail.full_key in self.data
def stored_val(self, detail):
'''stored_val(self, detail): returns value of detail remembered by this TV. If not, raises KeyError.'''
return self.data[detail.full_key].data
def save(self, detail, mild_restore = False):
        '''save(detail, mild_restore = False): saves the current value of the scene detail, to be restored later.
The detail is saved only once; repeated calls are ignored.
mild_restore: internal, do not use.'''
self._change()
if not detail.full_key in self.data:
#not saved yet
tv1, curr = self._value_after(detail, query_scene= True)
self.data[detail.full_key] = copy(curr)
self.data[detail.full_key].mild_restore = mild_restore
else:
#saved already. Change restore policy, if necessary.
stored_dt = self.data[detail.full_key]
if not mild_restore:
stored_dt.mild_restore = False
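    # Usage sketch (illustrative only; the property name is just an example):
    #
    #   from Show.SceneDetails.VProperty import VProperty
    #   tv.save(VProperty(obj, "Transparency"))    # record the current value
    #   obj.ViewObject.Transparency = 50           # change it by any means
    #   tv.restore()                               # the recorded value comes back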
def modify(self, detail, mild_restore = None):
        '''modify(detail, mild_restore = None): modifies a scene detail through this TV.
        The value is provided as an instance of a SceneDetail implementation.
        The procedure takes care to account for the stack - that is, if the same
        detail was also changed by a TV applied later than this one, the new value
        is written into that TV's saved data, rather than applied to the scene.
        mild_restore: if True, restoring later first checks if the user changed the value
        after the last call to modify(), and doesn't restore if it was changed.
        (None means: keep the detail's own mild_restore setting.)
Example: tv.modify(VProperty(obj, "Visibility", True))'''
self._change()
if mild_restore is not None:
detail.mild_restore = mild_restore
# save current
self.save(detail, detail.mild_restore)
# apply
tv1, curr = self._value_after(detail)
if tv1 is not None:
tv1.data[detail.full_key].data = detail.data
else:
detail.apply_data(detail.data)
# and record.
if detail.mild_restore:
self.data_requested[detail.full_key] = copy(detail)
def restoreDetail(self, detail, ultimate = False):
'''restoreDetail(detail, ultimate = False): restores a specific scene detail.
ultimate: if true, the saved value is cleaned out.
If the detail is not found, nothing is done.
'''
if not self.has(detail):
return
        self._restoreDetail(detail)
if ultimate:
self.forgetDetail(detail)
def forgetDetail(self, detail):
'''forgetDetail(detail): ditches a saved detail value, making the change done through this TV permanent.'''
self.data.pop(detail.full_key, None)
self.data_requested.pop(detail.full_key, None)
def forget(self):
'''forget(self): clears this TV, making all changes done through it permanent.
Also, withdraws the TV from the stack.'''
self.state = S_EMPTY
self.data = {}
if self.is_in_stack:
self.stack.withdraw(self)
def restore(self, ultimate = True):
'''restore(ultimate = True): undoes all changes done through this tempovis / restores saved scene details.
ultimate: if true, the saved values are cleaned out, and the TV is withdrawn from
the stack. If false, the TV will still remember stuff, and restore can be called again.
'''
if self.state == S_RESTORED:
return
if self.state != S_INTERNAL and ultimate:
self.state = S_RESTORED
for key, detail in self.data.items():
try:
self._restoreDetail(detail)
except Exception as err:
Err("TempoVis.restore: failed to restore detail {key}: {err}".format(key= key, err= str(err)))
_printTraceback(err)
if ultimate:
self.data = {}
if self.is_in_stack:
self.stack.withdraw(self)
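    # Note on `ultimate` (illustrative):
    #
    #   tv.restore(ultimate= False)   # scene restored, tv still remembers the values
    #   tv.modify(...)                # further changes remain undoable
    #   tv.restore()                  # final: values dropped, tv leaves the stack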
#</core interface>
#<stack interface>
def _inserted(self, stack, index):
        '''called when this TV is inserted into a stack'''
self.stack = stack
def _withdrawn(self, stack, index):
        '''called when this TV is withdrawn from a stack'''
self.stack = None
@property
def is_in_stack(self):
return self.stack is not None
#</stack interface>
#<convenience functions>
def modifyVPProperty(self, doc_obj_or_list, prop_names, new_value = JUST_SAVE, mild_restore = None):
        '''modifyVPProperty(doc_obj_or_list, prop_names, new_value = JUST_SAVE, mild_restore = None): modifies
        the prop_names property (or list of properties) on the ViewProvider(s) of
        doc_obj_or_list, and remembers the original values. The originals will be
        restored upon TempoVis deletion, or call to restore(). With the default
        new_value = JUST_SAVE, the current values are only recorded and nothing is changed.
        mild_restore: test if user changed the value manually when restoring the TV.'''
if self.state == S_RESTORED:
Wrn("Attempting to use a TV that has been restored. There must be a problem with code.")
return
if not hasattr(doc_obj_or_list, '__iter__'):
doc_obj_or_list = [doc_obj_or_list]
if not isinstance(prop_names,(list,tuple)):
prop_names = [prop_names]
for doc_obj in doc_obj_or_list:
for prop_name in prop_names:
if not hasattr(doc_obj.ViewObject, prop_name):
Wrn("TempoVis: object {obj} has no attribute {attr}. Skipped."
.format(obj= doc_obj.Name, attr= prop_name))
continue
# Because the introduction of external objects, we shall now
# accept objects from all opened documents.
#
# if doc_obj.Document is not self.document: #ignore objects from other documents
# raise ValueError("Document object to be modified does not belong to document TempoVis was made for.")
from .SceneDetails.VProperty import VProperty
if new_value is JUST_SAVE:
if mild_restore:
Wrn("TempoVis: can't just save a value for mild restore. Saving for hard restore.")
self.save(VProperty(doc_obj, prop_name, new_value))
else:
self.modify(VProperty(doc_obj, prop_name, new_value), mild_restore)
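    # Usage sketch (illustrative only; property names and values are examples):
    #
    #   tv.modifyVPProperty(obj, "Transparency", 70)
    #   tv.modifyVPProperty([obj_a, obj_b], ["Selectable", "Visibility"], False)
    #   tv.modifyVPProperty(obj, "DisplayMode")    # default JUST_SAVE: record only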
def restoreVPProperty(self, doc_obj_or_list, prop_names):
        '''restoreVPProperty(doc_obj_or_list, prop_names): restores specific property changes.'''
from .SceneDetails.VProperty import VProperty
if not hasattr(doc_obj_or_list, '__iter__'):
doc_obj_or_list = [doc_obj_or_list]
if not isinstance(prop_names,(tuple,list)):
prop_names = [prop_names]
for doc_obj in doc_obj_or_list:
for prop_name in prop_names:
try:
self.restoreDetail(VProperty(doc_obj, prop_name))
except Exception as err:
Err("TempoVis.restore: failed to restore detail {key}: {err}".format(key= key, err= str(err)))
_printTraceback(err)
def saveBodyVisibleFeature(self, doc_obj_or_list):
"""saveBodyVisibleFeature(self, doc_obj_or_list): saves Visibility of currently
visible feature, for every body of PartDesign features in the provided list."""
if not hasattr(doc_obj_or_list, '__iter__'):
doc_obj_or_list = [doc_obj_or_list]
objs = []
bodies = set()
for obj in doc_obj_or_list:
body = getattr(obj,'_Body',None)
if not body or body in bodies:
continue
bodies.add(body)
feature = getattr(body,'VisibleFeature',None)
if feature:
objs.append(feature)
self.modifyVPProperty(objs, 'Visibility', JUST_SAVE)
return objs
def show(self, doc_obj_or_list, links_too = True, mild_restore = None):
'''show(doc_obj_or_list, links_too = True): shows objects (sets their Visibility to True).
doc_obj_or_list can be a document object, or a list of document objects.
        If links_too is True, all Links of the objects are also shown, by setting the LinkVisibility attribute of each object.'''
doc_obj_or_list = self._3D_objects(doc_obj_or_list)
        self.saveBodyVisibleFeature(doc_obj_or_list) # PartDesign implicitly hides the Body's currently visible feature; record it, so restore() undoes that implicit change too
self.modifyVPProperty(doc_obj_or_list, 'Visibility', True, mild_restore)
if links_too:
self.modifyVPProperty(doc_obj_or_list, 'LinkVisibility', True, mild_restore)
def hide(self, doc_obj_or_list, links_too = True, mild_restore = None):
        '''hide(doc_obj_or_list, links_too = True, mild_restore = None): hides objects (sets their Visibility to False).
        doc_obj_or_list can be a document object, or a list of document objects. If links_too is True, Links of the objects are hidden too, via their LinkVisibility attribute.'''
doc_obj_or_list = self._3D_objects(doc_obj_or_list)
# no need to saveBodyVisibleFeature here, as no implicit showing will happen
self.modifyVPProperty(doc_obj_or_list, 'Visibility', False, mild_restore)
if links_too:
self.modifyVPProperty(doc_obj_or_list, 'LinkVisibility', False, mild_restore)
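    # Usage sketch (illustrative only): a common pattern for task dialogs is to
    # hide distracting objects and show only the one being edited.
    #
    #   tv.hide([obj_a, obj_b])
    #   tv.show(obj, mild_restore= True)   # if the user hides it again by hand,
    #                                      # restore() leaves that choice alone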
def get_all_dependent(self, doc_obj, subname = None):
        '''get_all_dependent(doc_obj, subname = None): gets all objects that depend on doc_obj. Containers (and, when subname is given, the Links along the path) required for visibility of the object are excluded from the list.'''
from . import Containers
from .Containers import isAContainer
from .DepGraphTools import getAllDependencies, getAllDependent
if subname:
# a link-path was provided. doc_obj has nothing to do with the object we want
# to collect dependencies from. So, replace it with the one pointed by link-path.
cnt_chain = doc_obj.getSubObjectList(subname)
doc_obj = cnt_chain[-1].getLinkedObject()
# cnt_chain can either end with the object (e.g. if a sketch is in a part, and
# a link is to a part), or it may be a Link object (if we have a straight or
# even nested Link to the sketch).
#
# I don't know why do we need that isAContainer check here, but I'm leaving it,
# realthunder must be knowing his business --DeepSOIC
cnt_chain = [ o for o in cnt_chain
if o==cnt_chain[-1] or isAContainer(o, links_too= True) ]
else:
cnt_chain = Containers.ContainerChain(doc_obj)
return [o for o in getAllDependent(doc_obj) if not o in cnt_chain]
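    # Illustrative example (placeholder name): hide everything that depends on a sketch while
    # leaving the containers needed for its own visibility untouched:
    #   tv.hide(tv.get_all_dependent(sketch_obj))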
def hide_all_dependent(self, doc_obj):
'''hide_all_dependent(doc_obj): hides all objects that depend on doc_obj. Groups, Parts and Bodies are not hidden by this.'''
self.hide(self._3D_objects(self.get_all_dependent(doc_obj)))
def show_all_dependent(self, doc_obj):
'''show_all_dependent(doc_obj): shows all objects that depend on doc_obj. This method is probably useless.'''
from .DepGraphTools import getAllDependencies, getAllDependent
self.show(self._3D_objects(getAllDependent(doc_obj)))
def restore_all_dependent(self, doc_obj):
        '''restore_all_dependent(doc_obj): restores original visibilities of all dependent objects.'''
from .DepGraphTools import getAllDependencies, getAllDependent
self.restoreVPProperty( getAllDependent(doc_obj), ('Visibility', 'LinkVisibility') )
def hide_all_dependencies(self, doc_obj):
'''hide_all_dependencies(doc_obj): hides all objects that doc_obj depends on (directly and indirectly).'''
from .DepGraphTools import getAllDependencies, getAllDependent
self.hide(self._3D_objects(getAllDependencies(doc_obj)))
def show_all_dependencies(self, doc_obj):
'''show_all_dependencies(doc_obj): shows all objects that doc_obj depends on (directly and indirectly). This method is probably useless.'''
from .DepGraphTools import getAllDependencies, getAllDependent
self.show(self._3D_objects(getAllDependencies(doc_obj)))
def saveCamera(self, vw = None):
self._change()
from .SceneDetails.Camera import Camera
self.save(Camera(self.document))
def restoreCamera(self, ultimate = False):
from .SceneDetails.Camera import Camera
dt = Camera(self.document)
self.restoreDetail(dt, ultimate)
def setUnpickable(self, doc_obj_or_list, actual_pick_style = 2): #2 is coin.SoPickStyle.UNPICKABLE
'''setUnpickable(doc_obj_or_list, actual_pick_style = 2): sets object unpickable (transparent to clicks).
doc_obj_or_list: object or list of objects to alter (App)
actual_pick_style: optional parameter, specifying the actual pick style:
0 = regular, 1 = bounding box, 2 (default) = unpickable.
Implementation detail: uses SoPickStyle node. If viewprovider already has a node
of this type as direct child, one is used. Otherwise, new one is created and
inserted as the very first node, and remains there even after restore()/deleting
tempovis. '''
from .SceneDetails.Pickability import Pickability
from .ShowUtils import is3DObject
if not hasattr(doc_obj_or_list, '__iter__'):
doc_obj_or_list = [doc_obj_or_list]
for doc_obj in doc_obj_or_list:
if not is3DObject(doc_obj):
continue
dt = Pickability(doc_obj, actual_pick_style)
self.modify(dt)
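    # Illustrative example (placeholder names): make objects transparent to clicks while a
    # picking-heavy command runs:
    #   tv.setUnpickable(background_obj)       # default pick style 2 = unpickable
    #   tv.setUnpickable([obj_a, obj_b], 1)    # 1 = bounding-box picking only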
def clipPlane(self, doc_obj_or_list, enable, placement, offset = 0.02):
'''clipPlane(doc_obj_or_list, enable, placement, offset): slices off the object with a clipping plane.
doc_obj_or_list: object or list of objects to alter (App)
enable: True if you want clipping, False if you want to remove clipping:
placement: XY plane of local coordinates of the placement is the clipping plane. The placement must be in document's global coordinate system.
offset: shifts the plane. Positive offset reveals more of the object.
Implementation detail: uses SoClipPlane node. If viewprovider already has a node
of this type as direct child, one is used. Otherwise, new one is created and
inserted as the very first node. The node is left, but disabled when tempovis is restoring.'''
from .SceneDetails.ObjectClipPlane import ObjectClipPlane
from .ShowUtils import is3DObject
if not hasattr(doc_obj_or_list, '__iter__'):
doc_obj_or_list = [doc_obj_or_list]
for doc_obj in doc_obj_or_list:
if not is3DObject(doc_obj):
continue
dt = ObjectClipPlane(doc_obj, enable, placement, offset)
self.modify(dt)
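    # Illustrative example (placeholder names): slice a solid with the global XY plane to peek
    # inside, then remove the clipping again:
    #   tv.clipPlane(solid_obj, True, App.Placement())    # enable clipping at the global XY plane
    #   tv.clipPlane(solid_obj, False, App.Placement())   # disable it again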
@staticmethod
def allVisibleObjects(aroundObject):
'''allVisibleObjects(aroundObject): returns list of objects that have to be toggled invisible for only aroundObject to remain.
If a whole container can be made invisible, it is returned, instead of its child objects.'''
from .ShowUtils import is3DObject
from . import Containers
chain = Containers.VisGroupChain(aroundObject)
result = []
for i in range(len(chain)):
cnt = chain[i]
cnt_next = chain[i+1] if i+1 < len(chain) else aroundObject
container = Containers.Container(cnt)
for obj in container.getVisGroupChildren():
if not is3DObject(obj):
continue
if obj is not cnt_next:
if container.isChildVisible(obj):
result.append(obj)
return result
def sketchClipPlane(self, sketch, enable = None):
'''sketchClipPlane(sketch, enable = None): Clips all objects by plane of sketch.
If enable argument is omitted, calling the routine repeatedly will toggle clipping plane.'''
from .SceneDetails.ClipPlane import ClipPlane
editDoc = Gui.editDocument()
if editDoc is None:
doc = sketch.Document
pla = sketch.getGlobalPlacement()
else:
doc = editDoc.Document
pla = App.Placement(editDoc.EditingTransform)
toggle = {False: 0, True: 1, None: -1}[enable]
self.modify(ClipPlane(doc, toggle, pla, 0.02))
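    # Illustrative example: calling repeatedly without the `enable` argument toggles the plane:
    #   tv.sketchClipPlane(sketch)    # first call: clipping on
    #   tv.sketchClipPlane(sketch)    # second call: clipping off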
def activateWorkbench(self, wb_name):
from .SceneDetails.Workbench import Workbench
self.modify(Workbench(wb_name))
#</convenience functions>
#<internals>
def _restoreDetail(self, detail):
p = self.data[detail.full_key]
tv1, curr = self._value_after(detail, query_scene= p.mild_restore)
if p.mild_restore:
if self.data_requested[detail.full_key] != curr:
#the value on the scene doesn't match what was requested through TV. User probably changed it. We don't want to mess it up.
self._purge_milds(detail)
return
if tv1 is None:
# no other TV has changed this detail later, apply to the scene
detail.apply_data(p.data)
else:
#modify saved detail of higher TV
tv1.data[detail.full_key].data = p.data
def _purge_milds(self, detail):
"""_purge_milds(detail): wipes out detail from earlier TVs if the detail is mild-restore."""
if not self.is_in_stack:
return
seq_before, seq_after = self.stack.getSplitSequence(self)
for tv in reversed(seq_before):
if tv.has(detail):
if tv.data[detail.full_key].mild_restore:
tv.forgetDetail(detail)
else:
#hard-restoring value encountered, stop
break
def _change(self):
'''to be called whenever anything is done that is to be restored later.'''
if self.state == S_EMPTY:
self.state = S_ACTIVE
if self.state == S_RESTORED:
Wrn("Attempting to use a TV that has been restored. There must be a problem with code.")
self.tv_redo = None
def _value_after(self, detail, query_scene = False):
        '''_value_after(detail): returns (tv, detail1). SceneDetail instance holds "current" value of
scene detail (current from the context of this TV; i.e. either the current scene
status, or the saved state from upper TVs).
If no upper TV has saved the detail value, returns either (None, None), or
(None, detail1) if query_scene is True, where detail1 holds value from the scene.'''
def scene_value():
if query_scene:
cpy = copy(detail)
cpy.data = cpy.scene_value()
return (None, cpy)
else:
return (None, None)
if self.is_in_stack:
va = self.stack.value_after(self, detail)
if va is None:
return scene_value()
else:
return va
else:
return scene_value()
def _3D_objects(self, doc_obj_or_list):
"""_3D_objects(doc_obj_or_list): returns list of objects that are in 3d view."""
from .ShowUtils import is3DObject
if not hasattr(doc_obj_or_list, '__iter__'):
doc_obj_or_list = [doc_obj_or_list]
return [obj for obj in doc_obj_or_list if is3DObject(obj)]
def __getstate__(self):
return None
def __setstate__(self, state):
self._init_attrs()
| lgpl-2.1 | -740,256,230,812,950,300 | -1,409,755,402,528,049,000 | 45.503731 | 198 | 0.600939 | false |
dillia23/code-dot-org | blockly-core/appengine/report.py | 22 | 1677 | """Blockly Demo: Report
Copyright 2012 Google Inc.
http://blockly.googlecode.com/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Store reports about code written by users.
"""
__author__ = "ellen.spertus@gmail.com (Ellen Spertus)"
import cgi
import logging
from google.appengine.ext import db
print "Content-type: text/plain\n"
class Report(db.Model):
identifier = db.FloatProperty()
application = db.StringProperty()
date = db.DateTimeProperty(auto_now_add=True)
level = db.IntegerProperty()
result = db.IntegerProperty()
# StringProperty is limited to 500 characters, so use TextProperty.
program = db.TextProperty()
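# Illustrative example of the form fields this handler reads (sample values only):
#   id=12345.0  app=maze  level=3  result=1  program=<xml ...>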
# Catch errors extracting form fields or converting to numeric types.
# Let any other errors propagate up.
try:
forms = cgi.FieldStorage()
identifier = float(forms["id"].value)
application = forms["app"].value
level = int(forms["level"].value)
result = int(forms["result"].value)
program = forms["program"].value
row = Report(identifier = identifier, application = application,
level = level, result = result, program = program)
row.put()
except (ValueError, KeyError):
logging.error("Unable to extract all form fields.")
| apache-2.0 | 5,264,235,597,033,053,000 | 7,636,007,119,095,933,000 | 30.641509 | 72 | 0.742397 | false |
cheral/orange3 | Orange/tests/sql/test_filter.py | 11 | 32037 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
from Orange.data.sql.table import SqlTable, SqlRowInstance
from Orange.data import filter, domain
from Orange.tests.sql.base import PostgresTest, sql_version, sql_test
@sql_test
class TestIsDefinedSql(PostgresTest):
def setUp(self):
self.data = [
[1, 2, 3, None, 'm'],
[2, 3, 1, 4, 'f'],
[None, None, None, None, None],
[7, None, 3, None, 'f'],
]
conn, self.table_name = self.create_sql_table(self.data)
self.table = SqlTable(conn, self.table_name, inspect_values=True)
def tearDown(self):
self.drop_sql_table(self.table_name)
def test_on_all_columns(self):
filtered_data = filter.IsDefined()(self.table)
correct_data = [row for row in self.data if all(row)]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_selected_columns(self):
filtered_data = filter.IsDefined(columns=[0])(self.table)
correct_data = [row for row in self.data if row[0]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_all_columns_negated(self):
filtered_data = filter.IsDefined(negate=True)(self.table)
correct_data = [row for row in self.data if not all(row)]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_selected_columns_negated(self):
filtered_data = \
filter.IsDefined(negate=True, columns=[4])(self.table)
correct_data = [row for row in self.data if not row[4]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_can_inherit_is_defined_filter(self):
filtered_data = filter.IsDefined(columns=[1])(self.table)
filtered_data = filtered_data[:, 4]
correct_data = [[row[4]]for row in self.data if row[1]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
@sql_test
class TestHasClass(PostgresTest):
def setUp(self):
self.data = [
[1, 2, 3, None, 'm'],
[2, 3, 1, 4, 'f'],
[None, None, None, None, None],
[7, None, 3, None, 'f'],
]
self.conn, self.table_name = self.create_sql_table(self.data)
table = SqlTable(self.conn, self.table_name, inspect_values=True)
variables = table.domain.variables
new_table = table.copy()
new_table.domain = domain.Domain(variables[:-1], variables[-1:])
self.table = new_table
def tearDown(self):
self.drop_sql_table(self.table_name)
def test_has_class(self):
filtered_data = filter.HasClass()(self.table)
correct_data = [row for row in self.data if row[-1]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_negated(self):
filtered_data = filter.HasClass(negate=True)(self.table)
correct_data = [row for row in self.data if not row[-1]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
@sql_test
class TestSameValueSql(PostgresTest):
def setUp(self):
self.data = [
[1, 2, 3, 'a', 'm'],
[2, None, 1, 'a', 'f'],
[None, 3, 1, 'b', None],
[2, 2, 3, 'b', 'f'],
]
self.conn, self.table_name = self.create_sql_table(self.data)
self.table = SqlTable(self.conn, self.table_name, inspect_values=True)
def tearDown(self):
self.drop_sql_table(self.table_name)
def test_on_continuous_attribute(self):
filtered_data = filter.SameValue(0, 1)(self.table)
correct_data = [row for row in self.data if row[0] == 1]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_continuous_attribute_with_unknowns(self):
filtered_data = filter.SameValue(1, 2)(self.table)
correct_data = [row for row in self.data if row[1] == 2]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_continuous_attribute_with_unknown_value(self):
filtered_data = filter.SameValue(1, None)(self.table)
correct_data = [row for row in self.data if row[1] is None]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_continuous_attribute_negated(self):
filtered_data = filter.SameValue(0, 1, negate=True)(self.table)
correct_data = [row for row in self.data if not row[0] == 1]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_discrete_attribute(self):
filtered_data = filter.SameValue(3, 'a')(self.table)
correct_data = [row for row in self.data if row[3] == 'a']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_discrete_attribute_with_unknown_value(self):
filtered_data = filter.SameValue(4, None)(self.table)
correct_data = [row for row in self.data if row[4] is None]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_discrete_attribute_with_unknowns(self):
filtered_data = filter.SameValue(4, 'm')(self.table)
correct_data = [row for row in self.data if row[4] == 'm']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_discrete_attribute_negated(self):
filtered_data = filter.SameValue(3, 'a', negate=True)(self.table)
correct_data = [row for row in self.data if not row[3] == 'a']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_discrete_attribute_value_passed_as_int(self):
values = self.table.domain[3].values
filtered_data = filter.SameValue(3, 0, negate=True)(self.table)
correct_data = [row for row in self.data if not row[3] == values[0]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_on_discrete_attribute_value_passed_as_float(self):
values = self.table.domain[3].values
filtered_data = filter.SameValue(3, 0., negate=True)(self.table)
correct_data = [row for row in self.data if not row[3] == values[0]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
@sql_test
class TestValuesSql(PostgresTest):
def setUp(self):
self.data = [
[1, 2, 3, 'a', 'm'],
[2, None, 1, 'a', 'f'],
[None, 3, 1, 'b', None],
[2, 2, 3, 'b', 'f'],
]
conn, self.table_name = self.create_sql_table(self.data)
self.table = SqlTable(conn, self.table_name, inspect_values=True)
def tearDown(self):
self.drop_sql_table(self.table_name)
def test_values_filter_with_no_conditions(self):
with self.assertRaises(ValueError):
filtered_data = filter.Values([])(self.table)
def test_discrete_value_filter(self):
filtered_data = filter.Values(conditions=[
filter.FilterDiscrete(3, ['a'])
])(self.table)
correct_data = [row for row in self.data if row[3] in ['a']]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_discrete_value_filter_with_multiple_values(self):
filtered_data = filter.Values(conditions=[
filter.FilterDiscrete(3, ['a', 'b'])
])(self.table)
correct_data = [row for row in self.data if row[3] in ['a', 'b']]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_discrete_value_filter_with_None(self):
filtered_data = filter.Values(conditions=[
filter.FilterDiscrete(3, None)
])(self.table)
correct_data = [row for row in self.data if row[3] is not None]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.Equal, 1)
])(self.table)
correct_data = [row for row in self.data if row[0] == 1]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_not_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.NotEqual, 1)
])(self.table)
correct_data = [row for row in self.data if row[0] != 1]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_less(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.Less, 2)
])(self.table)
correct_data = [row for row in self.data
if row[0] is not None and row[0] < 2]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_less_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.LessEqual, 2)
])(self.table)
correct_data = [row for row in self.data
if row[0] is not None and row[0] <= 2]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_greater(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.Greater, 1)
])(self.table)
correct_data = [row for row in self.data
if row[0] is not None and row[0] > 1]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_greater_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.GreaterEqual, 1)
])(self.table)
correct_data = [row for row in self.data
if row[0] is not None and row[0] >= 1]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_between(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.Between, 1, 2)
])(self.table)
correct_data = [row for row in self.data
if row[0] is not None and 1 <= row[0] <= 2]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_outside(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(0, filter.FilterContinuous.Outside, 2, 3)
])(self.table)
correct_data = [row for row in self.data
if row[0] is not None and not 2 <= row[0] <= 3]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_continuous_value_filter_isdefined(self):
filtered_data = filter.Values(conditions=[
filter.FilterContinuous(1, filter.FilterContinuous.IsDefined)
])(self.table)
correct_data = [row for row in self.data if row[1] is not None]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
@sql_test
class TestFilterStringSql(PostgresTest):
def setUp(self):
self.data = [
[w] for w in "Lorem ipsum dolor sit amet, consectetur adipiscing"
"elit. Vestibulum vel dolor nulla. Etiam elit lectus, mollis nec"
"mattis sed, pellentesque in turpis. Vivamus non nisi dolor. Etiam"
"lacinia dictum purus, in ullamcorper ante vulputate sed. Nullam"
"congue blandit elementum. Donec blandit laoreet posuere. Proin"
"quis augue eget tortor posuere mollis. Fusce vestibulum bibendum"
"neque at convallis. Donec iaculis risus volutpat malesuada"
"vehicula. Ut cursus tempor massa vulputate lacinia. Pellentesque"
"eu tortor sed diam placerat porttitor et volutpat risus. In"
"vulputate rutrum lacus ac sagittis. Suspendisse interdum luctus"
"sem auctor commodo.".split(' ')] + [[None], [None]]
self.conn, self.table_name = self.create_sql_table(self.data)
self.table = SqlTable(self.conn, self.table_name)
def tearDown(self):
self.drop_sql_table(self.table_name)
def test_filter_string_is_defined(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.IsDefined)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] is not None]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Equal, 'in')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] == 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_equal_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Equal, 'In',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] == 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_equal_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Equal, 'donec',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] == 'Donec']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_not_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.NotEqual, 'in')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] != 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_not_equal_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.NotEqual, 'In',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] != 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_not_equal_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.NotEqual, 'donec',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] != 'Donec']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_less(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Less, 'A')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0] < 'A']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_less_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Less, 'In',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() < 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_less_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Less, 'donec',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() < 'donec']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_less_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.LessEqual, 'A')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0] <= 'A']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_less_equal_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.LessEqual, 'In',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() <= 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_less_equal_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.LessEqual, 'donec',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() <= 'donec']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_greater(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Greater, 'volutpat')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0] > 'volutpat']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_greater_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Greater, 'In',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() > 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_greater_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Greater, 'donec',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() > 'donec']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_greater_equal(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.GreaterEqual, 'volutpat')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0] >= 'volutpat']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_greater_equal_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.GreaterEqual, 'In',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() >= 'in']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_greater_equal_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.GreaterEqual, 'donec',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].lower() >= 'donec']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_between(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Between, 'a', 'c')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and 'a' <= row[0] <= 'c']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_between_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Between, 'I', 'O',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and 'i' < row[0].lower() <= 'o']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_between_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Between, 'i', 'O',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and 'i' <= row[0].lower() <= 'o']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_contains(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Contains, 'et')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and 'et' in row[0]]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_contains_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Contains, 'eT',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and 'et' in row[0].lower()]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_contains_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Contains, 'do',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and 'do' in row[0].lower()]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_outside(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Outside, 'am', 'di')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and not 'am' < row[0] < 'di']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_outside_case_insensitive(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.Outside, 'd', 'k',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and not 'd' < row[0].lower() < 'k']
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_starts_with(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.StartsWith, 'D')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].startswith('D')]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_starts_with_case_insensitive(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.StartsWith, 'D',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None
and row[0].lower().startswith('d')]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_ends_with(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.EndsWith, 's')
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None and row[0].endswith('s')]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_ends_with_case_insensitive(self):
filtered_data = filter.Values(conditions=[
filter.FilterString(-1, filter.FilterString.EndsWith, 'S',
case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data
if row[0] is not None
and row[0].lower().endswith('s')]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_list(self):
filtered_data = filter.Values(conditions=[
filter.FilterStringList(-1, ['et', 'in'])
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] in ['et', 'in']]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_list_case_insensitive_value(self):
filtered_data = filter.Values(conditions=[
filter.FilterStringList(-1, ['Et', 'In'], case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] in ['et', 'in']]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
def test_filter_string_list_case_insensitive_data(self):
filtered_data = filter.Values(conditions=[
filter.FilterStringList(-1, ['donec'], case_sensitive=False)
])(self.table)
correct_data = [SqlRowInstance(filtered_data.domain, row)
for row in self.data if row[0] in ['Donec']]
self.assertEqual(len(filtered_data), len(correct_data))
self.assertSequenceEqual(filtered_data, correct_data)
| bsd-2-clause | 8,207,754,882,827,303,000 | -2,153,838,920,418,975,700 | 43.25 | 81 | 0.616225 | false |
laurentgo/pants | src/python/pants/java/nailgun_executor.py | 5 | 11618 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import hashlib
import logging
import os
import re
import select
import threading
import time
from six import string_types
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.pantsd.process_manager import ProcessGroup, ProcessManager
from pants.util.dirutil import safe_open
logger = logging.getLogger(__name__)
class NailgunProcessGroup(ProcessGroup):
_NAILGUN_KILL_LOCK = threading.Lock()
def __init__(self):
ProcessGroup.__init__(self, name='nailgun')
# TODO: this should enumerate the .pids dir first, then fallback to ps enumeration (& warn).
def _iter_nailgun_instances(self, everywhere=False):
def predicate(proc):
if proc.name() == NailgunExecutor._PROCESS_NAME:
if not everywhere:
return NailgunExecutor._PANTS_NG_ARG in proc.cmdline()
else:
return any(arg.startswith(NailgunExecutor._PANTS_NG_ARG_PREFIX) for arg in proc.cmdline())
return self.iter_instances(predicate)
def killall(self, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
"""
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid))
proc.terminate()
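  # Illustrative usage sketch:
  #   NailgunProcessGroup().killall()                  # nailguns for this build root only
  #   NailgunProcessGroup().killall(everywhere=True)   # all pants-started nailguns on this machine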
# TODO: Once we integrate standard logging into our reporting framework, we can consider making
# some of the log.debug() below into log.info(). Right now it just looks wrong on the console.
class NailgunExecutor(Executor, ProcessManager):
"""Executes java programs by launching them in nailgun server.
If a nailgun is not available for a given set of jvm args and classpath, one is launched and
re-used for the given jvm args and classpath on subsequent runs.
"""
# 'NGServer 0.9.1 started on 127.0.0.1, port 53785.'
_NG_PORT_REGEX = re.compile(r'.*\s+port\s+(\d+)\.$')
# Used to identify if we own a given nailgun server.
_PANTS_NG_ARG_PREFIX = b'-Dpants.buildroot'
_PANTS_FINGERPRINT_ARG_PREFIX = b'-Dpants.nailgun.fingerprint'
_PANTS_OWNER_ARG_PREFIX = b'-Dpants.nailgun.owner'
_PANTS_NG_ARG = '='.join((_PANTS_NG_ARG_PREFIX, get_buildroot()))
_NAILGUN_SPAWN_LOCK = threading.Lock()
_SELECT_WAIT = 1
_PROCESS_NAME = b'java'
def __init__(self, identity, workdir, nailgun_classpath, distribution=None, ins=None,
connect_timeout=10, connect_attempts=5):
Executor.__init__(self, distribution=distribution)
ProcessManager.__init__(self, name=identity, process_name=self._PROCESS_NAME)
if not isinstance(workdir, string_types):
raise ValueError('Workdir must be a path string, not: {workdir}'.format(workdir=workdir))
self._identity = identity
self._workdir = workdir
self._ng_stdout = os.path.join(workdir, 'stdout')
self._ng_stderr = os.path.join(workdir, 'stderr')
self._nailgun_classpath = maybe_list(nailgun_classpath)
self._ins = ins
self._connect_timeout = connect_timeout
self._connect_attempts = connect_attempts
def __str__(self):
return 'NailgunExecutor({identity}, dist={dist}, pid={pid} socket={socket})'.format(
identity=self._identity, dist=self._distribution, pid=self.pid, socket=self.socket)
def _parse_fingerprint(self, cmdline):
fingerprints = [cmd.split('=')[1] for cmd in cmdline if cmd.startswith(
self._PANTS_FINGERPRINT_ARG_PREFIX + '=')]
return fingerprints[0] if fingerprints else None
@property
def fingerprint(self):
"""This provides the nailgun fingerprint of the running process otherwise None."""
if self.cmdline:
return self._parse_fingerprint(self.cmdline)
def _create_owner_arg(self, workdir):
# Currently the owner is identified via the full path to the workdir.
return '='.join((self._PANTS_OWNER_ARG_PREFIX, workdir))
def _create_fingerprint_arg(self, fingerprint):
return '='.join((self._PANTS_FINGERPRINT_ARG_PREFIX, fingerprint))
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
# TODO(John Sirois): hash classpath contents?
[digest.update(item) for item in (''.join(sorted(jvm_options)),
''.join(sorted(classpath)),
repr(java_version))]
return digest.hexdigest()
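  # Note (illustrative): two invocations with the same jvm_options, classpath and java version
  # produce the same fingerprint, so the running nailgun server is reused; changing any of them
  # yields a new fingerprint and forces a fresh server.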
def _runner(self, classpath, main, jvm_options, args, cwd=None):
"""Runner factory. Called via Executor.execute()."""
command = self._create_command(classpath, main, jvm_options, args)
class Runner(self.Runner):
@property
def executor(this):
return self
@property
def command(self):
return list(command)
def run(this, stdout=None, stderr=None, cwd=None):
nailgun = self._get_nailgun_client(jvm_options, classpath, stdout, stderr)
try:
logger.debug('Executing via {ng_desc}: {cmd}'.format(ng_desc=nailgun, cmd=this.cmd))
return nailgun(main, cwd, *args)
except nailgun.NailgunError as e:
self.terminate()
raise self.Error('Problem launching via {ng_desc} command {main} {args}: {msg}'
.format(ng_desc=nailgun, main=main, args=' '.join(args), msg=e))
return Runner()
def _check_nailgun_state(self, new_fingerprint):
running = self.is_alive()
updated = running and (self.fingerprint != new_fingerprint or
self.cmd != self._distribution.java)
logging.debug('Nailgun {nailgun} state: updated={up!s} running={run!s} fingerprint={old_fp} '
'new_fingerprint={new_fp} distribution={old_dist} new_distribution={new_dist}'
.format(nailgun=self._identity, up=updated, run=running,
old_fp=self.fingerprint, new_fp=new_fingerprint,
old_dist=self.cmd, new_dist=self._distribution.java))
return running, updated
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client."""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug('Found running nailgun server that needs updating, killing {server}'
.format(server=self._identity))
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr)
return self._create_ngclient(self.socket, stdout, stderr)
def _await_socket(self, timeout):
"""Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun stdout."""
with safe_open(self._ng_stdout, 'r') as ng_stdout:
start_time = time.time()
while 1:
readable, _, _ = select.select([ng_stdout], [], [], self._SELECT_WAIT)
if readable:
line = ng_stdout.readline() # TODO: address deadlock risk here.
try:
return self._NG_PORT_REGEX.match(line).group(1)
except AttributeError:
pass
if (time.time() - start_time) > timeout:
raise NailgunClient.NailgunError(
'Failed to read nailgun output after {sec} seconds!'.format(sec=timeout))
def _create_ngclient(self, port, stdout, stderr):
return NailgunClient(port=port, ins=self._ins, out=stdout, err=stderr, workdir=get_buildroot())
def ensure_connectable(self, nailgun):
"""Ensures that a nailgun client is connectable or raises NailgunError."""
attempt_count = 0
while 1:
if attempt_count > self._connect_attempts:
logger.debug('Failed to connect to ng after {count} attempts'
.format(count=self._connect_attempts))
raise NailgunClient.NailgunError('Failed to connect to ng server.')
      sock = None
      try:
        sock = nailgun.try_connect()
        if sock:
          logger.debug('Connected to ng server {server!r}'.format(server=self))
          return
      finally:
        # sock may be None if the connection attempt failed; only close a real socket.
        if sock:
          sock.close()
attempt_count += 1
time.sleep(self.WAIT_INTERVAL)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Synchronously spawn a new nailgun server."""
# Truncate the nailguns stdout & stderr.
self._write_file(self._ng_stdout, '')
self._write_file(self._ng_stderr, '')
jvm_options = jvm_options + [self._PANTS_NG_ARG,
self._create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint)]
post_fork_child_opts = dict(fingerprint=fingerprint,
jvm_options=jvm_options,
classpath=classpath,
stdout=stdout,
stderr=stderr)
logger.debug('Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}'
.format(i=self._identity, f=fingerprint, j=jvm_options, cp=classpath))
self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
# Wait for and write the port information in the parent so we can bail on exception/timeout.
self.await_pid(self._connect_timeout)
self.write_socket(self._await_socket(self._connect_timeout))
logger.debug('Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}'
.format(i=self._identity, f=fingerprint, pid=self.pid, port=self.socket))
client = self._create_ngclient(self.socket, stdout, stderr)
self.ensure_connectable(client)
return client
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(classpath=classpath,
main='com.martiansoftware.nailgun.NGServer',
jvm_options=jvm_options,
args=[':0'],
stdin=safe_open('/dev/null', 'r'),
stdout=safe_open(self._ng_stdout, 'w'),
stderr=safe_open(self._ng_stderr, 'w'),
close_fds=True)
self.write_pid(subproc.pid)
| apache-2.0 | 5,708,061,587,585,094,000 | -8,807,591,198,074,997,000 | 40.942238 | 100 | 0.651231 | false |
advancedplotting/aplot | python/plotserv/api_annotations.py | 1 | 8009 | # Copyright (c) 2014-2015, Heliosphere Research LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Handles VIs in "api_annotations".
"""
import numpy as np
from matplotlib import pyplot as plt
from .core import resource
from .terminals import remove_none
from . import filters
from . import errors
@resource('text')
def text(ctx, a):
""" Display text on the plot """
plotid = a.plotid()
x = a.float('x')
y = a.float('y')
s = a.string('s')
relative = a.bool('coordinates')
textprops = a.text()
display = a.display()
ctx.set(plotid)
ax = plt.gca()
# None-finite values here mean we skip the plot
if x is None or y is None:
return
k = textprops._k()
k.update(display._k())
k['clip_on'] = True
if relative:
k['transform'] = ax.transAxes
remove_none(k)
plt.text(x, y, s, **k)
@resource('hline')
def hline(ctx, a):
""" Plot a horizontal line """
plotid = a.plotid()
y = a.float('y')
xmin = a.float('xmin')
xmax = a.float('xmax')
line = a.line()
display = a.display()
ctx.set(plotid)
ctx.fail_if_polar()
# Non-finite value provided
if y is None:
return
k = { 'xmin': xmin,
'xmax': xmax,
'linewidth': line.width,
'linestyle': line.style,
'color': line.color if line.color is not None else 'k', }
k.update(display._k())
remove_none(k)
plt.axhline(y, **k)
@resource('vline')
def vline(ctx, a):
""" Plot a vertical line """
plotid = a.plotid()
x = a.float('x')
ymin = a.float('ymin')
ymax = a.float('ymax')
line = a.line()
display = a.display()
ctx.set(plotid)
ctx.fail_if_polar()
# Non-finite value provided
if x is None:
return
k = { 'ymin': ymin,
'ymax': ymax,
'linewidth': line.width,
'linestyle': line.style,
'color': line.color if line.color is not None else 'k', }
k.update(display._k())
remove_none(k)
plt.axvline(x, **k)
@resource('colorbar')
def colorbar(ctx, a):
""" Display a colorbar """
plotid = a.plotid()
label = a.string('label')
ticks = a.dbl_1d('ticks')
ticklabels = a.string_1d('ticklabels')
ctx.set(plotid)
# If no colormapped object has been plotted, MPL complains.
# We permit this, and simply don't add the colorbar.
if ctx.mappable is None:
return
c = plt.colorbar(ctx.mappable)
# Don't bother setting an empty label
if len(label) > 0:
c.set_label(label)
# Both specified
if len(ticks) > 0 and len(ticklabels) > 0:
ticks, ticklabels = filters.filter_1d(ticks, ticklabels)
c.set_ticks(ticks)
c.set_ticklabels(ticklabels)
# Just ticks specified
elif len(ticks) > 0:
ticks = ticks[np.isfinite(ticks)]
c.set_ticks(ticks)
# Just ticklabels specified
else:
# Providing zero-length "ticks" array invokes auto-ticking, in which
# case any ticklabels are ignored.
pass
@resource('legend')
def legend(ctx, a):
""" Represents Legend.vi.
Note that there is no Positions enum on the Python side; the MPL
values are hard-coded into the LabView control.
"""
POSITIONS = { 0: 0,
1: 1,
2: 9,
3: 2,
4: 6,
5: 3,
6: 8,
7: 4,
8: 7,
9: 10 }
plotid = a.plotid()
position = a.enum('position', POSITIONS)
ctx.set(plotid)
k = {'loc': position, 'fontsize': 'medium'}
remove_none(k)
if len(ctx.legend_entries) > 0:
objects, labels = zip(*ctx.legend_entries)
plt.legend(objects, labels, **k)
@resource('label')
def label(ctx, a):
""" Title, X axis and Y axis labels. """
LOCATIONS = {0: 'title', 1: 'xlabel', 2: 'ylabel'}
plotid = a.plotid()
location = a.enum('kind', LOCATIONS)
label = a.string('label')
text = a.text()
ctx.set(plotid)
k = text._k()
if location == 'title':
plt.title(label, **k)
elif location == 'xlabel':
plt.xlabel(label, **k)
elif location == 'ylabel':
ctx.fail_if_polar()
plt.ylabel(label, **k)
else:
pass
@resource('circle')
def circle(ctx, a):
""" Draw a circle on a rectangular plot """
plotid = a.plotid()
x = a.float('x')
y = a.float('y')
radius = a.float('radius')
color = a.color('color')
line = a.line()
display = a.display()
f = ctx.set(plotid)
ctx.fail_if_polar()
ctx.fail_if_log_symlog()
# Like Text.vi, if any critical input is Nan we do nothing
if x is None or y is None or radius is None:
return
# Catch this before MPL complains
if radius <= 0:
return
k = { 'edgecolor': line.color,
'linestyle': line.style,
'linewidth': line.width,
'facecolor': color if color is not None else '#bbbbbb', }
k.update(display._k())
remove_none(k)
c = plt.Circle((x,y), radius, **k)
f.gca().add_artist(c)
@resource('rectangle')
def rectangle(ctx, a):
""" Draw a rectangle """
plotid = a.plotid()
x = a.float('x')
y = a.float('y')
width = a.float('width')
height = a.float('height')
color = a.color('color')
line = a.line()
display = a.display()
f = ctx.set(plotid)
ctx.fail_if_symlog()
# Like Text.vi, if any critical input is Nan we do nothing
if x is None or y is None or width is None or height is None:
return
if width == 0 or height == 0:
return
k = { 'edgecolor': line.color,
'linestyle': line.style,
'linewidth': line.width,
'facecolor': color if color is not None else '#bbbbbb', }
k.update(display._k())
remove_none(k)
r = plt.Rectangle((x,y), width, height, **k)
f.gca().add_artist(r) | bsd-3-clause | 8,567,149,656,905,378,000 | 2,346,926,114,397,437,000 | 25.611296 | 77 | 0.558122 | false |
waseem18/oh-mainline | vendor/packages/gdata/src/gdata/blogger/data.py | 61 | 4551 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Blogger API."""
__author__ = 'j.s@google.com (Jeff Scudder)'
import re
import urlparse
import atom.core
import gdata.data
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s'
BLOG_NAME_PATTERN = re.compile('(http://)(\w*)')
BLOG_ID_PATTERN = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
BLOG_ID2_PATTERN = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
POST_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
PAGE_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.page-)(\w*)')
COMMENT_ID_PATTERN = re.compile('.*-(\w*)$')
class BloggerEntry(gdata.data.GDEntry):
"""Adds convenience methods inherited by all Blogger entries."""
def get_blog_id(self):
"""Extracts the Blogger id of this blog.
This method is useful when contructing URLs by hand. The blog id is
often used in blogger operation URLs. This should not be confused with
the id member of a BloggerBlog. The id element is the Atom id XML element.
The blog id which this method returns is a part of the Atom id.
Returns:
The blog's unique id as a string.
"""
if self.id.text:
match = BLOG_ID_PATTERN.match(self.id.text)
if match:
return match.group(2)
else:
return BLOG_ID2_PATTERN.match(self.id.text).group(2)
return None
GetBlogId = get_blog_id
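  # Illustrative example: an entry whose Atom id is
  # 'tag:blogger.com,1999:blog-1234567890' yields get_blog_id() == '1234567890'.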
def get_blog_name(self):
"""Finds the name of this blog as used in the 'alternate' URL.
An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
entry representing the above example, this method would return 'blogName'.
Returns:
The blog's URL name component as a string.
"""
for link in self.link:
if link.rel == 'alternate':
return urlparse.urlparse(link.href)[1].split(".", 1)[0]
return None
GetBlogName = get_blog_name
class Blog(BloggerEntry):
"""Represents a blog which belongs to the user."""
class BlogFeed(gdata.data.GDFeed):
entry = [Blog]
class BlogPost(BloggerEntry):
"""Represents a single post on a blog."""
def add_label(self, label):
"""Adds a label to the blog post.
The label is represented by an Atom category element, so this method
is shorthand for appending a new atom.Category object.
Args:
label: str
"""
self.category.append(atom.data.Category(scheme=LABEL_SCHEME, term=label))
AddLabel = add_label
def get_post_id(self):
"""Extracts the postID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return POST_ID_PATTERN.match(self.id.text).group(4)
return None
GetPostId = get_post_id
class BlogPostFeed(gdata.data.GDFeed):
entry = [BlogPost]
class BlogPage(BloggerEntry):
"""Represents a single page on a blog."""
def get_page_id(self):
"""Extracts the pageID string from entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return PAGE_ID_PATTERN.match(self.id.text).group(4)
return None
GetPageId = get_page_id
class BlogPageFeed(gdata.data.GDFeed):
entry = [BlogPage]
class InReplyTo(atom.core.XmlElement):
_qname = THR_TEMPLATE % 'in-reply-to'
href = 'href'
ref = 'ref'
source = 'source'
type = 'type'
class Comment(BloggerEntry):
"""Blog post comment entry in a feed listing comments on a post or blog."""
in_reply_to = InReplyTo
def get_comment_id(self):
"""Extracts the commentID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return COMMENT_ID_PATTERN.match(self.id.text).group(1)
return None
GetCommentId = get_comment_id
class CommentFeed(gdata.data.GDFeed):
entry = [Comment]
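# --- Editor-added sketch (not part of the original gdata module) ---
# The ID helpers above pull numeric identifiers out of Atom ids.  For example,
# an entry whose id element text is
#     'tag:blogger.com,1999:blog-123456.post-987654'
# yields '123456' from get_blog_id() and '987654' from get_post_id().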
| agpl-3.0 | -1,700,790,752,466,145,300 | 7,638,456,925,348,548,000 | 26.089286 | 78 | 0.682268 | false |
henriquegemignani/randovania | randovania/gui/main_window.py | 1 | 25113 | import functools
import json
import logging
import os
import platform
import subprocess
from functools import partial
from pathlib import Path
from typing import Optional, List
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2.QtCore import QUrl, Signal, Qt
from qasync import asyncSlot
from randovania import VERSION
from randovania.game_description.resources.trick_resource_info import TrickResourceInfo
from randovania.games.game import RandovaniaGame
from randovania.gui.generated.main_window_ui import Ui_MainWindow
from randovania.gui.lib import common_qt_lib, async_dialog, theme
from randovania.gui.lib.trick_lib import used_tricks, difficulties_for_trick
from randovania.gui.lib.window_manager import WindowManager
from randovania.interface_common import update_checker
from randovania.interface_common.enum_lib import iterate_enum
from randovania.interface_common.options import Options
from randovania.interface_common.preset_manager import PresetManager
from randovania.layout.layout_description import LayoutDescription
from randovania.layout.trick_level import LayoutTrickLevel
from randovania.resolver import debug
_DISABLE_VALIDATION_WARNING = """
<html><head/><body>
<p>While it sometimes throws errors, the validation is what guarantees that your seed is completable.<br/>
Do <span style=" font-weight:600;">not</span> disable if you're uncomfortable with possibly unbeatable seeds.
</p><p align="center">Are you sure you want to disable validation?</p></body></html>
"""
def _update_label_on_show(label: QtWidgets.QLabel, text: str):
def showEvent(_):
if label._delayed_text is not None:
label.setText(label._delayed_text)
label._delayed_text = None
label._delayed_text = text
label.showEvent = showEvent
class MainWindow(WindowManager, Ui_MainWindow):
newer_version_signal = Signal(str, str)
options_changed_signal = Signal()
_is_preview_mode: bool = False
menu_new_version: Optional[QtWidgets.QAction] = None
_current_version_url: Optional[str] = None
_options: Options
_data_visualizer: Optional[QtWidgets.QWidget] = None
_map_tracker: QtWidgets.QWidget
_preset_manager: PresetManager
GameDetailsSignal = Signal(LayoutDescription)
InitPostShowSignal = Signal()
@property
def _tab_widget(self):
return self.main_tab_widget
@property
def preset_manager(self) -> PresetManager:
return self._preset_manager
@property
def main_window(self) -> QtWidgets.QMainWindow:
return self
@property
def is_preview_mode(self) -> bool:
return self._is_preview_mode
def __init__(self, options: Options, preset_manager: PresetManager,
network_client, preview: bool):
super().__init__()
self.setupUi(self)
self.setWindowTitle("Randovania {}".format(VERSION))
self._is_preview_mode = preview
self.setAcceptDrops(True)
common_qt_lib.set_default_window_icon(self)
# Remove all hardcoded link color
about_document: QtGui.QTextDocument = self.about_text_browser.document()
about_document.setHtml(about_document.toHtml().replace("color:#0000ff;", ""))
self.browse_racetime_label.setText(self.browse_racetime_label.text().replace("color:#0000ff;", ""))
self.intro_label.setText(self.intro_label.text().format(version=VERSION))
self._preset_manager = preset_manager
self.network_client = network_client
if preview:
debug.set_level(2)
# Signals
self.newer_version_signal.connect(self.display_new_version)
self.options_changed_signal.connect(self.on_options_changed)
self.GameDetailsSignal.connect(self._open_game_details)
self.InitPostShowSignal.connect(self.initialize_post_show)
self.intro_play_now_button.clicked.connect(lambda: self.welcome_tab_widget.setCurrentWidget(self.tab_play))
self.open_faq_button.clicked.connect(self._open_faq)
self.open_database_viewer_button.clicked.connect(partial(self._open_data_visualizer_for_game,
RandovaniaGame.PRIME2))
for game in RandovaniaGame:
self.hint_item_names_game_combo.addItem(game.long_name, game)
self.hint_location_game_combo.addItem(game.long_name, game)
self.hint_item_names_game_combo.currentIndexChanged.connect(self._update_hints_text)
self.hint_location_game_combo.currentIndexChanged.connect(self._update_hint_locations)
self.import_permalink_button.clicked.connect(self._import_permalink)
self.import_game_file_button.clicked.connect(self._import_spoiler_log)
self.browse_racetime_button.clicked.connect(self._browse_racetime)
self.create_new_seed_button.clicked.connect(
lambda: self.welcome_tab_widget.setCurrentWidget(self.tab_create_seed))
# Menu Bar
for action, game in ((self.menu_action_prime_1_data_visualizer, RandovaniaGame.PRIME1),
(self.menu_action_prime_2_data_visualizer, RandovaniaGame.PRIME2),
(self.menu_action_prime_3_data_visualizer, RandovaniaGame.PRIME3)):
action.triggered.connect(partial(self._open_data_visualizer_for_game, game))
for action, game in ((self.menu_action_edit_prime_1, RandovaniaGame.PRIME1),
(self.menu_action_edit_prime_2, RandovaniaGame.PRIME2),
(self.menu_action_edit_prime_3, RandovaniaGame.PRIME3)):
action.triggered.connect(partial(self._open_data_editor_for_game, game))
self.menu_action_item_tracker.triggered.connect(self._open_item_tracker)
self.menu_action_map_tracker.triggered.connect(self._on_menu_action_map_tracker)
self.menu_action_edit_existing_database.triggered.connect(self._open_data_editor_prompt)
self.menu_action_validate_seed_after.triggered.connect(self._on_validate_seed_change)
self.menu_action_timeout_generation_after_a_time_limit.triggered.connect(self._on_generate_time_limit_change)
self.menu_action_dark_mode.triggered.connect(self._on_menu_action_dark_mode)
self.menu_action_open_auto_tracker.triggered.connect(self._open_auto_tracker)
self.menu_action_previously_generated_games.triggered.connect(self._on_menu_action_previously_generated_games)
self.menu_action_layout_editor.triggered.connect(self._on_menu_action_layout_editor)
self.menu_prime_1_trick_details.aboutToShow.connect(self._create_trick_details_prime_1)
self.menu_prime_2_trick_details.aboutToShow.connect(self._create_trick_details_prime_2)
self.menu_prime_3_trick_details.aboutToShow.connect(self._create_trick_details_prime_3)
# Setting this event only now, so all options changed trigger only once
options.on_options_changed = self.options_changed_signal.emit
self._options = options
self.main_tab_widget.setCurrentIndex(0)
def closeEvent(self, event):
self.generate_seed_tab.stop_background_process()
super().closeEvent(event)
def dragEnterEvent(self, event: QtGui.QDragEnterEvent):
from randovania.layout.preset_migration import VersionedPreset
valid_extensions = [
LayoutDescription.file_extension(),
VersionedPreset.file_extension(),
]
valid_extensions_with_dot = {
f".{extension}"
for extension in valid_extensions
}
for url in event.mimeData().urls():
ext = os.path.splitext(url.toLocalFile())[1]
if ext in valid_extensions_with_dot:
event.acceptProposedAction()
return
def dropEvent(self, event: QtGui.QDropEvent):
from randovania.layout.preset_migration import VersionedPreset
for url in event.mimeData().urls():
path = Path(url.toLocalFile())
if path.suffix == f".{LayoutDescription.file_extension()}":
self.open_game_details(LayoutDescription.from_file(path))
return
elif path.suffix == f".{VersionedPreset.file_extension()}":
self.main_tab_widget.setCurrentWidget(self.welcome_tab)
self.welcome_tab_widget.setCurrentWidget(self.tab_create_seed)
self.generate_seed_tab.import_preset_file(path)
return
def showEvent(self, event: QtGui.QShowEvent):
self.InitPostShowSignal.emit()
# Delayed Initialization
@asyncSlot()
async def initialize_post_show(self):
self.InitPostShowSignal.disconnect(self.initialize_post_show)
logging.info("Will initialize things in post show")
await self._initialize_post_show_body()
logging.info("Finished initializing post show")
async def _initialize_post_show_body(self):
logging.info("Will load OnlineInteractions")
from randovania.gui.main_online_interaction import OnlineInteractions
logging.info("Creating OnlineInteractions...")
self.online_interactions = OnlineInteractions(self, self.preset_manager, self.network_client, self,
self._options)
logging.info("Will load GenerateSeedTab")
from randovania.gui.generate_seed_tab import GenerateSeedTab
logging.info("Creating GenerateSeedTab...")
self.generate_seed_tab = GenerateSeedTab(self, self, self._options)
logging.info("Running GenerateSeedTab.setup_ui")
self.generate_seed_tab.setup_ui()
# Update hints text
logging.info("Will _update_hints_text")
self._update_hints_text()
logging.info("Will hide hint locations combo")
self.hint_location_game_combo.setVisible(False)
self.hint_location_game_combo.setCurrentIndex(1)
logging.info("Will update for modified options")
with self._options:
self.on_options_changed()
def _update_hints_text(self):
from randovania.gui.lib import hints_text
hints_text.update_hints_text(self.hint_item_names_game_combo.currentData(), self.hint_item_names_tree_widget)
def _update_hint_locations(self):
from randovania.gui.lib import hints_text
hints_text.update_hint_locations(self.hint_location_game_combo.currentData(), self.hint_tree_widget)
# Generate Seed
def _open_faq(self):
self.main_tab_widget.setCurrentWidget(self.help_tab)
self.help_tab_widget.setCurrentWidget(self.tab_faq)
async def generate_seed_from_permalink(self, permalink):
from randovania.interface_common.status_update_lib import ProgressUpdateCallable
from randovania.gui.dialog.background_process_dialog import BackgroundProcessDialog
def work(progress_update: ProgressUpdateCallable):
from randovania.interface_common import simplified_patcher
layout = simplified_patcher.generate_layout(progress_update=progress_update,
permalink=permalink,
options=self._options)
progress_update(f"Success! (Seed hash: {layout.shareable_hash})", 1)
return layout
new_layout = await BackgroundProcessDialog.open_for_background_task(work, "Creating a game...")
self.open_game_details(new_layout)
@asyncSlot()
async def _import_permalink(self):
from randovania.gui.dialog.permalink_dialog import PermalinkDialog
dialog = PermalinkDialog()
result = await async_dialog.execute_dialog(dialog)
if result == QtWidgets.QDialog.Accepted:
permalink = dialog.get_permalink_from_field()
await self.generate_seed_from_permalink(permalink)
def _import_spoiler_log(self):
json_path = common_qt_lib.prompt_user_for_input_game_log(self)
if json_path is not None:
layout = LayoutDescription.from_file(json_path)
self.open_game_details(layout)
@asyncSlot()
async def _browse_racetime(self):
from randovania.gui.dialog.racetime_browser_dialog import RacetimeBrowserDialog
dialog = RacetimeBrowserDialog()
if not await dialog.refresh():
return
result = await async_dialog.execute_dialog(dialog)
if result == QtWidgets.QDialog.Accepted:
await self.generate_seed_from_permalink(dialog.permalink)
def open_game_details(self, layout: LayoutDescription):
self.GameDetailsSignal.emit(layout)
def _open_game_details(self, layout: LayoutDescription):
from randovania.gui.seed_details_window import SeedDetailsWindow
details_window = SeedDetailsWindow(self, self._options)
details_window.update_layout_description(layout)
details_window.show()
self.track_window(details_window)
# Releases info
async def request_new_data(self):
from randovania.interface_common import github_releases_data
await self._on_releases_data(await github_releases_data.get_releases())
async def _on_releases_data(self, releases: Optional[List[dict]]):
import markdown
current_version = update_checker.strict_current_version()
last_changelog = self._options.last_changelog_displayed
all_change_logs, new_change_logs, version_to_display = update_checker.versions_to_display_for_releases(
current_version, last_changelog, releases)
if version_to_display is not None:
self.display_new_version(version_to_display)
if all_change_logs:
changelog_tab = QtWidgets.QWidget()
changelog_tab.setObjectName("changelog_tab")
changelog_tab_layout = QtWidgets.QVBoxLayout(changelog_tab)
changelog_tab_layout.setContentsMargins(0, 0, 0, 0)
changelog_tab_layout.setObjectName("changelog_tab_layout")
changelog_scroll_area = QtWidgets.QScrollArea(changelog_tab)
changelog_scroll_area.setWidgetResizable(True)
changelog_scroll_area.setObjectName("changelog_scroll_area")
changelog_scroll_contents = QtWidgets.QWidget()
changelog_scroll_contents.setGeometry(QtCore.QRect(0, 0, 489, 337))
changelog_scroll_contents.setObjectName("changelog_scroll_contents")
changelog_scroll_layout = QtWidgets.QVBoxLayout(changelog_scroll_contents)
changelog_scroll_layout.setObjectName("changelog_scroll_layout")
for entry in all_change_logs:
changelog_label = QtWidgets.QLabel(changelog_scroll_contents)
_update_label_on_show(changelog_label, markdown.markdown(entry))
changelog_label.setObjectName("changelog_label")
changelog_label.setWordWrap(True)
changelog_scroll_layout.addWidget(changelog_label)
changelog_scroll_area.setWidget(changelog_scroll_contents)
changelog_tab_layout.addWidget(changelog_scroll_area)
self.help_tab_widget.addTab(changelog_tab, "Change Log")
if new_change_logs:
await async_dialog.message_box(self, QtWidgets.QMessageBox.Information,
"What's new", markdown.markdown("\n".join(new_change_logs)))
with self._options as options:
options.last_changelog_displayed = current_version
def display_new_version(self, version: update_checker.VersionDescription):
if self.menu_new_version is None:
self.menu_new_version = QtWidgets.QAction("", self)
self.menu_new_version.triggered.connect(self.open_version_link)
self.menu_bar.addAction(self.menu_new_version)
self.menu_new_version.setText("New version available: {}".format(version.tag_name))
self._current_version_url = version.html_url
def open_version_link(self):
if self._current_version_url is None:
raise RuntimeError("Called open_version_link, but _current_version_url is None")
QtGui.QDesktopServices.openUrl(QUrl(self._current_version_url))
# Options
def on_options_changed(self):
self.menu_action_validate_seed_after.setChecked(self._options.advanced_validate_seed_after)
self.menu_action_timeout_generation_after_a_time_limit.setChecked(
self._options.advanced_timeout_during_generation)
self.menu_action_dark_mode.setChecked(self._options.dark_mode)
self.generate_seed_tab.on_options_changed(self._options)
theme.set_dark_theme(self._options.dark_mode)
# Menu Actions
def _open_data_visualizer_for_game(self, game: RandovaniaGame):
self.open_data_visualizer_at(None, None, game)
def open_data_visualizer_at(self,
world_name: Optional[str],
area_name: Optional[str],
game: RandovaniaGame = RandovaniaGame.PRIME2,
):
from randovania.gui.data_editor import DataEditorWindow
data_visualizer = DataEditorWindow.open_internal_data(game, False)
self._data_visualizer = data_visualizer
if world_name is not None:
data_visualizer.focus_on_world(world_name)
if area_name is not None:
data_visualizer.focus_on_area(area_name)
self._data_visualizer.show()
def _open_data_editor_for_game(self, game: RandovaniaGame):
from randovania.gui.data_editor import DataEditorWindow
self._data_editor = DataEditorWindow.open_internal_data(game, True)
self._data_editor.show()
def _open_data_editor_prompt(self):
from randovania.gui.data_editor import DataEditorWindow
database_path = common_qt_lib.prompt_user_for_database_file(self)
if database_path is None:
return
with database_path.open("r") as database_file:
self._data_editor = DataEditorWindow(json.load(database_file), database_path, False, True)
self._data_editor.show()
@asyncSlot()
async def _on_menu_action_map_tracker(self):
dialog = QtWidgets.QInputDialog(self)
dialog.setWindowTitle("Map Tracker")
dialog.setLabelText("Select preset used for the tracker.")
dialog.setComboBoxItems([preset.name for preset in self._preset_manager.all_presets])
dialog.setTextValue(self._options.selected_preset_name)
result = await async_dialog.execute_dialog(dialog)
if result == QtWidgets.QDialog.Accepted:
preset = self._preset_manager.preset_for_name(dialog.textValue())
self.open_map_tracker(preset.get_preset().configuration)
def open_map_tracker(self, configuration: "EchoesConfiguration"):
from randovania.gui.tracker_window import TrackerWindow, InvalidLayoutForTracker
try:
self._map_tracker = TrackerWindow(self._options.tracker_files_path, configuration)
except InvalidLayoutForTracker as e:
QtWidgets.QMessageBox.critical(
self,
"Unsupported configuration for Tracker",
str(e)
)
return
self._map_tracker.show()
def _open_item_tracker(self):
# Importing this at root level seems to crash linux tests :(
from PySide2.QtWebEngineWidgets import QWebEngineView
tracker_window = QtWidgets.QMainWindow()
tracker_window.setWindowTitle("Item Tracker")
tracker_window.resize(370, 380)
web_view = QWebEngineView(tracker_window)
tracker_window.setCentralWidget(web_view)
self.web_view = web_view
def update_window_icon():
tracker_window.setWindowIcon(web_view.icon())
web_view.iconChanged.connect(update_window_icon)
web_view.load(QUrl("https://spaghettitoastbook.github.io/echoes/tracker/"))
tracker_window.show()
self._item_tracker_window = tracker_window
# Difficulties stuff
def _exec_trick_details(self, popup: "TrickDetailsPopup"):
self._trick_details_popup = popup
self._trick_details_popup.setWindowModality(Qt.WindowModal)
self._trick_details_popup.open()
def _open_trick_details_popup(self, game, trick: TrickResourceInfo, level: LayoutTrickLevel):
from randovania.gui.dialog.trick_details_popup import TrickDetailsPopup
self._exec_trick_details(TrickDetailsPopup(self, self, game, trick, level))
def _create_trick_details_prime_1(self):
self.menu_prime_1_trick_details.aboutToShow.disconnect(self._create_trick_details_prime_1)
self._setup_difficulties_menu(RandovaniaGame.PRIME1, self.menu_prime_1_trick_details)
def _create_trick_details_prime_2(self):
self.menu_prime_2_trick_details.aboutToShow.disconnect(self._create_trick_details_prime_2)
self._setup_difficulties_menu(RandovaniaGame.PRIME2, self.menu_prime_2_trick_details)
def _create_trick_details_prime_3(self):
self.menu_prime_3_trick_details.aboutToShow.disconnect(self._create_trick_details_prime_3)
self._setup_difficulties_menu(RandovaniaGame.PRIME3, self.menu_prime_3_trick_details)
def _setup_difficulties_menu(self, game: RandovaniaGame, menu: QtWidgets.QMenu):
from randovania.game_description import default_database
game = default_database.game_description_for(game)
tricks_in_use = used_tricks(game)
menu.clear()
for trick in sorted(game.resource_database.trick, key=lambda _trick: _trick.long_name):
if trick not in tricks_in_use:
continue
trick_menu = QtWidgets.QMenu(self)
trick_menu.setTitle(trick.long_name)
menu.addAction(trick_menu.menuAction())
used_difficulties = difficulties_for_trick(game, trick)
for i, trick_level in enumerate(iterate_enum(LayoutTrickLevel)):
if trick_level in used_difficulties:
difficulty_action = QtWidgets.QAction(self)
difficulty_action.setText(trick_level.long_name)
trick_menu.addAction(difficulty_action)
difficulty_action.triggered.connect(
functools.partial(self._open_trick_details_popup, game, trick, trick_level))
# ==========
@asyncSlot()
async def _on_validate_seed_change(self):
old_value = self._options.advanced_validate_seed_after
new_value = self.menu_action_validate_seed_after.isChecked()
if old_value and not new_value:
box = QtWidgets.QMessageBox(self)
box.setWindowTitle("Disable validation?")
box.setText(_DISABLE_VALIDATION_WARNING)
box.setIcon(QtWidgets.QMessageBox.Warning)
box.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
box.setDefaultButton(QtWidgets.QMessageBox.No)
user_response = await async_dialog.execute_dialog(box)
if user_response != QtWidgets.QMessageBox.Yes:
self.menu_action_validate_seed_after.setChecked(True)
return
with self._options as options:
options.advanced_validate_seed_after = new_value
def _on_generate_time_limit_change(self):
is_checked = self.menu_action_timeout_generation_after_a_time_limit.isChecked()
with self._options as options:
options.advanced_timeout_during_generation = is_checked
def _on_menu_action_dark_mode(self):
with self._options as options:
options.dark_mode = self.menu_action_dark_mode.isChecked()
def _open_auto_tracker(self):
from randovania.gui.auto_tracker_window import AutoTrackerWindow
self.auto_tracker_window = AutoTrackerWindow(common_qt_lib.get_game_connection(), self._options)
self.auto_tracker_window.show()
def _on_menu_action_previously_generated_games(self):
path = self._options.data_dir.joinpath("game_history")
try:
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
subprocess.run(["open", path])
else:
subprocess.run(["xdg-open", path])
except OSError:
print("Exception thrown :)")
box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Information, "Game History",
f"Previously generated games can be found at:\n{path}",
QtWidgets.QMessageBox.Ok, self)
box.setTextInteractionFlags(Qt.TextSelectableByMouse)
box.show()
def _on_menu_action_layout_editor(self):
from randovania.gui.corruption_layout_editor import CorruptionLayoutEditor
self.corruption_editor = CorruptionLayoutEditor()
self.corruption_editor.show()
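# Editor-added sketch (hypothetical, not part of Randovania): the window is
# normally created by the application entry point roughly as
#
#     window = MainWindow(options, preset_manager, network_client, preview=False)
#     window.show()
#
# where `options`, `preset_manager` and `network_client` come from the startup code.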
| gpl-3.0 | 3,252,894,393,071,820,000 | -5,864,234,511,285,010,000 | 44.330325 | 118 | 0.669454 | false |
paran0ids0ul/infernal-twin | build/reportlab/src/reportlab/graphics/charts/doughnut.py | 28 | 15381 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/doughnut.py
# doughnut chart
__version__=''' $Id$ '''
__doc__="""Doughnut chart
Produces a circular chart like the doughnut charts produced by Excel.
Can handle multiple series (which produce concentric 'rings' in the chart).
"""
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors,\
isNoneOrListOfNoneOrStrings,\
isNoneOrListOfNoneOrNumbers,\
isNumberOrNone
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, Ellipse, \
Wedge, String, SolidShape, UserNode, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.piecharts import AbstractPieChart, WedgeProperties, _addWedgeLabel, fixLabelOverlaps
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.widgets.markers import Marker
from functools import reduce
class SectorProperties(WedgeProperties):
"""This holds descriptive information about the sectors in a doughnut chart.
It is not to be confused with the 'sector itself'; this just holds
a recipe for how to format one, and does not allow you to hack the
angles. It can format a genuine Sector object for you with its
format method.
"""
_attrMap = AttrMap(BASE=WedgeProperties,
)
class Doughnut(AbstractPieChart):
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc='X position of the chart within its container.'),
y = AttrMapValue(isNumber, desc='Y position of the chart within its container.'),
width = AttrMapValue(isNumber, desc='width of doughnut bounding box. Need not be same as width.'),
height = AttrMapValue(isNumber, desc='height of doughnut bounding box. Need not be same as height.'),
data = AttrMapValue(None, desc='list of numbers defining sector sizes; need not sum to 1'),
labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
direction = AttrMapValue(OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
slices = AttrMapValue(None, desc="collection of sector descriptor objects"),
simpleLabels = AttrMapValue(isBoolean, desc="If true(default) use String not super duper WedgeLabel"),
# advanced usage
checkLabelOverlap = AttrMapValue(isBoolean, desc="If true check and attempt to fix\n standard label overlaps(default off)",advancedUsage=1),
sideLabels = AttrMapValue(isBoolean, desc="If true attempt to make chart with labels along side and pointers", advancedUsage=1)
)
def __init__(self):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.data = [1,1]
self.labels = None # or list of strings
self.startAngle = 90
self.direction = "clockwise"
self.simpleLabels = 1
self.checkLabelOverlap = 0
self.sideLabels = 0
self.slices = TypedPropertyCollection(SectorProperties)
self.slices[0].fillColor = colors.darkcyan
self.slices[1].fillColor = colors.blueviolet
self.slices[2].fillColor = colors.blue
self.slices[3].fillColor = colors.cyan
self.slices[4].fillColor = colors.pink
self.slices[5].fillColor = colors.magenta
self.slices[6].fillColor = colors.yellow
def demo(self):
d = Drawing(200, 100)
dn = Doughnut()
dn.x = 50
dn.y = 10
dn.width = 100
dn.height = 80
dn.data = [10,20,30,40,50,60]
dn.labels = ['a','b','c','d','e','f']
dn.slices.strokeWidth=0.5
dn.slices[3].popout = 10
dn.slices[3].strokeWidth = 2
dn.slices[3].strokeDashArray = [2,2]
dn.slices[3].labelRadius = 1.75
dn.slices[3].fontColor = colors.red
dn.slices[0].fillColor = colors.darkcyan
dn.slices[1].fillColor = colors.blueviolet
dn.slices[2].fillColor = colors.blue
dn.slices[3].fillColor = colors.cyan
dn.slices[4].fillColor = colors.aquamarine
dn.slices[5].fillColor = colors.cadetblue
dn.slices[6].fillColor = colors.lightcoral
d.add(dn)
return d
def normalizeData(self, data=None):
from operator import add
sum = float(reduce(add,data,0))
return abs(sum)>=1e-8 and list(map(lambda x,f=360./sum: f*x, data)) or len(data)*[0]
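    # Editor-added illustration (not in the original file): normalizeData scales
    # the values so they sum to 360 degrees, e.g.
    #
    #     Doughnut().normalizeData([10, 20, 30])   # -> [60.0, 120.0, 180.0]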
def makeSectors(self):
# normalize slice data
if isinstance(self.data,(list,tuple)) and isinstance(self.data[0],(list,tuple)):
#it's a nested list, more than one sequence
normData = []
n = []
for l in self.data:
t = self.normalizeData(l)
normData.append(t)
n.append(len(t))
self._seriesCount = max(n)
else:
normData = self.normalizeData(self.data)
n = len(normData)
self._seriesCount = n
#labels
checkLabelOverlap = self.checkLabelOverlap
L = []
L_add = L.append
if self.labels is None:
labels = []
if not isinstance(n,(list,tuple)):
labels = [''] * n
else:
for m in n:
labels = list(labels) + [''] * m
else:
labels = self.labels
#there's no point in raising errors for less than enough labels if
#we silently create all for the extreme case of no labels.
if not isinstance(n,(list,tuple)):
i = n-len(labels)
if i>0:
labels = list(labels) + [''] * i
else:
tlab = 0
for m in n:
tlab += m
i = tlab-len(labels)
if i>0:
labels = list(labels) + [''] * i
xradius = self.width/2.0
yradius = self.height/2.0
centerx = self.x + xradius
centery = self.y + yradius
if self.direction == "anticlockwise":
whichWay = 1
else:
whichWay = -1
g = Group()
startAngle = self.startAngle #% 360
styleCount = len(self.slices)
if isinstance(self.data[0],(list,tuple)):
#multi-series doughnut
ndata = len(self.data)
yir = (yradius/2.5)/ndata
xir = (xradius/2.5)/ndata
ydr = (yradius-yir)/ndata
xdr = (xradius-xir)/ndata
for sn,series in enumerate(normData):
for i,angle in enumerate(series):
endAngle = (startAngle + (angle * whichWay)) #% 360
if abs(startAngle-endAngle)<1e-5:
startAngle = endAngle
continue
if startAngle < endAngle:
a1 = startAngle
a2 = endAngle
else:
a1 = endAngle
a2 = startAngle
startAngle = endAngle
#if we didn't use %stylecount here we'd end up with the later sectors
#all having the default style
sectorStyle = self.slices[i%styleCount]
# is it a popout?
cx, cy = centerx, centery
if sectorStyle.popout != 0:
# pop out the sector
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle * pi/180.0
popdistance = sectorStyle.popout
cx = centerx + popdistance * cos(aveAngleRadians)
cy = centery + popdistance * sin(aveAngleRadians)
yr1 = yir+sn*ydr
yr = yr1 + ydr
xr1 = xir+sn*xdr
xr = xr1 + xdr
if isinstance(n,(list,tuple)):
theSector = Wedge(cx, cy, xr, a1, a2, yradius=yr, radius1=xr1, yradius1=yr1)
else:
theSector = Wedge(cx, cy, xr, a1, a2, yradius=yr, radius1=xr1, yradius1=yr1, annular=True)
theSector.fillColor = sectorStyle.fillColor
theSector.strokeColor = sectorStyle.strokeColor
theSector.strokeWidth = sectorStyle.strokeWidth
theSector.strokeDashArray = sectorStyle.strokeDashArray
g.add(theSector)
if sn == 0:
text = self.getSeriesName(i,'')
if text:
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle*pi/180.0
labelRadius = sectorStyle.labelRadius
rx = xradius*labelRadius
ry = yradius*labelRadius
labelX = centerx + (0.5 * self.width * cos(aveAngleRadians) * labelRadius)
labelY = centery + (0.5 * self.height * sin(aveAngleRadians) * labelRadius)
l = _addWedgeLabel(self,text,averageAngle,labelX,labelY,sectorStyle)
if checkLabelOverlap:
l._origdata = { 'x': labelX, 'y':labelY, 'angle': averageAngle,
'rx': rx, 'ry':ry, 'cx':cx, 'cy':cy,
'bounds': l.getBounds(),
}
L_add(l)
else:
#single series doughnut
yir = yradius/2.5
xir = xradius/2.5
for i,angle in enumerate(normData):
endAngle = (startAngle + (angle * whichWay)) #% 360
if abs(startAngle-endAngle)<1e-5:
startAngle = endAngle
continue
if startAngle < endAngle:
a1 = startAngle
a2 = endAngle
else:
a1 = endAngle
a2 = startAngle
startAngle = endAngle
#if we didn't use %stylecount here we'd end up with the later sectors
#all having the default style
sectorStyle = self.slices[i%styleCount]
# is it a popout?
cx, cy = centerx, centery
if sectorStyle.popout != 0:
# pop out the sector
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle * pi/180.0
popdistance = sectorStyle.popout
cx = centerx + popdistance * cos(aveAngleRadians)
cy = centery + popdistance * sin(aveAngleRadians)
if n > 1:
theSector = Wedge(cx, cy, xradius, a1, a2, yradius=yradius, radius1=xir, yradius1=yir)
elif n==1:
theSector = Wedge(cx, cy, xradius, a1, a2, yradius=yradius, radius1=xir, yradius1=yir, annular=True)
theSector.fillColor = sectorStyle.fillColor
theSector.strokeColor = sectorStyle.strokeColor
theSector.strokeWidth = sectorStyle.strokeWidth
theSector.strokeDashArray = sectorStyle.strokeDashArray
g.add(theSector)
# now draw a label
if labels[i] != "":
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle*pi/180.0
labelRadius = sectorStyle.labelRadius
labelX = centerx + (0.5 * self.width * cos(aveAngleRadians) * labelRadius)
labelY = centery + (0.5 * self.height * sin(aveAngleRadians) * labelRadius)
rx = xradius*labelRadius
ry = yradius*labelRadius
l = _addWedgeLabel(self,labels[i],averageAngle,labelX,labelY,sectorStyle)
if checkLabelOverlap:
l._origdata = { 'x': labelX, 'y':labelY, 'angle': averageAngle,
'rx': rx, 'ry':ry, 'cx':cx, 'cy':cy,
'bounds': l.getBounds(),
}
L_add(l)
if checkLabelOverlap and L:
fixLabelOverlaps(L)
for l in L: g.add(l)
return g
def draw(self):
g = Group()
g.add(self.makeSectors())
return g
def sample1():
"Make up something from the individual Sectors"
d = Drawing(400, 400)
g = Group()
s1 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=0, endangledegrees=120, radius1=100)
s1.fillColor=colors.red
s1.strokeColor=None
d.add(s1)
s2 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=120, endangledegrees=240, radius1=100)
s2.fillColor=colors.green
s2.strokeColor=None
d.add(s2)
s3 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=240, endangledegrees=260, radius1=100)
s3.fillColor=colors.blue
s3.strokeColor=None
d.add(s3)
s4 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=260, endangledegrees=360, radius1=100)
s4.fillColor=colors.gray
s4.strokeColor=None
d.add(s4)
return d
def sample2():
"Make a simple demo"
d = Drawing(400, 400)
dn = Doughnut()
dn.x = 50
dn.y = 50
dn.width = 300
dn.height = 300
dn.data = [10,20,30,40,50,60]
d.add(dn)
return d
def sample3():
"Make a more complex demo"
d = Drawing(400, 400)
dn = Doughnut()
dn.x = 50
dn.y = 50
dn.width = 300
dn.height = 300
dn.data = [[10,20,30,40,50,60], [10,20,30,40]]
dn.labels = ['a','b','c','d','e','f']
d.add(dn)
return d
def sample4():
"Make a more complex demo with Label Overlap fixing"
d = Drawing(400, 400)
dn = Doughnut()
dn.x = 50
dn.y = 50
dn.width = 300
dn.height = 300
dn.data = [[10,20,30,40,50,60], [10,20,30,40]]
dn.labels = ['a','b','c','d','e','f']
dn.checkLabelOverlap = True
d.add(dn)
return d
if __name__=='__main__':
from reportlab.graphics.renderPDF import drawToFile
d = sample1()
drawToFile(d, 'doughnut1.pdf')
d = sample2()
drawToFile(d, 'doughnut2.pdf')
d = sample3()
drawToFile(d, 'doughnut3.pdf')
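    # Editor-added example (not part of the original file): also render the
    # label-overlap-fixing demo defined above.
    d = sample4()
    drawToFile(d, 'doughnut4.pdf')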
| gpl-3.0 | 4,742,610,950,981,630,000 | 8,310,498,147,371,672,000 | 37.4525 | 148 | 0.540862 | false |
pexip/os-kombu | kombu/utils/imports.py | 5 | 2072 | """Import related utilities."""
from __future__ import absolute_import, unicode_literals
import importlib
import sys
from kombu.five import reraise, string_t
def symbol_by_name(name, aliases={}, imp=None, package=None,
sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name('celery.concurrency.processes.TaskPool')
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name('default', {
... 'default': 'celery.concurrency.processes.TaskPool'})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = importlib.import_module
if not isinstance(name, string_t):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
except ValueError as exc:
reraise(ValueError,
ValueError("Couldn't import {0!r}: {1}".format(name, exc)),
sys.exc_info()[2])
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default
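# Editor-added sketch (not part of kombu): typical call sites resolve a class or
# function from a dotted path, optionally via an alias table, e.g.
#
#     symbol_by_name('os.path:join') is os.path.join              # -> True
#     symbol_by_name('loads', aliases={'loads': 'json:loads'})    # -> json.loads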
| bsd-3-clause | -4,866,749,985,754,240,000 | 1,299,057,023,272,405,800 | 30.876923 | 79 | 0.605695 | false |
mancoast/CPythonPyc_test | cpython/213_test_re.py | 5 | 12461 | import sys
sys.path = ['.'] + sys.path
from test_support import verify, verbose, TestFailed
import re
import sys, os, traceback
# Misc tests from Tim Peters' re.doc
if verbose:
print 'Running tests on re.search and re.match'
try:
verify(re.search('x*', 'axx').span(0) == (0, 0))
verify(re.search('x*', 'axx').span() == (0, 0))
verify(re.search('x+', 'axx').span(0) == (1, 3))
verify(re.search('x+', 'axx').span() == (1, 3))
verify(re.search('x', 'aaa') is None)
except:
raise TestFailed, "re.search"
try:
verify(re.match('a*', 'xxx').span(0) == (0, 0))
verify(re.match('a*', 'xxx').span() == (0, 0))
verify(re.match('x*', 'xxxa').span(0) == (0, 3))
verify(re.match('x*', 'xxxa').span() == (0, 3))
verify(re.match('a+', 'xxx') is None)
except:
raise TestFailed, "re.search"
if verbose:
print 'Running tests on re.sub'
try:
verify(re.sub("(?i)b+", "x", "bbbb BBBB") == 'x x')
def bump_num(matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
verify(re.sub(r'\d+', bump_num, '08.2 -2 23x99y') == '9.3 -3 24x100y')
verify(re.sub(r'\d+', bump_num, '08.2 -2 23x99y', 3) == '9.3 -3 23x99y')
verify(re.sub('.', lambda m: r"\n", 'x') == '\\n')
verify(re.sub('.', r"\n", 'x') == '\n')
s = r"\1\1"
verify(re.sub('(.)', s, 'x') == 'xx')
verify(re.sub('(.)', re.escape(s), 'x') == s)
verify(re.sub('(.)', lambda m: s, 'x') == s)
verify(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx') == 'xxxx')
verify(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx') == 'xxxx')
verify(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx') == 'xxxx')
verify(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx') == 'xxxx')
verify(re.sub('a', r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D', 'a') == '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
verify(re.sub('a', '\t\n\v\r\f\a', 'a') == '\t\n\v\r\f\a')
verify(re.sub('a', '\t\n\v\r\f\a', 'a') == (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
verify(re.sub('^\s*', 'X', 'test') == 'Xtest')
except AssertionError:
raise TestFailed, "re.sub"
try:
verify(re.sub('a', 'b', 'aaaaa') == 'bbbbb')
verify(re.sub('a', 'b', 'aaaaa', 1) == 'baaaa')
except AssertionError:
raise TestFailed, "qualified re.sub"
if verbose:
print 'Running tests on symbolic references'
try:
re.sub('(?P<a>x)', '\g<a', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<a a>', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<1a1>', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<ab>', 'xx')
except IndexError, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)|(?P<b>y)', '\\2', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
if verbose:
print 'Running tests on re.subn'
try:
verify(re.subn("(?i)b+", "x", "bbbb BBBB") == ('x x', 2))
verify(re.subn("b+", "x", "bbbb BBBB") == ('x BBBB', 1))
verify(re.subn("b+", "x", "xyz") == ('xyz', 0))
verify(re.subn("b*", "x", "xyz") == ('xxxyxzx', 4))
verify(re.subn("b*", "x", "xyz", 2) == ('xxxyz', 2))
except AssertionError:
raise TestFailed, "re.subn"
if verbose:
print 'Running tests on re.split'
try:
verify(re.split(":", ":a:b::c") == ['', 'a', 'b', '', 'c'])
verify(re.split(":*", ":a:b::c") == ['', 'a', 'b', 'c'])
verify(re.split("(:*)", ":a:b::c") == ['', ':', 'a', ':', 'b', '::', 'c'])
verify(re.split("(?::*)", ":a:b::c") == ['', 'a', 'b', 'c'])
verify(re.split("(:)*", ":a:b::c") == ['', ':', 'a', ':', 'b', ':', 'c'])
verify(re.split("([b:]+)", ":a:b::c") == ['', ':', 'a', ':b::', 'c'])
verify(re.split("(b)|(:+)", ":a:b::c") == \
['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c'] )
verify(re.split("(?:b)|(?::+)", ":a:b::c") == ['', 'a', '', '', 'c'])
except AssertionError:
raise TestFailed, "re.split"
try:
verify(re.split(":", ":a:b::c", 2) == ['', 'a', 'b::c'])
verify(re.split(':', 'a:b:c:d', 2) == ['a', 'b', 'c:d'])
verify(re.split("(:)", ":a:b::c", 2) == ['', ':', 'a', ':', 'b::c'])
verify(re.split("(:*)", ":a:b::c", 2) == ['', ':', 'a', ':', 'b::c'])
except AssertionError:
raise TestFailed, "qualified re.split"
if verbose:
print "Running tests on re.findall"
try:
verify(re.findall(":+", "abc") == [])
verify(re.findall(":+", "a:b::c:::d") == [":", "::", ":::"])
verify(re.findall("(:+)", "a:b::c:::d") == [":", "::", ":::"])
verify(re.findall("(:)(:*)", "a:b::c:::d") == [(":", ""),
(":", ":"),
(":", "::")] )
except AssertionError:
raise TestFailed, "re.findall"
if verbose:
print "Running tests on re.match"
try:
# No groups at all
m = re.match('a', 'a') ; verify(m.groups() == ())
# A single group
m = re.match('(a)', 'a') ; verify(m.groups() == ('a',))
pat = re.compile('((a)|(b))(c)?')
verify(pat.match('a').groups() == ('a', 'a', None, None))
verify(pat.match('b').groups() == ('b', None, 'b', None))
verify(pat.match('ac').groups() == ('a', 'a', None, 'c'))
verify(pat.match('bc').groups() == ('b', None, 'b', 'c'))
verify(pat.match('bc').groups("") == ('b', "", 'b', 'c'))
except AssertionError:
raise TestFailed, "match .groups() method"
try:
# A single group
m = re.match('(a)', 'a')
verify(m.group(0) == 'a')
verify(m.group(0) == 'a')
verify(m.group(1) == 'a')
verify(m.group(1, 1) == ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
verify(pat.match('a').group(1, 2, 3) == ('a', None, None))
verify(pat.match('b').group('a1', 'b2', 'c3') == (None, 'b', None))
verify(pat.match('ac').group(1, 'b2', 3) == ('a', None, 'c'))
except AssertionError:
raise TestFailed, "match .group() method"
if verbose:
print "Running tests on re.escape"
try:
p=""
for i in range(0, 256):
p = p + chr(i)
verify(re.match(re.escape(chr(i)), chr(i)) is not None)
verify(re.match(re.escape(chr(i)), chr(i)).span() == (0,1))
pat=re.compile( re.escape(p) )
verify(pat.match(p) is not None)
verify(pat.match(p).span() == (0,256))
except AssertionError:
raise TestFailed, "re.escape"
if verbose:
print 'Pickling a RegexObject instance'
import pickle
pat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(pat)
pat = pickle.loads(s)
try:
verify(re.I == re.IGNORECASE)
verify(re.L == re.LOCALE)
verify(re.M == re.MULTILINE)
verify(re.S == re.DOTALL)
verify(re.X == re.VERBOSE)
except AssertionError:
raise TestFailed, 're module constants'
for flags in [re.I, re.M, re.X, re.S, re.L]:
try:
r = re.compile('^pattern$', flags)
except:
print 'Exception raised on flag', flags
if verbose:
print 'Test engine limitations'
# Try nasty case that overflows the straightforward recursive
# implementation of repeated groups.
try:
verify(re.match('(x)*', 50000*'x').span() == (0, 50000))
except RuntimeError, v:
print v
from re_tests import *
if verbose:
print 'Running re_tests test suite'
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError, ('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print '=== Syntax error:', t
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print '*** Unexpected error ***', t
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error, msg:
print '=== Unexpected exception', t, repr(msg)
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print '=== Succeeded incorrectly', t
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print '=== grouping error', t,
print repr(repl) + ' should be ' + repr(expected)
else:
print '=== Failed incorrectly', t
# Try the match on a unicode string, and check that it
# still succeeds.
try:
result = obj.search(unicode(s, "latin-1"))
if result is None:
print '=== Fails on unicode match', t
except NameError:
continue # 1.5.2
except TypeError:
continue # unicode test case
# Try the match on a unicode pattern, and check that it
# still succeeds.
obj=re.compile(unicode(pattern, "latin-1"))
result = obj.search(s)
if result is None:
print '=== Fails on unicode pattern match', t
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
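            # (Editor-added note: e.g. r'py\B' matches inside 'python' but not in
            # 'py', so re-running such a pattern on the clipped range can
            # legitimately change the outcome, hence the exclusion.)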
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print '=== Failed on range-limited match', t
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print '=== Fails on case-insensitive match', t
# Try the match with LOCALE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print '=== Fails on locale-sensitive match', t
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print '=== Fails on unicode-sensitive match', t
| gpl-3.0 | 6,802,431,258,396,922,000 | -5,317,995,753,375,668,000 | 31.282383 | 116 | 0.498997 | false |
maltsev/LatexWebOffice | app/views/document.py | 1 | 15983 | # -*- coding: utf-8 -*-
"""
* Purpose : Document and project management interface
* Creation Date : 19-11-2014
* Last Modified : Tue 24 Feb 2015 15:46:51 CET
* Author : mattis
* Coauthors : christian, ingo, Kirill
* Sprintnumber : 2, 5
* Backlog entry : TEK1, 3ED9, DOK8, DO14, KOL1
"""
import os
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.shortcuts import render
from django.views.static import serve
import settings
from app.common import util
from app.common.constants import ERROR_MESSAGES
from app.views import file, folder, project, template
from app.models.projecttemplate import ProjectTemplate
from app.models.file.file import File
from app.models.file.texfile import TexFile
from app.models.file.plaintextfile import PlainTextFile
from app.models.file.pdf import PDF
from app.models.project import Project
from app.models.folder import Folder
globalparas = {
'id': {'name': 'id', 'type': int},
'content': {'name': 'content', 'type': str},
'folderid': {'name': 'folderid', 'type': int},
'name': {'name': 'name', 'type': str},
'formatid': {'name': 'formatid', 'type': int},
# 'compilerid': {'name': 'compilerid', 'type': int},
'forcecompile': {'name': 'forcecompile', 'type': int}
}
# dictionary of the available commands and their corresponding actions
# the corresponding methods are located in:
# '/app/views/project.py', '/app/views/file.py', '/app/views/folder.py' and '/app/views/collaboration.py'
available_commands = {
'projectcreate': {
'command': project.projectCreate,
'parameters': [{'para': globalparas['name'], 'stringcheck': True}]
},
'projectclone': {
'command': project.projectClone,
'parameters': [{'para': globalparas['id'], 'type': Project, 'requirerights': ['owner', 'collaborator']},
{'para': globalparas['name'], 'stringcheck': True}]
},
'projectrm': {
'command': project.projectRm,
'parameters': [{'para': globalparas['id'], 'type': Project}]
},
'projectrename': {
'command': project.projectRename,
'parameters': [{'para': globalparas['id'], 'type': Project},
{'para': globalparas['name'], 'stringcheck': True}]
},
'listprojects': {
'command': project.listProjects,
'parameters': []
},
'importzip': {
'command': project.importZip,
'parameters': []
},
'exportzip': {
'command': project.exportZip,
'parameters': [{'para': globalparas['id']}]
},
'inviteuser': {
'command': project.inviteUser,
'parameters': [{'para': globalparas['id'], 'type': Project},
{'para': globalparas['name'], 'stringcheck': True}]
},
'hasinvitedusers': {
'command': project.hasInvitedUsers,
'parameters': [{'para': globalparas['id'], 'type': Project}]
},
'listinvitedusers': {
'command': project.listInvitedUsers,
'parameters': [{'para': globalparas['id'], 'type': Project}]
},
'listunconfirmedcollaborativeprojects': {
'command': project.listUnconfirmedCollaborativeProjects,
'parameters': []
},
'activatecollaboration': {
'command': project.activateCollaboration,
'parameters': [{'para': globalparas['id'], 'type': Project, 'requirerights': ['owner', 'invitee']}]
},
'quitcollaboration': {
'command': project.quitCollaboration,
'parameters': [
{'para': globalparas['id'], 'type': Project, 'requirerights': ['owner', 'invitee', 'collaborator']}]
},
'cancelcollaboration': {
'command': project.cancelCollaboration,
'parameters': [{'para': globalparas['id'], 'type': Project},
{'para': globalparas['name'], 'stringcheck': True}]
},
'createtex': {
'command': file.createTexFile,
'parameters': [{'para': globalparas['id'], 'type': Folder, 'requirerights': ['owner', 'collaborator']},
{'para': globalparas['name'], 'filenamecheck': True}]
},
'updatefile': {
'command': file.updateFile,
'parameters': [{'para': globalparas['id'], 'type': PlainTextFile,
'requirerights': ['owner', 'collaborator'], 'lockcheck': False},
{'para': globalparas['content']}]
},
'deletefile': {
'command': file.deleteFile,
'parameters': [{'para': globalparas['id'], 'type': File,
'requirerights': ['owner', 'collaborator'], 'lockcheck': True}]
},
'renamefile': {
'command': file.renameFile,
'parameters': [{'para': globalparas['id'], 'type': File,
'requirerights': ['owner', 'collaborator'], 'lockcheck': True},
{'para': globalparas['name'], 'filenamecheck': True}]
},
'movefile': {
'command': file.moveFile,
'parameters': [{'para': globalparas['id'], 'type': File,
'requirerights': ['owner', 'collaborator'], 'lockcheck': True},
{'para': globalparas['folderid'], 'type': Folder, 'requirerights': ['owner', 'collaborator']}]
},
'uploadfiles': {
'command': file.uploadFiles,
'parameters': [{'para': globalparas['id'], 'type': Folder, 'requirerights': ['owner', 'collaborator']}]
},
'downloadfile': {
'command': file.downloadFile,
'parameters': [{'para': globalparas['id']}]
},
'gettext': {
'command': file.getText,
'parameters': [{'para': globalparas['id'], 'type': PlainTextFile, 'requirerights': ['owner', 'collaborator']}]
},
'fileinfo': {
'command': file.fileInfo,
'parameters': [{'para': globalparas['id'], 'type': File, 'requirerights': ['owner', 'collaborator']}]
},
'compile': {
'command': file.latexCompile,
'parameters': [{'para': globalparas['id'], 'type': TexFile,
'requirerights': ['owner', 'collaborator'], 'lockcheck': True},
{'para': globalparas['formatid']},
# {'para': globalparas['compilerid']},
{'para': globalparas['forcecompile']}]
},
'lockfile': {
'command': file.lockFile,
'parameters': [{'para': globalparas['id'], 'type': File, 'requirerights': ['owner', 'collaborator']}]
},
'unlockfile': {
'command': file.unlockFile,
'parameters': [{'para': globalparas['id'], 'type': File, 'requirerights': ['owner', 'collaborator']}]
},
'getlog': {
'command': file.getLog,
'parameters': [{'para': globalparas['id'], 'type': TexFile, 'requirerights': ['owner', 'collaborator']}]
},
'createdir': {
'command': folder.createDir,
'parameters': [{'para': globalparas['id'], 'type': Folder, 'requirerights': ['owner', 'collaborator']},
{'para': globalparas['name'], 'stringcheck': True}]
},
'rmdir': {
'command': folder.rmDir,
'parameters': [{'para': globalparas['id'], 'type': Folder,
'requirerights': ['owner', 'collaborator'], 'lockcheck': True}]
},
'renamedir': {
'command': folder.renameDir,
'parameters': [{'para': globalparas['id'], 'type': Folder,
'requirerights': ['owner', 'collaborator']},
{'para': globalparas['name'], 'stringcheck': True}]
},
'movedir': {
'command': folder.moveDir,
'parameters': [{'para': globalparas['id'], 'type': Folder,
'requirerights': ['owner', 'collaborator'], 'lockcheck': True},
{'para': globalparas['folderid'], 'type': Folder, 'requirerights': ['owner', 'collaborator']}]
},
'listfiles': {
'command': folder.listFiles,
'parameters': [{'para': globalparas['id'], 'type': Folder, 'requirerights': ['owner', 'collaborator']}]
},
'template2project': {
'command': template.template2Project,
'parameters': [{'para': globalparas['id'], 'type': ProjectTemplate},
{'para': globalparas['name'], 'stringcheck': True}]
},
'project2template': {
'command': template.project2Template,
'parameters': [{'para': globalparas['id'], 'type': Project, 'requirerights': ['owner', 'collaborator']},
{'para': globalparas['name'], 'stringcheck': True}]
},
'templaterm': {
'command': template.templateRm,
'parameters': [{'para': globalparas['id'], 'type': ProjectTemplate}]
},
'templaterename': {
'command': template.templateRename,
'parameters': [{'para': globalparas['id'], 'type': ProjectTemplate},
{'para': globalparas['name'], 'stringcheck': True}]
},
'listtemplates': {
'command': template.listTemplates,
'parameters': []
}
}
available_commands_output = {}
for key, value in available_commands.items():
parameters = []
for paras in value['parameters']:
globalparainfo = (paras['para']).copy()
value = {'para': globalparainfo}
if globalparainfo.get('type'):
del globalparainfo['type']
parameters.append(value)
if key == 'uploadfiles' or key == 'importzip':
parameters.append({'para': {'name': 'files'}})
available_commands_output.update({key: parameters})
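# Editor-added illustration (not part of the original file): for example,
# available_commands_output['projectcreate'] ends up as
#     [{'para': {'name': 'name'}}]
# i.e. the type objects are stripped before the parameter map is exposed.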
@login_required
def debug(request):
return render(request, 'documentPoster.html')
# interface function
# provides an interface for communication between client and server
# reads the command passed by the client via POST data
# and executes the corresponding method
@login_required
@require_http_methods(['POST', 'GET'])
def execute(request):
if request.method == 'POST' and 'command' in request.POST:
        # fetch the current user
user = request.user
        # if the command key was not found,
        # return an error message
if request.POST['command'] not in available_commands:
return util.jsonErrorResponse(ERROR_MESSAGES['COMMANDNOTFOUND'], request)
args = []
        # the current command
c = available_commands[request.POST['command']]
        # parameters of this command
paras = c['parameters']
        # iterate over all parameters of the command
for para in paras:
            # if the parameter was not found, or a parameter that is supposed to hold an id
            # contains characters other than digits, return an error message
if request.POST.get(para['para']['name']) is None:
return util.jsonErrorResponse(ERROR_MESSAGES['MISSINGPARAMETER'] % (para['para']), request)
elif para['para']['type'] == int and (not request.POST.get(para['para']['name']).isdigit()):
return util.jsonErrorResponse(ERROR_MESSAGES['MISSINGPARAMETER'] % (para['para']), request)
            # otherwise add the parameter to the argument list
else:
args.append(request.POST[para['para']['name']])
            # test for invalid strings
if para.get('stringcheck'):
failstring, failurereturn = util.checkObjectForInvalidString(
request.POST.get(para['para']['name']), request)
if not failstring:
return failurereturn
elif para.get('filenamecheck'):
failstring, failurereturn = util.checkFileForInvalidString(
request.POST.get(para['para']['name']), request)
if not failstring:
return failurereturn
            # check that the user has rights to the object with the given id
            # and that it exists
if para.get('type') and para['para']['type'] == int:
objType = para.get('type')
objId = request.POST.get(para['para']['name'])
requireRights = para.get('requirerights', ['owner'])
lockcheck = para.get('lockcheck', False)
if objType == Project:
rights, failurereturn = util.checkIfProjectExistsAndUserHasRights(objId, user, request,
requireRights)
if not rights:
return failurereturn
elif objType == Folder:
rights, failurereturn = util.checkIfDirExistsAndUserHasRights(objId, user, request, requireRights, lockcheck)
if not rights:
return failurereturn
elif objType == File:
rights, failurereturn = util.checkIfFileExistsAndUserHasRights(objId, user, request, requireRights, lockcheck,
objecttype=File)
if not rights:
return failurereturn
elif objType == TexFile:
rights, failurereturn = util.checkIfFileExistsAndUserHasRights(objId, user, request, requireRights, lockcheck,
objecttype=TexFile)
if not rights:
return failurereturn
elif objType == PlainTextFile:
rights, failurereturn = util.checkIfFileExistsAndUserHasRights(objId, user, request, requireRights, lockcheck,
objecttype=PlainTextFile)
if not rights:
return failurereturn
elif objType == ProjectTemplate:
                    # check whether the template exists and the user has rights to it
emptystring, failurereturn = util.checkIfTemplateExistsAndUserHasRights(objId, user, request)
if not emptystring:
return failurereturn
        # execute the given command
return c['command'](request, user, *args)
elif request.method == 'GET' and request.GET.get('command'):
command = request.GET.get('command')
pdfid = request.GET.get('id')
texid = request.GET.get('texid')
defaultpdfPath = filepath = os.path.join(settings.BASE_DIR, 'app', 'static', 'default.pdf')
if (pdfid and not pdfid.isdigit()) or (texid and not texid.isdigit()):
return serve(request, os.path.basename(defaultpdfPath), os.path.dirname(defaultpdfPath))
if command == 'getpdf' and pdfid:
requireRights = ['owner', 'collaborator']
rights, failurereturn = util.checkIfFileExistsAndUserHasRights(pdfid, request.user, request, requireRights, lockcheck=False,
objecttype=PDF)
if not rights:
return serve(request, os.path.basename(defaultpdfPath), os.path.dirname(defaultpdfPath))
return file.getPDF(request, request.user, pdfid=pdfid, default=defaultpdfPath)
elif command == 'getpdf' and texid:
requireRights = ['owner', 'collaborator']
rights, failurereturn = util.checkIfFileExistsAndUserHasRights(texid, request.user, request, requireRights, lockcheck=False,
objecttype=TexFile)
if not rights:
return serve(request, os.path.basename(defaultpdfPath), os.path.dirname(defaultpdfPath))
return file.getPDF(request, request.user, texid=texid, default=defaultpdfPath)
return util.jsonErrorResponse(ERROR_MESSAGES['MISSINGPARAMETER'] % 'unknown', request)
| gpl-3.0 | 8,602,562,680,739,277,000 | -5,261,911,191,810,481,000 | 42.63388 | 136 | 0.568503 | false |
DavidNorman/tensorflow | tensorflow/python/tpu/session_support.py | 5 | 15060 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Operations for handling session logging and shutdown notifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
_WATCHDOG = None
class CoordinatorResetError(errors.AbortedError):
"""Raised when the monitored session should reset."""
def __init__(self):
errors.AbortedError.__init__(
self, None, None, 'Resetting session loop due to worker shutdown.')
def _clone_session(session, graph=None):
return session_lib.Session(
target=session.sess_str,
config=session._config, # pylint: disable=protected-access
graph=graph if graph else session.graph)
class WorkerHeartbeatManager(object):
"""Manages the status/heartbeat monitor for a set of workers."""
def __init__(self, session, devices, heartbeat_ops, request_placeholder):
"""Construct a new WorkerHeartbeatManager.
(Prefer using `WorkerHeartbeatManager.from_devices` when possible.)
Args:
session: `tf.compat.v1.Session`, session to use for heartbeat operations.
devices: `list[string]` Set of devices to connect to.
heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
the WorkerHeartbeatRequest protocol buffer.
"""
self._session = session
self._devices = devices
self._ops = heartbeat_ops
self._request_placeholder = request_placeholder
@staticmethod
def from_devices(session, devices):
"""Construct a heartbeat manager for the given devices."""
if not devices:
logging.error('Trying to create heartbeat manager with no devices?')
logging.info('Creating heartbeat manager for %s', devices)
request_placeholder = array_ops.placeholder(
name='worker_heartbeat_request', dtype=dtypes.string)
heartbeat_ops = []
for device in devices:
with ops.device(device):
heartbeat_ops.append(tpu_ops.worker_heartbeat(request_placeholder))
return WorkerHeartbeatManager(session, devices, heartbeat_ops,
request_placeholder)
def num_workers(self):
return len(self._devices)
def configure(self, message):
"""Configure heartbeat manager for all devices.
Args:
message: `event_pb2.WorkerHeartbeatRequest`
Returns: `None`
"""
logging.info('Configuring worker heartbeat: %s',
text_format.MessageToString(message))
self._session.run(self._ops,
{self._request_placeholder: message.SerializeToString()})
def ping(self, request=None, timeout_in_ms=5000):
"""Ping all workers, returning the parsed status results."""
if request is None:
request = event_pb2.WorkerHeartbeatRequest()
options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
results = self._session.run(
self._ops,
feed_dict={self._request_placeholder: request.SerializeToString()},
options=options)
parsed_results = [
event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
for res_pb in results
]
logging.debug('Ping results: %s', parsed_results)
return parsed_results
def lame_workers(self):
"""Ping all workers, returning manager containing lame workers (or None)."""
ping_results = self.ping()
lame_workers = []
for ping_response, device, op in zip(ping_results, self._devices,
self._ops):
if ping_response.health_status != event_pb2.OK:
lame_workers.append((device, op))
if not lame_workers:
return None
bad_devices, bad_ops = zip(*lame_workers)
return WorkerHeartbeatManager(self._session, bad_devices, bad_ops,
self._request_placeholder)
def __repr__(self):
return 'HeartbeatManager(%s)' % ','.join(self._devices)
# Default timeout is set to allow other shutdown triggered operations (log
# flushing etc) to finish before terminating the worker.
def shutdown(self, wait_time_in_ms=60000, exit_code=None):
"""Shutdown all workers after `shutdown_timeout_secs`."""
logging.info('Shutting down %s.', self)
req = event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=wait_time_in_ms),
shutdown_mode=event_pb2.SHUTDOWN_AFTER_TIMEOUT,
exit_code=event_pb2.RequestedExitCode(
exit_code=exit_code) if exit_code is not None else None)
self.configure(req)
# Wait for workers to shutdown.
sleep_sec = 10.0 + wait_time_in_ms / 1000
logging.info('Waiting %.2f seconds for worker shutdown.', sleep_sec)
time.sleep(sleep_sec)
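# A minimal usage sketch for WorkerHeartbeatManager: ping every worker and
# shut down the ones that report an unhealthy status. The function name and
# the 60-second shutdown grace period are illustrative choices only.
def _example_ping_and_shutdown_lame_workers(session):
  devices = all_worker_devices(session)
  manager = WorkerHeartbeatManager.from_devices(session, devices)
  manager.ping()  # returns parsed WorkerHeartbeatResponse protos
  lame = manager.lame_workers()  # None when every worker is healthy
  if lame is not None:
    lame.shutdown(wait_time_in_ms=60000)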
def all_worker_devices(session):
"""Return a list of devices for each worker in the system."""
devices = session.list_devices()
devices_that_support_heartbeats = []
for device in devices:
name = device.name
# Pick devices that have a TPU but target the attached CPU
if ':TPU:0' in name and 'coordinator' not in name:
devices_that_support_heartbeats.append(name.replace('TPU', 'CPU'))
return devices_that_support_heartbeats
class WatchdogManager(threading.Thread):
"""Configures worker watchdog timer and handles periodic pings.
Usage:
# Ping workers every minute, shutting down workers if they haven't received
# a ping after 1 hour.
watchdog_manager = WatchdogManager(
ping_interval=60, shutdown_timeout=3600
)
# Use as a context manager, resetting watchdog on context exit:
with watchdog_manager:
session.run(...)
# Or setup globally; watchdog will remain active until program exit.
watchdog_manager.configure_and_run()
"""
def __init__(self,
session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Initialize a watchdog manager.
Args:
session: Session connected to worker devices. A cloned session and graph
will be created for managing worker pings.
devices: Set of devices to monitor. If none, all workers will be
monitored.
ping_interval: Time, in seconds, between watchdog pings.
shutdown_timeout: Time, in seconds, before watchdog timeout.
"""
threading.Thread.__init__(self)
self.ping_interval = ping_interval
self.shutdown_timeout = shutdown_timeout
self.daemon = True
self._config = session._config # pylint: disable=protected-access
self._target = session.sess_str
self._running = False
self._devices = devices
self._graph = None
self._session = None
self._worker_manager = None
def _reset_manager(self):
"""Reset the graph, session and worker manager."""
self._graph = ops.Graph()
self._session = session_lib.Session(
target=self._target,
graph=self._graph,
config=self._config,
)
if self._devices is None:
self._devices = all_worker_devices(self._session)
with self._graph.as_default():
self._worker_manager = WorkerHeartbeatManager.from_devices(
self._session, self._devices)
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(
timeout_ms=self.shutdown_timeout * 1000,),
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
def configure_and_run(self):
logging.info(
'Enabling watchdog timer with %d second timeout '
'and %d second ping interval.', self.shutdown_timeout,
self.ping_interval)
self._reset_manager()
self._running = True
self.start()
def stop(self):
logging.info('Stopping worker watchdog.')
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=-1,),
shutdown_mode=event_pb2.NOT_CONFIGURED))
self._running = False
self.join()
def __enter__(self):
self.configure_and_run()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self):
# Don't fetch logs or adjust timing: just ping the watchdog.
#
# If we hit an exception, reset our session as it is likely broken.
while self._running:
try:
self._worker_manager.ping(request=None)
time.sleep(self.ping_interval)
except errors.OpError as e:
# Catch any TF errors that occur so we don't stop sending heartbeats
logging.debug('Caught error while sending heartbeat: %s', e)
self._reset_manager()
def start_worker_watchdog(session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Start global worker watchdog to shutdown workers on coordinator exit."""
global _WATCHDOG
if _WATCHDOG is None:
# Ensure we can send a few pings before we timeout!
ping_interval = min(shutdown_timeout / 10., ping_interval)
_WATCHDOG = WatchdogManager(session, devices, ping_interval,
shutdown_timeout)
_WATCHDOG.configure_and_run()
class GracefulShutdownHook(session_run_hook.SessionRunHook):
"""Session hook that watches for shutdown events.
If a shutdown is indicated, `saver.save(checkpoint_prefix)` is executed, and a
SystemShutdown exception is raised to terminate the main session. If `saver`
is None the `SAVERS` collection will be read to find a saver.
`on_shutdown_hooks` is an optional list of functions that should be called
after checkpointing. The function is called with (`run_context`,
`all_workers`, `lame_workers`).
If `heartbeat_group` is not specified, it will default to all CPU workers
in the system.
"""
def __init__(self, checkpoint_prefix, saver=None, on_shutdown_hooks=None):
self._saver = saver
self._checkpoint_prefix = checkpoint_prefix
self._on_shutdown_hooks = on_shutdown_hooks if on_shutdown_hooks else []
# Worker heartbeats are managed independently of the main training graph.
self._graph = ops.Graph()
self._workers = None
self._session = None
self._heartbeat_supported = False
def after_create_session(self, training_session, coord): # pylint: disable=unused-argument
# N.B. We have to pull the global step here to avoid it being unavailable
# at checkpoint time; the graph has been frozen at that point.
if training_util.get_global_step() is None and self.saver() is not None:
raise ValueError(
'Saver defined but no global step. Run `get_or_create_global_step()`'
' in your model definition to allow checkpointing.')
with self._graph.as_default():
logging.info('Installing graceful shutdown hook.')
self._session = _clone_session(training_session, self._graph)
self._workers = WorkerHeartbeatManager.from_devices(
self._session, all_worker_devices(self._session))
self._heartbeat_supported = self._workers.num_workers() > 0
if self._heartbeat_supported:
try:
self._workers.configure(
event_pb2.WorkerHeartbeatRequest(
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
except errors.InvalidArgumentError:
logging.warn(
'TPU device does not support heartbeats. Failure '
'handling will be disabled.')
self._heartbeat_supported = False
else:
logging.warn(
          'No workers support heartbeats. Failure handling will be disabled.')
def saver(self):
if self._saver:
return self._saver
savers = ops.get_collection(ops.GraphKeys.SAVERS)
if not savers:
return None
if not isinstance(savers, list):
return savers
if len(savers) > 1:
logging.error(
'Multiple savers in the SAVERS collection. On-demand checkpointing '
'will be disabled. Pass an explicit `saver` to the constructor to '
'override this behavior.')
return None
return savers[0]
def after_run(self, run_context, run_values):
del run_values
if not self._heartbeat_supported:
return
lame_workers = self._workers.lame_workers()
if lame_workers:
logging.info('ShutdownHook: lame workers found: %s', lame_workers)
if self.saver():
logging.info('ShutdownHook: saving checkpoint to %s',
self._checkpoint_prefix)
self.saver().save(
run_context.session,
self._checkpoint_prefix,
global_step=training_util.get_global_step(),
write_state=True,
)
else:
logging.info('ShutdownHook: no Saver defined.')
for fn in self._on_shutdown_hooks:
fn(run_context, self._workers, lame_workers)
class ResetComputation(object):
"""Hook to reset a TPUEstimator computation loop.
This hook shuts down all workers and resets the monitored session loop by
throwing a CoordinatorResetError.
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
del run_context, lame_workers
all_workers.shutdown()
logging.info('Resetting coordinator.')
raise CoordinatorResetError()
class ShutdownLameWorkers(object):
"""Shutdown lamed workers.
Processing will continue normally (typically by waiting for the down
workers to be restarted).
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
lame_workers.shutdown()
class ShutdownAllWorkers(object):
"""Shutdown all workers.
Processing will continue normally (typically by waiting for the down
workers to be restarted).
"""
def __init__(self):
pass
def __call__(self, run_context, all_workers, lame_workers):
all_workers.shutdown()
| apache-2.0 | -3,494,198,386,858,859,000 | -8,142,491,965,797,042,000 | 33.072398 | 93 | 0.673838 | false |
psyonara/agonizomai | sermons/models.py | 1 | 5153 | from __future__ import unicode_literals
from django.db import models
from django.template.defaultfilters import slugify
from bible.models import BibleBook
from useraccounts.models import UserAccount
class Author(models.Model):
name = models.CharField(null=False, blank=False, max_length=50)
name_slug = models.SlugField(max_length=50, null=True, blank=True, db_index=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if self.name_slug is None or self.name_slug == "":
self.name_slug = slugify(self.name)
super(Author, self).save(*args, **kwargs)
class AuthorSetting(models.Model):
"""
Holds user settings specific to an author.
"""
author = models.ForeignKey(Author, on_delete=models.CASCADE)
user = models.ForeignKey("useraccounts.UserAccount", on_delete=models.CASCADE)
name = models.CharField(max_length=30, db_index=True)
value = models.CharField(max_length=50)
class Series(models.Model):
name = models.CharField(null=False, blank=False, max_length=100)
name_slug = models.SlugField(max_length=100, null=True, blank=True, db_index=True)
author = models.ForeignKey(Author, null=False, blank=False, on_delete=models.CASCADE)
complete = models.BooleanField(default=False)
def __str__(self):
return "%s (%s)" % (self.name, self.author.name)
def save(self, *args, **kwargs):
if self.name_slug is None or self.name_slug == "":
self.name_slug = slugify(self.name)
super(Series, self).save(*args, **kwargs)
class Sermon(models.Model):
date_added = models.DateTimeField(auto_now_add=True)
date_preached = models.DateField(null=True, blank=True)
author = models.ForeignKey(Author, related_name="sermons", on_delete=models.CASCADE)
title = models.CharField(null=False, blank=False, max_length=100)
title_slug = models.SlugField(max_length=100, null=True, blank=True, db_index=True)
series = models.ForeignKey(
Series, null=True, blank=True, related_name="sermons", on_delete=models.CASCADE
)
ref = models.CharField(max_length=20, null=True, blank=True)
def get_audio_file(self):
files = self.media_files.filter(media_type=1)
return files[0] if len(files) > 0 else None
def __str__(self):
return "%s (by %s)" % (self.title, self.author.name)
def save(self, *args, **kwargs):
if self.title_slug is None or self.title_slug == "":
self.title_slug = slugify(self.title)
super(Sermon, self).save(*args, **kwargs)
class Meta:
ordering = ["-date_preached"]
class ScriptureRef(models.Model):
sermon = models.ForeignKey(Sermon, related_name="scripture_refs", on_delete=models.CASCADE)
bible_book = models.ForeignKey(BibleBook, on_delete=models.CASCADE)
chapter_begin = models.PositiveSmallIntegerField()
chapter_end = models.PositiveSmallIntegerField()
verse_begin = models.PositiveSmallIntegerField(null=True, blank=True)
verse_end = models.PositiveSmallIntegerField(null=True, blank=True)
def __str__(self):
end_string = ""
if self.chapter_begin == self.chapter_end:
end_string += "%s %s" % (self.bible_book.name, self.chapter_begin)
if self.verse_begin is not None and self.verse_end is not None:
if self.verse_begin == self.verse_end:
end_string += ":%s" % (self.verse_begin)
else:
end_string += ":%s-%s" % (self.verse_begin, self.verse_end)
else:
end_string += "%s %s" % (self.bible_book.name, self.chapter_begin)
if self.verse_begin is None and self.verse_end is None:
end_string += "-%s" % (self.chapter_end)
else:
end_string += ":%s-%s:%s" % (self.verse_begin, self.chapter_end, self.verse_end)
return end_string
class MediaFile(models.Model):
MEDIA_TYPE_CHOICES = ((1, "audio"), (2, "video"), (3, "text"), (4, "pdf"))
LOCATION_TYPE_CHOICES = ((1, "url"),)
sermon = models.ForeignKey(Sermon, related_name="media_files", on_delete=models.CASCADE)
media_type = models.PositiveSmallIntegerField(choices=MEDIA_TYPE_CHOICES, null=False, default=1)
file_size = models.PositiveIntegerField(null=True, blank=True)
location_type = models.PositiveSmallIntegerField(
choices=LOCATION_TYPE_CHOICES, null=False, default=1
)
location = models.CharField(null=False, max_length=250)
def __str__(self):
return "%s (%s)" % (self.location, self.sermon.title)
class SermonSession(models.Model):
sermon = models.ForeignKey(Sermon, related_name="sessions", on_delete=models.CASCADE)
session_started = models.DateTimeField(auto_now_add=True)
session_updated = models.DateTimeField(auto_now=True)
position = models.PositiveSmallIntegerField(default=0) # in seconds from start of file
total_duration = models.PositiveSmallIntegerField(default=0) # in seconds
user = models.ForeignKey(UserAccount, on_delete=models.CASCADE)
completed = models.BooleanField(default=False)
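# A minimal sketch of how ScriptureRef.__str__ renders a reference. The book
# name "John" is assumed to be a valid BibleBook.name value; the chapter and
# verse numbers are illustrative only.
def _example_scripture_ref_str():
    ref = ScriptureRef(
        bible_book=BibleBook(name="John"),
        chapter_begin=3,
        chapter_end=3,
        verse_begin=16,
        verse_end=18,
    )
    return str(ref)  # "John 3:16-18"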
| mit | 1,191,321,339,836,954,000 | -8,908,622,426,777,752,000 | 39.896825 | 100 | 0.663497 | false |
funshine/rpidemo | mqtt_oled/oled_test_luma.py | 1 | 1273 | #!/usr/bin/python/
# coding: utf-8
import time
import datetime
from luma.core.interface.serial import i2c, spi
from luma.core.render import canvas
from luma.oled.device import ssd1306, ssd1325, ssd1331, sh1106
def do_nothing(obj):
pass
# rev.1 users set port=0
# substitute spi(device=0, port=0) below if using that interface
# serial = i2c(port=1, address=0x3C)
serial = spi(device=0, port=0)
# substitute ssd1331(...) or sh1106(...) below if using that device
# device = ssd1306(serial, rotate=1)
device = sh1106(serial)
# device.cleanup = do_nothing
print("Testing display Hello World")
with canvas(device) as draw:
draw.rectangle(device.bounding_box, outline="white", fill="black")
draw.text((30, 40), "Hello World", fill="white")
time.sleep(3)
print("Testing display ON/OFF...")
for _ in range(5):
time.sleep(0.5)
device.hide()
time.sleep(0.5)
device.show()
print("Testing clear display...")
time.sleep(2)
device.clear()
print("Testing screen updates...")
time.sleep(2)
for x in range(40):
with canvas(device) as draw:
now = datetime.datetime.now()
draw.text((x, 4), str(now.date()), fill="white")
draw.text((10, 16), str(now.time()), fill="white")
time.sleep(0.1)
print("Quit, cleanup...")
| mit | -8,267,113,571,965,462,000 | -5,557,212,255,792,604,000 | 23.018868 | 70 | 0.671642 | false |
VikParuchuri/evolve-music | midi/sequencer_alsa/sequencer.py | 5 | 16997 | import select
import sequencer_alsa as S
import midi
__SWIG_NS_SET__ = set(['__class__', '__del__', '__delattr__', '__dict__', '__doc__', '__getattr__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__swig_getmethods__', '__swig_setmethods__', '__weakref__', 'this', 'thisown'])
def stringify(name, obj, indent=0):
retstr = ''
datafields = False
if getattr(obj, 'this', False):
datafields = dir(obj)
# filter unwanted names
datafields = list(set(datafields) - __SWIG_NS_SET__)
retstr += '%s%s ::\n' % (' ' * indent, name)
for key in datafields:
value = getattr(obj, key, "n/a")
retstr += stringify(key, value, indent+1)
else:
retstr += '%s%s: %s\n' % (' ' * indent, name, obj)
return retstr
class Sequencer(object):
__ARGUMENTS__ = {
'alsa_sequencer_name':'__sequencer__',
'alsa_sequencer_stream':S.SND_SEQ_OPEN_DUPLEX,
'alsa_sequencer_mode':S.SND_SEQ_NONBLOCK,
'alsa_sequencer_type':'default',
'alsa_port_name':'__port__',
'alsa_port_caps':S.SND_SEQ_PORT_CAP_READ,
'alsa_port_type':S.SND_SEQ_PORT_TYPE_MIDI_GENERIC,
'alsa_queue_name':'__queue__',
'sequencer_tempo':120,
'sequencer_resolution':1000,
}
DefaultArguments = {}
def __init__(self, **ns):
# seed with baseline arguments
self.__dict__.update(self.__ARGUMENTS__)
# update with default arguments from concrete class
self.__dict__.update(self.DefaultArguments)
# apply user arguments
self.__dict__.update(ns)
self.client = None
self._queue_running = False
self._poll_descriptors = []
self.init()
def __del__(self):
if self.client:
S.snd_seq_close(self.client)
def init(self):
self._init_handle()
self._init_port()
self._init_queue()
def set_nonblock(self, nonblock=True):
if nonblock:
self.alsa_sequencer_mode = S.SND_SEQ_NONBLOCK
else:
self.alsa_sequencer_mode = 0
S.snd_seq_nonblock(self.client, self.alsa_sequencer_mode)
def get_nonblock(self):
if self.alsa_sequencer_mode == S.SND_SEQ_NONBLOCK:
return True
else:
return False
def _error(self, errcode):
strerr = S.snd_strerror(errcode)
msg = "ALSAError[%d]: %s" % (errcode, strerr)
raise RuntimeError, msg
def _init_handle(self):
ret = S.open_client(self.alsa_sequencer_name,
self.alsa_sequencer_type,
self.alsa_sequencer_stream,
self.alsa_sequencer_mode)
if ret == None:
# XXX: global error
self._error(ret)
self.client = ret
self.client_id = S.snd_seq_client_id(self.client)
self.output_buffer_size = S.snd_seq_get_output_buffer_size(self.client)
self.input_buffer_size = S.snd_seq_get_input_buffer_size(self.client)
self._set_poll_descriptors()
def _init_port(self):
err = S.snd_seq_create_simple_port(self.client,
self.alsa_port_name,
self.alsa_port_caps,
self.alsa_port_type)
if err < 0: self._error(err)
self.port = err
def _new_subscribe(self, sender, dest, read=True):
subscribe = S.new_port_subscribe()
if read:
self.read_sender = sender
self.read_dest = dest
S.snd_seq_port_subscribe_set_sender(subscribe, self.read_sender)
S.snd_seq_port_subscribe_set_dest(subscribe, self.read_dest)
else:
self.write_sender = sender
self.write_dest = dest
S.snd_seq_port_subscribe_set_sender(subscribe, self.write_sender)
S.snd_seq_port_subscribe_set_dest(subscribe, self.write_dest)
S.snd_seq_port_subscribe_set_queue(subscribe, self.queue)
return subscribe
def _subscribe_port(self, subscribe):
err = S.snd_seq_subscribe_port(self.client, subscribe)
if err < 0: self._error(err)
def _my_address(self):
addr = S.snd_seq_addr_t()
addr.client = self.client_id
addr.port = self.port
return addr
def _new_address(self, client, port):
addr = S.snd_seq_addr_t()
addr.client = int(client)
addr.port = int(port)
return addr
def _init_queue(self):
err = S.snd_seq_alloc_named_queue(self.client, self.alsa_queue_name)
if err < 0: self._error(err)
self.queue = err
adjtempo = int(60.0 * 1000000.0 / self.sequencer_tempo)
S.init_queue_tempo(self.client, self.queue,
adjtempo, self.sequencer_resolution)
def _control_queue(self, ctype, cvalue, event=None):
err = S.snd_seq_control_queue(self.client, self.queue, ctype, cvalue, event)
if err < 0: self._error(err)
self.drain()
def _set_event_broadcast(self, event):
        event.source.client = self.client_id
        event.source.port = self.port
event.dest.client = S.SND_SEQ_ADDRESS_SUBSCRIBERS
event.dest.port = S.SND_SEQ_ADDRESS_UNKNOWN
def queue_get_tick_time(self):
status = S.new_queue_status(self.client, self.queue)
S.snd_seq_get_queue_status(self.client, self.queue, status)
res = S.snd_seq_queue_status_get_tick_time(status)
S.free_queue_status(status)
return res
def queue_get_real_time(self):
status = S.new_queue_status(self.client, self.queue)
S.snd_seq_get_queue_status(self.client, self.queue, status)
res = S.snd_seq_queue_status_get_real_time(status)
S.free_queue_status(status)
return (res.tv_sec, res.tv_nsec)
def change_tempo(self, tempo, event=None):
adjbpm = int(60.0 * 1000000.0 / tempo)
self._control_queue(S.SND_SEQ_EVENT_TEMPO, adjbpm, event)
self.sequencer_tempo = tempo
return True
def start_sequencer(self, event=None):
if not self._queue_running:
self._control_queue(S.SND_SEQ_EVENT_START, 0, event)
self._queue_running = True
def continue_sequencer(self, event=None):
if not self._queue_running:
self._control_queue(S.SND_SEQ_EVENT_CONTINUE, 0, event)
self._queue_running = True
def stop_sequencer(self, event=None):
if self._queue_running:
self._control_queue(S.SND_SEQ_EVENT_STOP, 0, event)
self._queue_running = False
def drain(self):
S.snd_seq_drain_output(self.client)
def queue_eventlen(self):
status = S.new_queue_status(self.client, self.queue)
S.snd_seq_queue_status_get_events(status)
def _set_poll_descriptors(self):
self._poll_descriptors = S.client_poll_descriptors(self.client)
def configure_poll(self, poll):
for fd in self._poll_descriptors:
poll.register(fd, select.POLLIN)
def drop_output(self):
S.snd_seq_drop_output_buffer(self.client)
def output_pending(self):
return S.snd_seq_event_output_pending(self.client)
## EVENT HANDLERS
##
def event_write(self, event, direct=False, relative=False, tick=False):
#print event.__class__, event
## Event Filter
if isinstance(event, midi.EndOfTrackEvent):
return
seqev = S.snd_seq_event_t()
## common
seqev.dest.client = self.write_dest.client
seqev.dest.port = self.write_dest.port
seqev.source.client = self.write_sender.client
seqev.source.port = self.write_sender.port
if direct:
# no scheduling
seqev.queue = S.SND_SEQ_QUEUE_DIRECT
else:
seqev.queue = self.queue
seqev.flags &= (S.SND_SEQ_TIME_STAMP_MASK|S.SND_SEQ_TIME_MODE_MASK)
if relative:
seqev.flags |= S.SND_SEQ_TIME_MODE_REL
else:
seqev.flags |= S.SND_SEQ_TIME_MODE_ABS
if tick:
seqev.flags |= S.SND_SEQ_TIME_STAMP_TICK
seqev.time.tick = event.tick
else:
seqev.flags |= S.SND_SEQ_TIME_STAMP_REAL
sec = int(event.msdelay / 1000)
nsec = int((event.msdelay - (sec * 1000)) * 1000000)
seqev.time.time.tv_sec = sec
seqev.time.time.tv_nsec = nsec
## Tempo Change
if isinstance(event, midi.SetTempoEvent):
adjtempo = int(60.0 * 1000000.0 / event.bpm)
seqev.type = S.SND_SEQ_EVENT_TEMPO
seqev.dest.client = S.SND_SEQ_CLIENT_SYSTEM
seqev.dest.port = S.SND_SEQ_PORT_SYSTEM_TIMER
seqev.data.queue.queue = self.queue
seqev.data.queue.param.value = adjtempo
## Note Event
elif isinstance(event, midi.NoteEvent):
if isinstance(event, midi.NoteOnEvent):
seqev.type = S.SND_SEQ_EVENT_NOTEON
if isinstance(event, midi.NoteOffEvent):
seqev.type = S.SND_SEQ_EVENT_NOTEOFF
seqev.data.note.channel = event.channel
seqev.data.note.note = event.pitch
seqev.data.note.velocity = event.velocity
## Control Change
elif isinstance(event, midi.ControlChangeEvent):
seqev.type = S.SND_SEQ_EVENT_CONTROLLER
seqev.data.control.channel = event.channel
seqev.data.control.param = event.control
seqev.data.control.value = event.value
## Program Change
elif isinstance(event, midi.ProgramChangeEvent):
seqev.type = S.SND_SEQ_EVENT_PGMCHANGE
seqev.data.control.channel = event.channel
seqev.data.control.value = event.value
## Pitch Bench
elif isinstance(event, midi.PitchWheelEvent):
seqev.type = S.SND_SEQ_EVENT_PITCHBEND
seqev.data.control.channel = event.channel
seqev.data.control.value = event.pitch
## Unknown
else:
print "Warning :: Unknown event type: %s" % event
return None
err = S.snd_seq_event_output(self.client, seqev)
if (err < 0): self._error(err)
self.drain()
return self.output_buffer_size - err
def event_read(self):
ev = S.event_input(self.client)
if ev and (ev < 0): self._error(ev)
if ev and ev.type in (S.SND_SEQ_EVENT_NOTEON, S.SND_SEQ_EVENT_NOTEOFF):
if ev.type == S.SND_SEQ_EVENT_NOTEON:
mev = midi.NoteOnEvent()
mev.channel = ev.data.note.channel
mev.pitch = ev.data.note.note
mev.velocity = ev.data.note.velocity
elif ev.type == S.SND_SEQ_EVENT_NOTEOFF:
mev = midi.NoteOffEvent()
mev.channel = ev.data.note.channel
mev.pitch = ev.data.note.note
mev.velocity = ev.data.note.velocity
if ev.time.time.tv_nsec:
# convert to ms
                mev.msdelay = \
(ev.time.time.tv_nsec / 1e6) + (ev.time.time.tv_sec * 1e3)
else:
mev.tick = ev.time.tick
return mev
else:
return None
class SequencerHardware(Sequencer):
SequencerName = "__hw__"
SequencerStream = S.SND_SEQ_OPEN_DUPLEX
SequencerType = "hw"
SequencerMode = 0
class Client(object):
def __init__(self, client, name):
self.client = client
self.name = name
self._ports = {}
def __str__(self):
retstr = '] client(%d) "%s"\n' % (self.client, self.name)
for port in self:
retstr += str(port)
return retstr
def add_port(self, port, name, caps):
port = self.Port(port, name, caps)
self._ports[name] = port
def __iter__(self):
return self._ports.itervalues()
def __len__(self):
return len(self._ports)
def get_port(self, key):
return self._ports[key]
__getitem__ = get_port
class Port(object):
def __init__(self, port, name, caps):
self.port = port
self.name = name
self.caps = caps
self.caps_read = self.caps & S.SND_SEQ_PORT_CAP_READ
self.caps_write = self.caps & S.SND_SEQ_PORT_CAP_WRITE
self.caps_subs_read = self.caps & S.SND_SEQ_PORT_CAP_SUBS_READ
self.caps_subs_write = self.caps & S.SND_SEQ_PORT_CAP_SUBS_WRITE
def __str__(self):
flags = []
if self.caps_read: flags.append('r')
if self.caps_write: flags.append('w')
if self.caps_subs_read: flags.append('sender')
if self.caps_subs_write: flags.append('receiver')
flags = str.join(', ', flags)
retstr = '] port(%d) [%s] "%s"\n' % (self.port, flags, self.name)
return retstr
def init(self):
self._clients = {}
self._init_handle()
self._query_clients()
def __iter__(self):
return self._clients.itervalues()
def __len__(self):
return len(self._clients)
def get_client(self, key):
return self._clients[key]
__getitem__ = get_client
def get_client_and_port(self, cname, pname):
client = self[cname]
port = client[pname]
return (client.client, port.port)
def __str__(self):
retstr = ''
for client in self:
retstr += str(client)
return retstr
def _query_clients(self):
self._clients = {}
S.snd_seq_drop_output(self.client)
cinfo = S.new_client_info()
pinfo = S.new_port_info()
S.snd_seq_client_info_set_client(cinfo, -1)
# for each client
while S.snd_seq_query_next_client(self.client, cinfo) >= 0:
client = S.snd_seq_client_info_get_client(cinfo)
cname = S.snd_seq_client_info_get_name(cinfo)
cobj = self.Client(client, cname)
self._clients[cname] = cobj
# get port data
S.snd_seq_port_info_set_client(pinfo, client)
S.snd_seq_port_info_set_port(pinfo, -1)
while (S.snd_seq_query_next_port(self.client, pinfo) >= 0):
cap = S.snd_seq_port_info_get_capability(pinfo)
client = S.snd_seq_port_info_get_client(pinfo)
port = S.snd_seq_port_info_get_port(pinfo)
pname = S.snd_seq_port_info_get_name(pinfo)
cobj.add_port(port, pname, cap)
class SequencerRead(Sequencer):
DefaultArguments = {
'sequencer_name':'__SequencerRead__',
'sequencer_stream':not S.SND_SEQ_NONBLOCK,
'alsa_port_caps':S.SND_SEQ_PORT_CAP_WRITE | S.SND_SEQ_PORT_CAP_SUBS_WRITE,
}
def subscribe_port(self, client, port):
sender = self._new_address(client, port)
dest = self._my_address()
subscribe = self._new_subscribe(sender, dest, read=True)
S.snd_seq_port_subscribe_set_time_update(subscribe, True)
#S.snd_seq_port_subscribe_set_time_real(subscribe, True)
self._subscribe_port(subscribe)
class SequencerWrite(Sequencer):
DefaultArguments = {
'sequencer_name':'__SequencerWrite__',
'sequencer_stream':not S.SND_SEQ_NONBLOCK,
'alsa_port_caps':S.SND_SEQ_PORT_CAP_READ | S.SND_SEQ_PORT_CAP_SUBS_READ
}
def subscribe_port(self, client, port):
sender = self._my_address()
dest = self._new_address(client, port)
subscribe = self._new_subscribe(sender, dest, read=False)
self._subscribe_port(subscribe)
class SequencerDuplex(Sequencer):
DefaultArguments = {
'sequencer_name':'__SequencerWrite__',
'sequencer_stream':not S.SND_SEQ_NONBLOCK,
'alsa_port_caps':S.SND_SEQ_PORT_CAP_READ | S.SND_SEQ_PORT_CAP_SUBS_READ |
S.SND_SEQ_PORT_CAP_WRITE | S.SND_SEQ_PORT_CAP_SUBS_WRITE
}
def subscribe_read_port(self, client, port):
sender = self._new_address(client, port)
dest = self._my_address()
subscribe = self._new_subscribe(sender, dest, read=True)
S.snd_seq_port_subscribe_set_time_update(subscribe, True)
#S.snd_seq_port_subscribe_set_time_real(subscribe, True)
self._subscribe_port(subscribe)
def subscribe_write_port(self, client, port):
sender = self._my_address()
dest = self._new_address(client, port)
subscribe = self._new_subscribe(sender, dest, read=False)
self._subscribe_port(subscribe)
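# A minimal sketch of driving a synth through SequencerWrite. The destination
# client and port numbers (128, 0) are assumptions; list the real ones with
# print(SequencerHardware()). Requires the python-midi package's event classes.
def _example_send_one_note():
    seq = SequencerWrite(sequencer_tempo=120)
    seq.subscribe_port(128, 0)
    seq.start_sequencer()
    seq.event_write(midi.NoteOnEvent(tick=0, pitch=60, velocity=90), tick=True)
    seq.event_write(midi.NoteOffEvent(tick=480, pitch=60, velocity=0), tick=True)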
| agpl-3.0 | 1,431,519,563,981,031,200 | 1,281,872,724,270,424,000 | 36.520971 | 318 | 0.564453 | false |
nathandunn/jbrowse | tests/selenium_tests/jbrowse_selenium/JBrowseTest.py | 2 | 12948 | import os
import time
import re
import unittest
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import track_selectors
class JBrowseTest (object):
data_dir = None
base_url = None
# this "time dilation" factor hugely increases all our fixed waiting times when
# running under Travis CI, because the Travis environment is much slower than a desktop.
time_dilation = 12 if os.getenv('CI','false') == 'true' else 1
tracksel_type = 'Hierarchical'
## TestCase override - use instead of constructor
def setUp( self ):
self.track_selector = getattr( track_selectors, '%sTrackSelector' % self.tracksel_type )( self, self.time_dilation )
#self.browser = self._getChrome()
self.browser = self._getBrowser()
if self.base_url and self.data_dir: self.browser.get(self.base_url+self.data_dir)
else:
base = self.baseURL()
self.browser.get(
base + ( '&' if base.find('?') >= 0 else '?' )
+ ( "data="+self.data_dir if self.data_dir else "" )
)
if not os.getenv('DEBUG'):
self.addCleanup(self.browser.quit)
self._waits_for_load()
def _getBrowser( self ):
browser = os.getenv('SELENIUM_BROWSER','Firefox').lower()
if browser == 'firefox':
fp = webdriver.FirefoxProfile()
fp.set_preference("browser.download.folderList",2)
fp.set_preference("browser.download.manager.showWhenStarting",False)
fp.set_preference("browser.download.dir", os.getcwd())
fp.set_preference("browser.helperApps.neverAsk.saveToDisk","application/x-bedgraph,application/x-wiggle,application/x-bed")
fp.set_preference('webdriver.log.driver.ignore',True)
return webdriver.Firefox( firefox_profile = fp )
elif browser == 'chrome':
options = webdriver.ChromeOptions()
if os.getenv('CHROME_HEADLESS'):
options.add_argument('headless')
options.add_argument('disable-gpu')
return webdriver.Chrome(chrome_options=options)
elif browser == 'phantom' or browser == 'phantomjs':
return webdriver.PhantomJS()
elif browser == 'travis_saucelabs':
username = os.environ["SAUCE_USERNAME"]
access_key = os.environ["SAUCE_ACCESS_KEY"]
#capabilities["tunnel-identifier"] = os.environ["TRAVIS_JOB_NUMBER"]
hub_url = "%s:%s@localhost:4445" % (username, access_key)
return webdriver.Remote(desired_capabilities=capabilities, command_executor="https://%s/wd/hub" % hub_url)
else:
raise Exception('invalid browser name', 'invalid browser name "%s"' % browser)
def baseURL( self ):
if not self.base_url:
self.base_url = os.environ['JBROWSE_URL'] if 'JBROWSE_URL' in os.environ else "http://localhost/jbrowse/index.html"
return self.base_url
## convenience methods for us
def assert_element(self, expression , time=5):
self._waits_for_element(expression, time*self.time_dilation)
try:
if expression.find('/') >= 0:
el = self.browser.find_element_by_xpath(expression)
else:
el = self.browser.find_element_by_css_selector( expression )
except NoSuchElementException:
raise AssertionError ("can't find %s" %expression)
return el
def assert_elements( self, expression ):
self._waits_for_elements( expression, 3*self.time_dilation )
try:
if '/' in expression:
el = self.browser.find_elements_by_xpath( expression )
else:
el = self.browser.find_elements_by_css_selector( expression )
except NoSuchElementException:
raise AssertionError ("can't find %s" %expression)
return el
def assert_track( self, tracktext ):
trackPath = "//div[contains(@class,'track-label')][contains(.,'%s')]" %tracktext
self._waits_for_element( trackPath )
def assert_no_element( self, expression ):
self._waits_for_no_element( expression )
def assert_no_js_errors( self ):
assert self.browser.find_element_by_xpath('/html/body') \
.get_attribute('JSError') == None
# Find the query box and put f15 into it and hit enter
def do_typed_query( self, text ):
qbox = self.browser.find_element_by_id("location")
qbox.clear()
qbox.send_keys( text )
qbox.send_keys( Keys.RETURN )
def _rubberband( self, el_xpath, start_pct, end_pct, modkey = None ):
el = self.assert_element( el_xpath )
start_offset = el.size['width'] * start_pct - el.size['width']/2
c = self.actionchains() \
.move_to_element( el ) \
.move_by_offset( start_offset, 0 )
if( modkey ):
c = c.key_down( modkey )
c = c \
.click_and_hold( None ) \
.move_by_offset( el.size['width']*(end_pct-start_pct), 0 ) \
.release( None )
if( modkey ):
c = c.key_up( modkey )
c.perform()
self.assert_no_js_errors()
def export_track( self, track_name, region, file_format, button ):
self.track_menu_click( track_name, 'Save')
# test view export
self.assert_element("//div[@id='exportDialog']//label[contains(.,'%s')]" % region ).click()
self.assert_element("//div[@id='exportDialog']//label[contains(.,'%s')]" % file_format ).click()
self.assert_element("//div[@id='exportDialog']//*[contains(@class,'dijitButton')]//*[contains(@class,'dijitButtonText')][contains(.,'%s')]" % button ).click()
self.wait_for_dialog_disappearance()
self.assert_no_js_errors()
def close_dialog( self, title ):
dialog = "//*[@class='dijitDialogTitle'][contains(text(),'%s')]/../span[contains(@class,'dijitDialogCloseIcon')]" % title
self.assert_element(dialog).click()
self.assert_no_element(dialog)
self.wait_for_dialog_disappearance()
self.assert_no_js_errors()
def wait_for_dialog_appearance( self, t=5):
#WebDriverWait(self, t).until(lambda self: not self.browser.find_element_by_css_selector( '.dijitDialogUnderlayWrapper').is_displayed())
time.sleep(1*self.time_dilation)
#pass
def wait_for_dialog_disappearance( self, t=5):
#WebDriverWait(self, t).until(lambda self: not self.browser.find_element_by_css_selector( '.dijitDialogUnderlayWrapper').is_displayed())
time.sleep(1*self.time_dilation)
#pass
def track_menu_click( self, track_name, item_name ):
menuButton = "//div[contains(@class,'track_%s')]//div[contains(@class,'track-label')]//div[contains(@class,'track-menu-button')]" \
% re.sub( '\W', '_', track_name.lower())
self.assert_element(menuButton).click()
self.menu_item_click( item_name )
def menu_item_click( self, text ):
menuItem = "//div[contains(@class,'dijitMenuPopup')][not(contains(@style,'display: none'))] \
//td[contains(@class,'dijitMenuItemLabel')][contains(.,'%s')]" % text
self.assert_element(menuItem).click()
def overview_rubberband( self, start_pct, end_pct ):
"""Executes a rubberband gesture from start_pct to end_pct on the overview bar"""
self._rubberband( "//div[@id='overview']", start_pct, end_pct )
# I can't get a mainscale_rubberband() working, can't find an
# element to tell selenium to move to that will hit it. can't
# move to the scale itself because it's so wide.
def trackpane_rubberband( self, start_pct, end_pct ):
"""Executes a rubberband gesture from start_pct to end_pct in the main track pane"""
self._rubberband( "//div[contains(@class,'dragWindow')]", start_pct, end_pct, Keys.SHIFT )
def is_track_on( self, tracktext ):
# find the track label in the track pane
return self.does_element_exist( \
"//div[contains(@class,'track-label')]/span[contains(@class,'track-label-text')][contains(.,'%s')]" % tracktext )
def turn_on_track( self, tracktext ):
return self.track_selector.turn_on_track( tracktext )
def turn_off_track( self, tracktext ):
return self.track_selector.turn_off_track( tracktext )
def actionchains( self ):
return ActionChains( self.browser )
def get_track_labels_containing( self, string ):
return self.assert_elements( "//span[contains(@class,'track-label-text')][contains(.,'%s')]" % string )
def _waits_for_elements( self, expression, time=5):
WebDriverWait(self, time*self.time_dilation).until(lambda self: self.do_elements_exist(expression))
def _waits_for_element( self, expression, time=5 ):
WebDriverWait(self, time*self.time_dilation).until(lambda self: self.does_element_exist(expression))
def _waits_for_no_element( self, expression, time=5 ):
WebDriverWait(self, time*self.time_dilation).until(lambda self: not self.does_element_exist(expression))
# Wait until faceted browser has narrowed results to one track
def wait_until_one_track(self):
WebDriverWait(self, 5*self.time_dilation).until(lambda self: self.is_one_row())
# Return true/false if faceted browser narrowed down to one track
def is_one_row(self):
return self.assert_elements("div.dojoxGridRow").__len__() == 1
# Return true/false if element exists
def does_element_exist (self, expression):
try:
if expression.find('/') >= 0:
self.browser.find_element_by_xpath( expression )
else:
self.browser.find_element_by_css_selector( expression )
return True
except NoSuchElementException:
return False
# Return true/false if elements exist
def do_elements_exist (self, expression):
try:
if expression.find('/') >= 0:
self.browser.find_elements_by_xpath( expression )
else:
self.browser.find_elements_by_css_selector( expression )
return True
except NoSuchElementException:
return False
def click_search_disambiguation( self, trackName, buttonText):
self.wait_for_dialog_appearance()
xpath = (
'//*[contains(@class,"dijitDialogPaneContent")]'
'//td[contains(@class,"field-tracks")][contains(.,"%s")]'
'/../td[contains(@class,"goButtonColumn")]'
'//*[contains(@class,"dijitButton")][contains(.,"%s")]'
) % (trackName, buttonText)
#print(xpath)
self.assert_element(xpath).click()
self.wait_for_dialog_disappearance()
def select_refseq( self, name ):
self.do_typed_query( name )
def scroll( self ):
move_right_button = self.browser.find_element_by_id('moveRight')
move_right_button.click()
self.waits_for_scroll(self.browser.title)
move_left_button = self.browser.find_element_by_id('moveLeft')
move_left_button.click()
self.waits_for_scroll(self.browser.title)
self.assert_no_js_errors()
# scroll back and forth with the mouse
self.actionchains() \
.move_to_element( move_right_button ) \
.move_by_offset( 0, 300 ) \
.click_and_hold( None ) \
.move_by_offset( 300, 0 ) \
.release( None ) \
.move_by_offset( -100,100 ) \
.click_and_hold( None ) \
.move_by_offset( -300, 0 ) \
.release( None ) \
.perform()
self.assert_no_js_errors()
# waits for the title of the page to change, since it
# gets updated after the scroll animation
def waits_for_scroll ( self, location ):
WebDriverWait(self, 5*self.time_dilation).until(lambda self: self.browser.title != location)
#Exists because onload() get trigered before JBrowse is ready
def _waits_for_load(self):
WebDriverWait(self.browser, 5*self.time_dilation).until(lambda self: "data=" in self.current_url or "js_tests")
if "data=nonexistent" in self.browser.current_url: #account for the test for bad data
pass
elif "js_tests" in self.browser.current_url: #account for jasmine tests
pass
else:
self.waits_for_scroll("JBrowse")
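# A minimal sketch of a concrete test case: mix JBrowseTest with
# unittest.TestCase and point data_dir at a dataset; the
# "sample_data/json/volvox" path and the query string are assumptions.
class _ExampleVolvoxTest(JBrowseTest, unittest.TestCase):
    data_dir = "sample_data/json/volvox"
    def test_scroll_without_js_errors(self):
        self.do_typed_query("ctgA:1..10000")
        self.scroll()
        self.assert_no_js_errors()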
| lgpl-2.1 | 2,266,186,494,303,133,400 | -972,908,021,915,785,900 | 39.974684 | 166 | 0.614535 | false |
alfkjartan/nvgimu | nvg/maths/matrices.py | 2 | 6689 | """
Utilities for working with matrices.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
import math
import operator
from itertools import izip
_rotationMatrices = dict(
x = lambda rx: np.matrix((
(1,0,0),
(0,math.cos(rx),-math.sin(rx)),
(0,math.sin(rx),math.cos(rx))),dtype=float),
y = lambda ry: np.matrix((
(math.cos(ry),0,math.sin(ry)),
(0,1,0),
(-math.sin(ry),0,math.cos(ry))),dtype=float),
z = lambda rz: np.matrix((
(math.cos(rz),-math.sin(rz),0),
(math.sin(rz),math.cos(rz),0),
(0,0,1)),dtype=float))
_EPS = 1e-12
_eulerFuncs = dict(
xyz = lambda m: \
(np.arctan2(-m[1,2], m[2,2]), np.arcsin(m[0,2]), np.arctan2(-m[0,1], m[0,0])) if abs(m[0,2]) < 1 - _EPS \
else (np.arctan2(m[1,0], m[1,1]), np.pi/2, 0) if m[0,2] > 0 \
else (-np.arctan2(m[1,0], m[1,1]), -np.pi/2, 0),
xzy = lambda m: \
(np.arctan2(m[2,1], m[1,1]), np.arcsin(-m[0,1]), np.arctan2(m[0,2], m[0,0])) if abs(m[0,1]) < 1 - _EPS \
else (np.arctan2(-m[2,0], m[2,2]), -np.pi/2, 0) if m[0,1] > 0 \
else (-np.arctan2(-m[2,0], m[2,2]), np.pi/2, 0),
yxz = lambda m: \
(np.arctan2(m[0,2], m[2,2]), np.arcsin(-m[1,2]), np.arctan2(m[1,0], m[1,1])) if abs(m[1,2]) < 1 - _EPS \
else (np.arctan2(-m[0,1], m[0,0]), -np.pi/2, 0) if m[1,2] > 0 \
else (-np.arctan2(-m[0,1], m[0,0]), np.pi/2, 0),
yzx = lambda m: \
(np.arctan2(-m[2,0], m[0,0]), np.arcsin(m[1,0]), np.arctan2(-m[1,2], m[1,1])) if abs(m[1,0]) < 1 - _EPS \
else (np.arctan2(m[2,1], m[2,2]), np.pi/2, 0) if m[1,0] > 0 \
else (-np.arctan2(m[2,1], m[2,2]), -np.pi/2, 0),
zxy = lambda m: \
(np.arctan2(-m[0,1], m[1,1]), np.arcsin(m[2,1]), np.arctan2(-m[2,0], m[2,2])) if abs(m[2,1]) < 1 - _EPS \
else (np.arctan2(m[0,2], m[0,0]), np.pi/2, 0) if m[2,1] > 0 \
else (-np.arctan2(m[0,2], m[0,0]), -np.pi/2, 0),
zyx = lambda m: \
(np.arctan2(m[1,0], m[0,0]), np.arcsin(-m[2,0]), np.arctan2(m[2,1], m[2,2])) if abs(m[2,0]) < 1 - _EPS \
else (np.arctan2(-m[1,2], m[1,1]), -np.pi/2, 0) if m[2,0] > 0 \
else (-np.arctan2(-m[1,2], m[1,1]), np.pi/2, 0),
xyx = lambda m: \
(np.arctan2(m[1,0], -m[2,0]), np.arccos(m[0,0]), np.arctan2(m[0,1], m[0,2])) if abs(m[0,0]) < 1 - _EPS \
else (np.arctan2(-m[1,2], m[1,1]), 0, 0) if m[0,0] > 0 \
else (-np.arctan2(-m[1,2], m[1,1]), np.pi, 0),
xzx = lambda m: \
(np.arctan2(m[2,0], m[1,0]), np.arccos(m[0,0]), np.arctan2(m[0,2], -m[0,1])) if abs(m[0,0]) < 1 - _EPS \
else (np.arctan2(m[2,1], m[2,2]), 0, 0) if m[0,0] > 0 \
else (-np.arctan2(m[2,1], m[2,2]), np.pi, 0),
yxy = lambda m: \
(np.arctan2(m[0,1], m[2,1]), np.arccos(m[1,1]), np.arctan2(m[1,0], -m[1,2])) if abs(m[1,1]) < 1 - _EPS \
else (np.arctan2(m[0,2], m[0,0]), 0, 0) if m[1,1] > 0 \
else (-np.arctan2(m[0,2], m[0,0]), np.pi, 0),
yzy = lambda m: \
(np.arctan2(m[2,1], -m[0,1]), np.arccos(m[1,1]), np.arctan2(m[1,2], m[1,0])) if abs(m[1,1]) < 1 - _EPS \
else (np.arctan2(-m[2,0], m[2,2]), 0, 0) if m[1,1] > 0 \
else (-np.arctan2(-m[2,0], m[2,2]), np.pi, 0),
zxz = lambda m: \
(np.arctan2(m[0,2], -m[1,2]), np.arccos(m[2,2]), np.arctan2(m[2,0], m[2,1])) if abs(m[2,2]) < 1 - _EPS \
else (np.arctan2(-m[0,1], m[0,0]), 0, 0) if m[2,2] > 0 \
else (-np.arctan2(-m[0,1], m[0,0]), np.pi, 0),
zyz = lambda m: \
(np.arctan2(m[1,2], m[0,2]), np.arccos(m[2,2]), np.arctan2(m[2,1], -m[2,0])) if abs(m[2,2]) < 1 - _EPS \
else (np.arctan2(m[1,0], m[1,1]), 0, 0) if m[2,2] > 0 \
else (-np.arctan2(m[1,0], m[1,1]), np.pi, 0),
xy = lambda m: (np.arctan2(m[2,1], m[1,1]), np.arctan2(m[0,2], m[0,0])),
xz = lambda m: (np.arctan2(-m[1,2], m[2,2]), np.arctan2(-m[0,1], m[0,0])),
yx = lambda m: (np.arctan2(-m[2,0], m[0,0]), np.arctan2(-m[1,2], m[1,1])),
yz = lambda m: (np.arctan2(m[0,2], m[2,2]), np.arctan2(m[1,0], m[1,1])),
zx = lambda m: (np.arctan2(m[1,0], m[0,0]), np.arctan2(m[2,1], m[2,2])),
zy = lambda m: (np.arctan2(-m[0,1], m[1,1]), np.arctan2(-m[2,0], m[2,2])),
x = lambda m: (np.arctan2(m[2,1], m[2,2]),),
y = lambda m: (np.arctan2(m[0,2], m[0,0]),),
z = lambda m: (np.arctan2(m[1,0], m[1,1]),))
def matrixToEuler(m,order='zyx',inDegrees=True):
"""
Convert a 3x3 rotation matrix to an Euler angle sequence.
@param m: 3x3 L{np.matrix}, or equivalent, to convert.
@param order: The order of the Euler angle sequence, e.g. 'zyx'
@param inDegrees: True to return result in degrees, False for radians.
@return: L{np.ndarray} of Euler angles in specified order.
"""
order = order.lower()
if order not in _eulerFuncs.keys():
raise NotImplementedError, "Order %s not implemented" % order
result = np.array(_eulerFuncs[order](m))
if inDegrees:
return np.degrees(result)
else:
return result
def matrixFromEuler(angles, order, inDegrees=True):
"""
Generate a rotation matrix from an Euler angle sequence.
@param angles: Sequence of Euler rotation angles.
@param order: Sequence of rotation axes. Rotations are applied sequentially
from left to right, i.e. the string 'zyx' would result in rotation about
the Z axis, then the new Y axis, and finally about the new X axis.
@param inDegrees: Whether the angles are in degrees (`True`) or radians
(`False`)
@return: 3x3 rotation matrix corresponding to the Euler angle sequence.
"""
assert len(angles) == len(order)
if inDegrees:
angles = np.radians(angles)
return reduce(operator.mul,
(_rotationMatrices[axis](angle) for axis,angle in
izip(order.lower(), angles)))
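# A minimal round-trip sketch: build a rotation matrix from a 'zyx' Euler
# sequence and recover the same angles. The angle values are illustrative.
if __name__ == "__main__":
    original = np.array([30.0, 20.0, 10.0])
    m = matrixFromEuler(original, order='zyx', inDegrees=True)
    recovered = matrixToEuler(m, order='zyx', inDegrees=True)
    print("original: %s, recovered: %s" % (original, recovered))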
| gpl-3.0 | -8,425,215,034,160,879,000 | 219,741,071,333,983,040 | 45.776224 | 117 | 0.535955 | false |
ppries/tensorflow | tensorflow/contrib/bayesflow/python/kernel_tests/monte_carlo_test.py | 23 | 6678 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
distributions = tf.contrib.distributions
layers = tf.contrib.layers
monte_carlo = tf.contrib.bayesflow.monte_carlo
class ExpectationImportanceSampleTest(tf.test.TestCase):
def test_normal_integral_mean_and_var_correctly_estimated(self):
n = int(1e6)
with self.test_session():
mu_p = tf.constant([-1.0, 1.0], dtype=tf.float64)
mu_q = tf.constant([0.0, 0.0], dtype=tf.float64)
sigma_p = tf.constant([0.5, 0.5], dtype=tf.float64)
sigma_q = tf.constant([1.0, 1.0], dtype=tf.float64)
p = distributions.Normal(mu=mu_p, sigma=sigma_p)
q = distributions.Normal(mu=mu_q, sigma=sigma_q)
# Compute E_p[X].
e_x = monte_carlo.expectation_importance_sampler(
f=lambda x: x, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
# Compute E_p[X^2].
e_x2 = monte_carlo.expectation_importance_sampler(
f=tf.square,
log_p=p.log_prob,
sampling_dist_q=q,
n=n,
seed=42)
stdev = tf.sqrt(e_x2 - tf.square(e_x))
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
# pass.
# Convergence of mean is +- 0.003 if n = 100M
# Convergence of std is +- 0.00001 if n = 100M
self.assertEqual(p.get_batch_shape(), e_x.get_shape())
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.std().eval(), stdev.eval(), rtol=0.02)
def test_multivariate_normal_prob_positive_product_of_components(self):
# Test that importance sampling can correctly estimate the probability that
# the product of components in a MultivariateNormal are > 0.
n = 1000
with self.test_session():
p = distributions.MultivariateNormalDiag(
mu=[0.0, 0.0], diag_stdev=[1.0, 1.0])
q = distributions.MultivariateNormalDiag(
mu=[0.5, 0.5], diag_stdev=[3., 3.])
# Compute E_p[X_1 * X_2 > 0], with X_i the ith component of X ~ p(x).
# Should equal 1/2 because p is a spherical Gaussian centered at (0, 0).
def indicator(x):
x1_times_x2 = tf.reduce_prod(x, reduction_indices=[-1])
return 0.5 * (tf.sign(x1_times_x2) + 1.0)
prob = monte_carlo.expectation_importance_sampler(
f=indicator, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
# pass.
# Convergence is +- 0.004 if n = 100k.
self.assertEqual(p.get_batch_shape(), prob.get_shape())
self.assertAllClose(0.5, prob.eval(), rtol=0.05)
class ExpectationImportanceSampleLogspaceTest(tf.test.TestCase):
def test_normal_distribution_second_moment_estimated_correctly(self):
# Test the importance sampled estimate against an analytical result.
n = int(1e6)
with self.test_session():
mu_p = tf.constant([0.0, 0.0], dtype=tf.float64)
mu_q = tf.constant([-1.0, 1.0], dtype=tf.float64)
sigma_p = tf.constant([1.0, 2 / 3.], dtype=tf.float64)
sigma_q = tf.constant([1.0, 1.0], dtype=tf.float64)
p = distributions.Normal(mu=mu_p, sigma=sigma_p)
q = distributions.Normal(mu=mu_q, sigma=sigma_q)
# Compute E_p[X^2].
# Should equal [1, (2/3)^2]
log_e_x2 = monte_carlo.expectation_importance_sampler_logspace(
log_f=lambda x: tf.log(tf.square(x)),
log_p=p.log_prob,
sampling_dist_q=q,
n=n,
seed=42)
e_x2 = tf.exp(log_e_x2)
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
# pass.
self.assertEqual(p.get_batch_shape(), e_x2.get_shape())
self.assertAllClose([1., (2 / 3.)**2], e_x2.eval(), rtol=0.02)
class ExpectationTest(tf.test.TestCase):
def test_mc_estimate_of_normal_mean_and_variance_is_correct_vs_analytic(self):
tf.set_random_seed(0)
n = 20000
with self.test_session():
p = distributions.Normal(mu=[1.0, -1.0], sigma=[0.3, 0.5])
# Compute E_p[X] and E_p[X^2].
z = p.sample_n(n=n)
e_x = monte_carlo.expectation(lambda x: x, p, z=z, seed=42)
e_x2 = monte_carlo.expectation(tf.square, p, z=z, seed=0)
var = e_x2 - tf.square(e_x)
self.assertEqual(p.get_batch_shape(), e_x.get_shape())
self.assertEqual(p.get_batch_shape(), e_x2.get_shape())
      # Relative tolerance (rtol) chosen 2 times as large as minimum needed to
# pass.
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.variance().eval(), var.eval(), rtol=0.02)
class GetSamplesTest(tf.test.TestCase):
"""Test the private method 'get_samples'."""
def test_raises_if_both_z_and_n_are_none(self):
with self.test_session():
dist = distributions.Normal(mu=0., sigma=1.)
z = None
n = None
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
monte_carlo._get_samples(dist, z, n, seed)
def test_raises_if_both_z_and_n_are_not_none(self):
with self.test_session():
dist = distributions.Normal(mu=0., sigma=1.)
z = dist.sample_n(n=1)
n = 1
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
monte_carlo._get_samples(dist, z, n, seed)
def test_returns_n_samples_if_n_provided(self):
with self.test_session():
dist = distributions.Normal(mu=0., sigma=1.)
z = None
n = 10
seed = None
z = monte_carlo._get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
def test_returns_z_if_z_provided(self):
with self.test_session():
dist = distributions.Normal(mu=0., sigma=1.)
z = dist.sample_n(n=10)
n = None
seed = None
z = monte_carlo._get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 522,966,474,900,995,800 | 4,140,672,479,186,716,000 | 36.1 | 80 | 0.626086 | false |
jantman/nagios-scripts | check_icinga_ido.py | 1 | 6939 | #!/usr/bin/env python
"""
Script to check last update of core programstatus
and service checks in Icinga ido2db Postgres database
"""
#
# The latest version of this script lives at:
# <https://github.com/jantman/nagios-scripts/blob/master/check_puppetdb_agent_run.py>
#
# Please file bug/feature requests and submit patches through
# the above GitHub repository. Feedback and patches are greatly
# appreciated; patches are preferred as GitHub pull requests, but
# emailed patches are also accepted.
#
# Copyright 2014 Jason Antman <jason@jasonantman.com> all rights reserved.
# See the above git repository's LICENSE file for license terms (GPLv3).
#
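#
# Example invocation (host name and threshold values are only illustrative;
# see --help for all options):
#   ./check_icinga_ido.py -H db01.example.com -n icinga_ido -u icinga-ido \
#       -a icinga -w 120 -c 600
#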
import sys
from datetime import datetime
import pytz
import logging
import argparse
from math import ceil
import nagiosplugin
import psycopg2
import pprint
_log = logging.getLogger('nagiosplugin')
utc = pytz.utc
class IdoStatus(nagiosplugin.Resource):
"""Check age of ido2db programstatus and last service check in postgres database"""
def __init__(self, db_host, db_name, db_user, db_pass, db_port=5432):
self.db_host = db_host
self.db_user = db_user
self.db_pass = db_pass
self.db_port = db_port
self.db_name = db_name
def probe(self):
_log.info("connecting to Postgres DB %s on %s" % (self.db_name, self.db_host))
try:
conn_str = "dbname='%s' user='%s' host='%s' password='%s' port='%s' application_name='%s'" % (
self.db_name,
self.db_user,
self.db_host,
self.db_pass,
self.db_port,
"check_icinga_ido_core.py",
)
_log.debug("psycopg2 connect string: %s" % conn_str)
conn = psycopg2.connect(conn_str)
except psycopg2.OperationalError, e:
_log.info("got psycopg2.OperationalError: %s" % e.__str__())
raise nagiosplugin.CheckError(e.__str__())
_log.info("connected to database")
# these queries come from https://wiki.icinga.org/display/testing/Special+IDOUtils+Queries
cur = conn.cursor()
_log.debug("got cursor")
sql = "SELECT EXTRACT(EPOCH FROM (NOW()-status_update_time)) AS age from icinga_programstatus where (UNIX_TIMESTAMP(status_update_time) > UNIX_TIMESTAMP(NOW())-60);"
_log.debug("executing query: %s" % sql)
cur.execute(sql)
row = cur.fetchone()
_log.debug("result: %s" % row)
programstatus_age = ceil(row[0])
sql = "select (UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(ss.status_update_time)) as age from icinga_servicestatus ss join icinga_objects os on os.object_id=ss.service_object_id order by status_update_time desc limit 1;"
_log.debug("executing query: %s" % sql)
cur.execute(sql)
row = cur.fetchone()
_log.debug("result: %s" % row)
last_check_age = ceil(row[0])
return [
nagiosplugin.Metric('programstatus_age', programstatus_age, uom='s', min=0),
nagiosplugin.Metric('last_check_age', last_check_age, uom='s', min=0),
]
class LoadSummary(nagiosplugin.Summary):
"""LoadSummary is used to provide custom outputs to the check"""
def __init__(self, db_name):
self.db_name = db_name
def _human_time(self, seconds):
"""convert an integer seconds into human-readable hms"""
mins, secs = divmod(seconds, 60)
hours, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hours, mins, secs)
def _state_marker(self, state):
"""return a textual marker for result states"""
if type(state) == type(nagiosplugin.state.Critical):
return " (Crit)"
if type(state) == type(nagiosplugin.state.Warn):
return " (Warn)"
if type(state) == type(nagiosplugin.state.Unknown):
return " (Unk)"
return ""
def status_line(self, results):
if type(results.most_significant_state) == type(nagiosplugin.state.Unknown):
# won't have perf values, so special handling
return results.most_significant[0].hint.splitlines()[0]
return "Last Programstatus Update %s ago%s; Last Service Status Update %s ago%s (%s)" % (
self._human_time(results['programstatus_age'].metric.value),
self._state_marker(results['programstatus_age'].state),
self._human_time(results['last_check_age'].metric.value),
self._state_marker(results['last_check_age'].state),
self.db_name)
def ok(self, results):
return self.status_line(results)
def problem(self, results):
return self.status_line(results)
@nagiosplugin.guarded
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-H', '--hostname', dest='hostname',
help='Postgres server hostname')
parser.add_argument('-p', '--port', dest='port',
default='5432',
help='Postgres port (Default: 5432)')
parser.add_argument('-u', '--username', dest='username',
default='icinga-ido',
help='Postgres username (Default: icinga-ido)')
parser.add_argument('-a', '--password', dest='password',
default='icinga',
help='Postgres password (Default: icinga)')
parser.add_argument('-n', '--db-name', dest='db_name',
default='icinga_ido',
help='Postgres database name (Default: icinga_ido)')
parser.add_argument('-w', '--warning', dest='warning',
default='120',
help='warning threshold for age of last programstatus or service status update, in seconds (Default: 120 / 2m)')
parser.add_argument('-c', '--critical', dest='critical',
default='600',
help='critical threshold for age of last programstatus or service status update, in seconds (Default: 600 / 10m)')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='increase output verbosity (use up to 3 times)')
parser.add_argument('-t', '--timeout', dest='timeout',
default=30,
help='timeout (in seconds) for the command (Default: 30)')
args = parser.parse_args()
if not args.hostname:
raise nagiosplugin.CheckError('hostname (-H|--hostname) must be provided')
check = nagiosplugin.Check(
IdoStatus(args.hostname, args.db_name, args.username, args.password, args.port),
nagiosplugin.ScalarContext('programstatus_age', args.warning, args.critical),
nagiosplugin.ScalarContext('last_check_age', args.warning, args.critical),
LoadSummary(args.db_name))
check.main(args.verbose, args.timeout)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,262,103,486,681,983,200 | -1,098,291,397,429,490,200 | 41.833333 | 222 | 0.608157 | false |
tseaver/google-cloud-python | talent/google/cloud/talent_v4beta1/gapic/transports/event_service_grpc_transport.py | 2 | 4874 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.talent_v4beta1.proto import event_service_pb2_grpc
class EventServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.talent.v4beta1 EventService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
)
def __init__(
self, channel=None, credentials=None, address="jobs.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
options={
"grpc.max_send_message_length": -1,
"grpc.max_receive_message_length": -1,
}.items(),
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"event_service_stub": event_service_pb2_grpc.EventServiceStub(channel)
}
@classmethod
def create_channel(
cls, address="jobs.googleapis.com:443", credentials=None, **kwargs
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def create_client_event(self):
"""Return the gRPC stub for :meth:`EventServiceClient.create_client_event`.
Report events issued when end user interacts with customer's application
that uses Cloud Talent Solution. You may inspect the created events in
`self service
tools <https://console.cloud.google.com/talent-solution/overview>`__.
`Learn
more <https://cloud.google.com/talent-solution/docs/management-tools>`__
about self service tools.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["event_service_stub"].CreateClientEvent
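# Minimal usage sketch (illustrative; normally this transport is created for
# you by EventServiceClient rather than constructed by hand):
#
#   transport = EventServiceGrpcTransport(address="jobs.googleapis.com:443")
#   create_event = transport.create_client_event  # raw gRPC callable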
| apache-2.0 | -7,074,604,166,565,648,000 | -4,467,309,657,441,662,000 | 36.492308 | 86 | 0.631514 | false |
rpm-software-management/yum-utils | repomanage.py | 7 | 7043 | #!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# (c) Copyright Seth Vidal 2004
# need hdropen, dir traversing, version comparison, and getopt (eventually)
# this should take a dir, traverse it - build a dict of foo[(name, arch)] = [/path/to/file/that/is/highest, /path/to/equalfile]
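# e.g. (illustrative values)
#   pkgdict[('foo', 'x86_64')] = [('0', '1.0', '1'), ('0', '1.1', '1')]
#   verfile[('foo', 'x86_64', '0', '1.1', '1')] = ['/path/to/foo-1.1-1.x86_64.rpm']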
import os
import sys
import rpm
import fnmatch
import string
import rpmUtils
from yum import misc
from optparse import OptionParser
def errorprint(stuff):
print >> sys.stderr, stuff
def getFileList(path, ext, filelist):
"""Return all files in path matching ext, store them in filelist, recurse dirs
return list object"""
extlen = len(ext)
try:
dir_list = os.listdir(path)
except OSError, e:
errorprint('Error accessing directory %s, %s' % (path, str(e)))
return []
for d in dir_list:
if os.path.isdir(path + '/' + d):
filelist = getFileList(path + '/' + d, ext, filelist)
else:
if string.lower(d[-extlen:]) == '%s' % (ext):
newpath = os.path.normpath(path + '/' + d)
filelist.append(newpath)
return filelist
def trimRpms(rpms, excludeGlobs):
# print 'Pre-Trim Len: %d' % len(rpms)
badrpms = []
for fn in rpms:
for glob in excludeGlobs:
if fnmatch.fnmatch(fn, glob):
# print 'excluded: %s' % fn
if fn not in badrpms:
badrpms.append(fn)
for fn in badrpms:
if fn in rpms:
rpms.remove(fn)
# print 'Post-Trim Len: %d' % len(rpms)
return rpms
def parseargs(args):
usage = """
repomanage: manage a directory of rpm packages. returns lists of newest
or oldest packages in a directory for easy piping to xargs
or similar programs.
repomanage [--old] [--new] path.
"""
parser = OptionParser(usage=usage)
# new is only used to make sure that the user is not trying to get both
# new and old, after this old and not old will be used.
# (default = not old = new)
parser.add_option("-o", "--old", default=False, action="store_true",
help='print the older packages')
parser.add_option("-n", "--new", default=False, action="store_true",
help='print the newest packages')
parser.add_option("-s", "--space", default=False, action="store_true",
help='space separated output, not newline')
parser.add_option("-k", "--keep", default=1, dest='keep', action="store",
help='newest N packages to keep - defaults to 1')
parser.add_option("-c", "--nocheck", default=0, action="store_true",
help='do not check package payload signatures/digests')
    (opts, args) = parser.parse_args()
if opts.new and opts.old:
errorprint('\nPass either --old or --new, not both!\n')
print parser.format_help()
sys.exit(1)
if len(args) > 1:
errorprint('Error: Only one directory allowed per run.')
print parser.format_help()
sys.exit(1)
if len(args) < 1:
errorprint('Error: Must specify a directory to index.')
print parser.format_help()
sys.exit(1)
return (opts, args)
def main(args):
(options, args) = parseargs(args)
mydir = args[0]
rpmList = []
rpmList = getFileList(mydir, '.rpm', rpmList)
verfile = {}
pkgdict = {} # hold all of them - put them in (n,a) = [(e,v,r),(e1,v1,r1)]
keepnum = int(options.keep)*(-1) # the number of items to keep
if len(rpmList) == 0:
errorprint('No files to process')
sys.exit(1)
ts = rpm.TransactionSet()
if options.nocheck:
ts.setVSFlags(~(rpm._RPMVSF_NOPAYLOAD))
else:
ts.setVSFlags(~(rpm.RPMVSF_NOMD5|rpm.RPMVSF_NEEDPAYLOAD))
for pkg in rpmList:
try:
hdr = rpmUtils.miscutils.hdrFromPackage(ts, pkg)
except rpmUtils.RpmUtilsError, e:
msg = "Error opening pkg %s: %s" % (pkg, str(e))
errorprint(msg)
continue
pkgtuple = rpmUtils.miscutils.pkgTupleFromHeader(hdr)
(n,a,e,v,r) = pkgtuple
del hdr
if (n,a) not in pkgdict:
pkgdict[(n,a)] = []
pkgdict[(n,a)].append((e,v,r))
if pkgtuple not in verfile:
verfile[pkgtuple] = []
verfile[pkgtuple].append(pkg)
for natup in pkgdict.keys():
evrlist = pkgdict[natup]
if len(evrlist) > 1:
evrlist = misc.unique(evrlist)
evrlist.sort(rpmUtils.miscutils.compareEVR)
pkgdict[natup] = evrlist
del ts
# now we have our dicts - we can return whatever by iterating over them
outputpackages = []
#if new
if not options.old:
for (n,a) in pkgdict.keys():
evrlist = pkgdict[(n,a)]
if len(evrlist) < abs(keepnum):
newevrs = evrlist
else:
newevrs = evrlist[keepnum:]
for (e,v,r) in newevrs:
for pkg in verfile[(n,a,e,v,r)]:
outputpackages.append(pkg)
if options.old:
for (n,a) in pkgdict.keys():
evrlist = pkgdict[(n,a)]
if len(evrlist) < abs(keepnum):
continue
oldevrs = evrlist[:keepnum]
for (e,v,r) in oldevrs:
for pkg in verfile[(n,a,e,v,r)]:
outputpackages.append(pkg)
outputpackages.sort()
for pkg in outputpackages:
if options.space:
print '%s' % pkg,
else:
print pkg
def usage():
print """
repomanage [--old] [--new] path
-o --old - print the older packages
-n --new - print the newest packages
-s --space - space separated output, not newline
-k --keep - newest N packages to keep - defaults to 1
-c --nocheck - do not check package payload signatures/digests
-h --help - duh
By default it will output the full path to the newest packages in the path.
"""
if __name__ == "__main__":
    if len(sys.argv) < 2:
usage()
sys.exit(1)
else:
main(sys.argv[1:])
| gpl-2.0 | 6,554,620,304,640,498,000 | 5,297,926,057,438,686,000 | 29.890351 | 127 | 0.572057 | false |
aslamplr/shorts | lib/oauthlib/oauth1/rfc5849/errors.py | 17 | 2326 | # coding=utf-8
"""
oauthlib.oauth1.rfc5849.errors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Error used both by OAuth 1 clients and providers to represent the spec
defined error responses for all four core grant types.
"""
from __future__ import unicode_literals
from oauthlib.common import urlencode, add_params_to_uri
class OAuth1Error(Exception):
error = None
def __init__(self, description=None, uri=None, status_code=400,
request=None):
"""
description: A human-readable ASCII [USASCII] text providing
additional information, used to assist the client
developer in understanding the error that occurred.
Values for the "error_description" parameter MUST NOT
include characters outside the set
x20-21 / x23-5B / x5D-7E.
uri: A URI identifying a human-readable web page with information
about the error, used to provide the client developer with
additional information about the error. Values for the
"error_uri" parameter MUST conform to the URI- Reference
syntax, and thus MUST NOT include characters outside the set
x21 / x23-5B / x5D-7E.
state: A CSRF protection value received from the client.
request: Oauthlib Request object
"""
self.description = description
self.uri = uri
self.status_code = status_code
def in_uri(self, uri):
return add_params_to_uri(uri, self.twotuples)
@property
def twotuples(self):
error = [('error', self.error)]
if self.description:
error.append(('error_description', self.description))
if self.uri:
error.append(('error_uri', self.uri))
return error
@property
def urlencoded(self):
return urlencode(self.twotuples)
class InsecureTransportError(OAuth1Error):
error = 'insecure_transport_protocol'
description = 'Only HTTPS connections are permitted.'
class InvalidSignatureMethodError(OAuth1Error):
error = 'invalid_signature_method'
class InvalidRequestError(OAuth1Error):
error = 'invalid_request'
class InvalidClientError(OAuth1Error):
error = 'invalid_client'
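# Sketch of typical use (the validator call and callback URI are placeholders):
#
#   try:
#       verify_oauth1_request(request)
#   except OAuth1Error as e:
#       redirect_to = e.in_uri('https://client.example.com/cb')
#       status = e.status_code            # 400 by default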
| mit | -3,029,430,481,095,711,000 | 3,265,124,404,976,840,000 | 30.863014 | 77 | 0.628977 | false |
3DLIRIOUS/BlendSCAD | examples/example014.scad.py | 1 | 1763 | # OpenSCAD example, ported by Michael Mlivoncic
# a beautiful dice...
# an interesting test case, to get the Boolean operations somehow fixed (TODO)
#import sys
#sys.path.append("O:/BlenderStuff")
import blendscad
#import imp
#imp.reload(blendscad)
#imp.reload(blendscad.core)
#imp.reload(blendscad.primitives)
blendscad.initns( globals() ) # try to add BlendSCAD names to current namespace .. as if they would be in this file...
## Clear the open .blend file!!!
clearAllObjects()
###### End of Header ##############################################################################
#OpenSCAD's intersection_for() is only a workaround. As standard "for" implies a union of its content, this one is a combination of
# for() and intersection() statements.
# Not really needed, as we currently do not support implicit union()'s, but it demonstrates how such code would be rewritten.
# see: http://en.wikibooks.org/wiki/OpenSCAD_User_Manual/The_OpenSCAD_Language#Intersection_For_Loop
# intersection_for(i = [
# [0, 0, 0],
# [10, 20, 300],
# [200, 40, 57],
# [20, 88, 57]
# ])
# rotate(i) cube([100, 20, 20], center = true)
# example 2 - rotation:
#intersection_for(i = [ ]
tmp = None
rnge = [ [ 0, 0, 0],
[ 10, 20, 300],
[200, 40, 57],
[ 20, 88, 57] ]
for i in rnge:
tmp = intersection(
rotate(i ,
cube([100, 20, 20], center = true))
, tmp);
###### Begin of Footer ##############################################################################
color(rands(0,1,3)) # random color last object. to see "FINISH" :-)
# print timestamp and finish - sometimes it is easier to see differences in console then :-)
import time
import datetime
st = datetime.datetime.fromtimestamp( time.time() ).strftime('%Y-%m-%d %H:%M:%S')
echo ("FINISH", st)
| gpl-3.0 | -4,571,813,345,443,268,000 | -6,176,795,665,840,345,000 | 26.546875 | 131 | 0.614861 | false |
codekaki/odoo | addons/project_issue/project_issue.py | 13 | 31772 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_status.base_stage import base_stage
from openerp.addons.project.project import _TASK_STATE
from openerp.addons.crm import crm
from datetime import datetime
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
import binascii
import time
from openerp import tools
from openerp.tools import html2plaintext
class project_issue_version(osv.osv):
_name = "project.issue.version"
_order = "name desc"
_columns = {
'name': fields.char('Version Number', size=32, required=True),
'active': fields.boolean('Active', required=False),
}
_defaults = {
'active': 1,
}
project_issue_version()
class project_issue(base_stage, osv.osv):
_name = "project.issue"
_description = "Project Issue"
_order = "priority, create_date desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj['state'] in ['new', 'draft'],
'project_issue.mt_issue_closed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
'project_issue.mt_issue_started': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open',
},
'stage_id': {
'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj['state'] not in ['new', 'draft', 'done', 'open'],
},
'kanban_state': {
'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj['kanban_state'] == 'blocked',
},
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(project_issue, self).create(cr, uid, vals, context=create_context)
def _get_default_partner(self, cr, uid, context=None):
""" Override of base_stage to add project specific behavior """
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return super(project_issue, self)._get_default_partner(cr, uid, context=context)
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default project by checking if present in the context """
return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('state', '=', 'draft')], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context.get('default_project_id')
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return int(project_ids[0][0])
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('project_ids', 'in', project_id), ('fold', '=', False) if project_id: add project columns that are not folded
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
        @param cr: the database cursor,
@param uid: the current user’s ID for security checks,
        @param ids: list of issue IDs
@return: difference between current date and log date
@param context: A standard dictionary for contextual values
"""
cal_obj = self.pool.get('resource.calendar')
res_obj = self.pool.get('resource.resource')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
# if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
if not issue.project_id or not issue.project_id.resource_calendar_id:
working_hours = None
else:
working_hours = issue.project_id.resource_calendar_id.id
res[issue.id] = {}
for field in fields:
duration = 0
ans = False
hours = 0
date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
if field in ['working_hours_open','day_open']:
if issue.date_open:
date_open = datetime.strptime(issue.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
date_until = issue.date_open
#Calculating no. of working hours to open the issue
hours = cal_obj._interval_hours_get(cr, uid, working_hours,
date_create,
date_open,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False,
context=context)
elif field in ['working_hours_close','day_close']:
if issue.date_closed:
date_close = datetime.strptime(issue.date_closed, "%Y-%m-%d %H:%M:%S")
date_until = issue.date_closed
ans = date_close - date_create
#Calculating no. of working hours to close the issue
hours = cal_obj._interval_hours_get(cr, uid, working_hours,
date_create,
date_close,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False,
context=context)
elif field in ['days_since_creation']:
if issue.create_date:
days_since_creation = datetime.today() - datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
res[issue.id][field] = days_since_creation.days
continue
elif field in ['inactivity_days']:
res[issue.id][field] = 0
if issue.date_action_last:
inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, '%Y-%m-%d %H:%M:%S')
res[issue.id][field] = inactive_days.days
continue
if ans:
resource_id = False
if issue.user_id:
resource_ids = res_obj.search(cr, uid, [('user_id','=',issue.user_id.id)])
if resource_ids and len(resource_ids):
resource_id = resource_ids[0]
duration = float(ans.days) + float(ans.seconds)/(24*3600)
if field in ['working_hours_open','working_hours_close']:
res[issue.id][field] = hours
elif field in ['day_open','day_close']:
res[issue.id][field] = duration
return res
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
def on_change_project(self, cr, uid, ids, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', size=128, required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True,select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
                        select=True, help='Sales team to which this case belongs.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'state': fields.related('stage_id', 'state', type="selection", store=True,
selection=_TASK_STATE, string="Status", readonly=True, select=True,
help='The status is set to \'Draft\', when a case is created.\
If the case is in progress the status is set to \'Open\'.\
When the case is over, the status is set to \'Done\'.\
If the case needs to be reviewed then the status is \
set to \'Pending\'.'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A Issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
readonly=True, required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Opened', readonly=True,select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True,select=True),
'date': fields.datetime('Date'),
'channel_id': fields.many2one('crm.case.channel', 'Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="['&', ('fold', '=', False), ('project_ids', '=', project_id)]"),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Open', \
multi='compute_day', type="float", store=True),
'day_close': fields.function(_compute_day, string='Days to Close', \
multi='compute_day', type="float", store=True),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to Open the Issue', \
multi='compute_day', type="float", store=True),
'working_hours_close': fields.function(_compute_day, string='Working Hours to Close the Issue', \
multi='compute_day', type="float", store=True),
'inactivity_days': fields.function(_compute_day, string='Days since last action', \
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
_defaults = {
'active': 1,
'partner_id': lambda s, cr, uid, c: s._get_default_partner(cr, uid, c),
'email_from': lambda s, cr, uid, c: s._get_default_email(cr, uid, c),
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': crm.AVAILABLE_PRIORITIES[2][0],
'kanban_state': 'normal',
'user_id': lambda obj, cr, uid, context: uid,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def set_priority(self, cr, uid, ids, priority, *args):
"""Set lead priority
"""
return self.write(cr, uid, ids, {'priority' : priority})
def set_high_priority(self, cr, uid, ids, *args):
"""Set lead priority to high
"""
return self.set_priority(cr, uid, ids, '1')
def set_normal_priority(self, cr, uid, ids, *args):
"""Set lead priority to normal
"""
return self.set_priority(cr, uid, ids, '3')
def convert_issue_task(self, cr, uid, ids, context=None):
if context is None:
context = {}
case_obj = self.pool.get('project.issue')
data_obj = self.pool.get('ir.model.data')
task_obj = self.pool.get('project.task')
result = data_obj._get_id(cr, uid, 'project', 'view_task_search_form')
res = data_obj.read(cr, uid, result, ['res_id'])
id2 = data_obj._get_id(cr, uid, 'project', 'view_task_form2')
id3 = data_obj._get_id(cr, uid, 'project', 'view_task_tree2')
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
if id3:
id3 = data_obj.browse(cr, uid, id3, context=context).res_id
for bug in case_obj.browse(cr, uid, ids, context=context):
new_task_id = task_obj.create(cr, uid, {
'name': bug.name,
'partner_id': bug.partner_id.id,
'description':bug.description,
'date_deadline': bug.date,
'project_id': bug.project_id.id,
# priority must be in ['0','1','2','3','4'], while bug.priority is in ['1','2','3','4','5']
'priority': str(int(bug.priority) - 1),
'user_id': bug.user_id.id,
'planned_hours': 0.0,
})
vals = {
'task_id': new_task_id,
'stage_id': self.stage_find(cr, uid, [bug], bug.project_id.id, [('state', '=', 'pending')], context=context),
}
message = _("Project issue <b>converted</b> to task.")
self.message_post(cr, uid, [bug.id], body=message, context=context)
case_obj.write(cr, uid, [bug.id], vals, context=context)
return {
'name': _('Tasks'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.task',
'res_id': int(new_task_id),
'view_id': False,
'views': [(id2,'form'),(id3,'tree'),(False,'calendar'),(False,'graph')],
'type': 'ir.actions.act_window',
'search_view_id': res['res_id'],
'nodestroy': True
}
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, id, ['name'], context=context)
if not default:
default = {}
default = default.copy()
default.update(name=_('%s (copy)') % (issue['name']))
return super(project_issue, self).copy(cr, uid, id, default=default,
context=context)
def write(self, cr, uid, ids, vals, context=None):
#Update last action date every time the user changes the stage
if 'stage_id' in vals:
vals['date_action_last'] = time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
if 'kanban_state' not in vals:
vals.update(kanban_state='normal')
state = self.pool.get('project.task.type').browse(cr, uid, vals['stage_id'], context=context).state
for issue in self.browse(cr, uid, ids, context=context):
# Change from draft to not draft EXCEPT cancelled: The issue has been opened -> set the opening date
if issue.state == 'draft' and state not in ('draft', 'cancelled'):
vals['date_open'] = time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
# Change from not done to done: The issue has been closed -> set the closing date
if issue.state != 'done' and state == 'done':
vals['date_closed'] = time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
if not task_id:
return {'value': {}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value': {'user_id': task.user_id.id, }}
def case_reset(self, cr, uid, ids, context=None):
"""Resets case as draft
"""
res = super(project_issue, self).case_reset(cr, uid, ids, context)
self.write(cr, uid, ids, {'date_open': False, 'date_closed': False})
return res
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def set_kanban_state_blocked(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'kanban_state': 'blocked'}, context=context)
def set_kanban_state_normal(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'kanban_state': 'normal'}, context=context)
def set_kanban_state_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'kanban_state': 'done'}, context=context)
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the issue:
- type: stage type must be the same or 'both'
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * (len(section_ids)-1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
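        # e.g. with section_ids = [3, 7] and domain = [('state', '=', 'draft')] this yields
        # ['|', ('project_ids', '=', 3), ('project_ids', '=', 7), ('state', '=', 'draft')]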
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def case_cancel(self, cr, uid, ids, context=None):
""" Cancels case """
self.case_set(cr, uid, ids, 'cancelled', {'active': True}, context=context)
return True
def case_escalate(self, cr, uid, ids, context=None):
cases = self.browse(cr, uid, ids)
for case in cases:
data = {}
if case.project_id.project_escalation_id:
data['project_id'] = case.project_id.project_escalation_id.id
if case.project_id.project_escalation_id.user_id:
data['user_id'] = case.project_id.project_escalation_id.user_id.id
if case.task_id:
self.pool.get('project.task').write(cr, uid, [case.task_id.id], {'project_id': data['project_id'], 'user_id': False})
else:
raise osv.except_osv(_('Warning!'), _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
self.case_set(cr, uid, ids, 'draft', data, context=context)
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
return [issue.project_id.message_get_reply_to()[0] if issue.project_id else False
for issue in self.browse(cr, uid, ids, context=context)]
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for issue in self.browse(cr, uid, ids, context=context):
if issue.partner_id:
self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
elif issue.email_from:
self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
except (osv.except_osv, orm.except_orm): # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
if context is None:
context = {}
context['state_to'] = 'draft'
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
defaults.update(custom_values)
res_id = super(project_issue, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
return res_id
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
""" Overrides mail_thread message_post so that we can set the date of last action field when
a new message is posted on the issue.
"""
if context is None:
context = {}
res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
if thread_id:
self.write(cr, uid, thread_id, {'date_action_last': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
return res
class project(osv.osv):
_inherit = "project.project"
def _get_alias_models(self, cr, uid, context=None):
return [('project.task', "Tasks"), ("project.issue", "Issues")]
def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
issue_ids = self.pool.get('project.issue').search(cr, uid, [('project_id', 'in', ids)])
for issue in self.pool.get('project.issue').browse(cr, uid, issue_ids, context):
if issue.state not in ('done', 'cancelled'):
res[issue.project_id.id] += 1
return res
_columns = {
'project_escalation_id' : fields.many2one('project.project','Project Escalation', help='If any issue is escalated from the current Project, it will be listed under the project selected here.', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'issue_count': fields.function(_issue_count, type='integer', string="Unclosed Issues"),
}
def _check_escalation(self, cr, uid, ids, context=None):
project_obj = self.browse(cr, uid, ids[0], context=context)
if project_obj.project_escalation_id:
if project_obj.project_escalation_id.id == project_obj.id:
return False
return True
_constraints = [
(_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
]
project()
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_issues' : fields.boolean('Issues', help="Check this field if this project manages issues"),
}
def on_change_template(self, cr, uid, ids, template_id, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_issues'] = template.use_issues
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
if context is None: context = {}
res = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
return res or (vals.get('use_issues') and not 'project_creation_in_progress' in context)
account_analytic_account()
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_issues': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 320,275,368,902,685,100 | 1,472,456,763,714,871,600 | 50.487844 | 272 | 0.566419 | false |
Fusion-Rom/android_external_chromium_org | tools/export_tarball/export_v8_tarball.py | 118 | 3960 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a tarball with V8 sources, but without .svn directories.
This allows easy packaging of V8, synchronized with browser releases.
Example usage:
export_v8_tarball.py /foo/bar
The above will create file /foo/bar/v8-VERSION.tar.bz2 if it doesn't exist.
"""
import optparse
import os
import re
import subprocess
import sys
import tarfile
_V8_MAJOR_VERSION_PATTERN = re.compile(r'#define\s+MAJOR_VERSION\s+(.*)')
_V8_MINOR_VERSION_PATTERN = re.compile(r'#define\s+MINOR_VERSION\s+(.*)')
_V8_BUILD_NUMBER_PATTERN = re.compile(r'#define\s+BUILD_NUMBER\s+(.*)')
_V8_PATCH_LEVEL_PATTERN = re.compile(r'#define\s+PATCH_LEVEL\s+(.*)')
_V8_PATTERNS = [
_V8_MAJOR_VERSION_PATTERN,
_V8_MINOR_VERSION_PATTERN,
_V8_BUILD_NUMBER_PATTERN,
_V8_PATCH_LEVEL_PATTERN]
_NONESSENTIAL_DIRS = (
'third_party/icu',
)
def GetV8Version(v8_directory):
"""
Returns version number as string based on the string
contents of version.cc file.
"""
with open(os.path.join(v8_directory, 'src', 'version.cc')) as version_file:
version_contents = version_file.read()
version_components = []
for pattern in _V8_PATTERNS:
version_components.append(pattern.search(version_contents).group(1).strip())
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
def GetSourceDirectory():
return os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
def GetV8Directory():
return os.path.join(GetSourceDirectory(), 'v8')
# Workaround lack of the exclude parameter in add method in python-2.4.
# TODO(phajdan.jr): remove the workaround when it's not needed on the bot.
class MyTarFile(tarfile.TarFile):
def set_remove_nonessential_files(self, remove):
self.__remove_nonessential_files = remove
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
head, tail = os.path.split(name)
if tail in ('.svn', '.git'):
return
if self.__remove_nonessential_files:
# Remove contents of non-essential directories, but preserve gyp files,
# so that build/gyp_chromium can work.
for nonessential_dir in _NONESSENTIAL_DIRS:
dir_path = os.path.join(GetV8Directory(), nonessential_dir)
if (name.startswith(dir_path) and
os.path.isfile(name) and
'gyp' not in name):
return
tarfile.TarFile.add(self, name, arcname=arcname, recursive=recursive)
def main(argv):
parser = optparse.OptionParser()
options, args = parser.parse_args(argv)
if len(args) != 1:
print 'You must provide only one argument: output file directory'
return 1
v8_directory = GetV8Directory()
if not os.path.exists(v8_directory):
print 'Cannot find the v8 directory.'
return 1
v8_version = GetV8Version(v8_directory)
print 'Packaging V8 version %s...' % v8_version
subprocess.check_call(["make", "dependencies"], cwd=v8_directory)
output_basename = 'v8-%s' % v8_version
# Package full tarball.
output_fullname = os.path.join(args[0], output_basename + '.tar.bz2')
if not os.path.exists(output_fullname):
archive = MyTarFile.open(output_fullname, 'w:bz2')
archive.set_remove_nonessential_files(False)
try:
archive.add(v8_directory, arcname=output_basename)
finally:
archive.close()
# Package lite tarball.
output_fullname = os.path.join(args[0], output_basename + '-lite.tar.bz2')
if not os.path.exists(output_fullname):
archive = MyTarFile.open(output_fullname, 'w:bz2')
archive.set_remove_nonessential_files(True)
try:
archive.add(v8_directory, arcname=output_basename)
finally:
archive.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 8,152,256,917,273,218,000 | 9,195,842,908,482,324,000 | 28.333333 | 80 | 0.690152 | false |
XiaosongWei/chromium-crosswalk | components/test/data/autofill/merge/tools/serialize_profiles.py | 137 | 2606 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import sqlite3
import sys
from autofill_merge_common import SerializeProfiles, ColumnNameToFieldType
def main():
"""Serializes the autofill_profiles table from the specified database."""
if len(sys.argv) != 2:
print "Usage: python serialize_profiles.py <path/to/database>"
return 1
database = sys.argv[1]
if not os.path.isfile(database):
print "Cannot read database at \"%s\"" % database
return 1
# Read the autofill_profile_names table.
try:
connection = sqlite3.connect(database, 0)
cursor = connection.cursor()
cursor.execute("SELECT * from autofill_profile_names;")
except sqlite3.OperationalError:
print ("Failed to read the autofill_profile_names table from \"%s\"" %
database)
raise
# For backward-compatibility, the result of |cursor.description| is a list of
# 7-tuples, in which the first item is the column name, and the remaining
# items are 'None'.
types = [ColumnNameToFieldType(item[0]) for item in cursor.description]
profiles = {}
for profile in cursor:
guid = profile[0]
profiles[guid] = zip(types, profile)
# Read the autofill_profile_emails table.
try:
cursor.execute("SELECT * from autofill_profile_emails;")
except sqlite3.OperationalError:
print ("Failed to read the autofill_profile_emails table from \"%s\"" %
database)
raise
types = [ColumnNameToFieldType(item[0]) for item in cursor.description]
for profile in cursor:
guid = profile[0]
profiles[guid].extend(zip(types, profile))
# Read the autofill_profiles table.
try:
cursor.execute("SELECT * from autofill_profiles;")
except sqlite3.OperationalError:
print "Failed to read the autofill_profiles table from \"%s\"" % database
raise
types = [ColumnNameToFieldType(item[0]) for item in cursor.description]
for profile in cursor:
guid = profile[0]
profiles[guid].extend(zip(types, profile))
# Read the autofill_profile_phones table.
try:
cursor.execute("SELECT * from autofill_profile_phones;")
except sqlite3.OperationalError:
print ("Failed to read the autofill_profile_phones table from \"%s\"" %
database)
raise
for profile in cursor:
guid = profile[0]
profiles[guid].append(("PHONE_HOME_WHOLE_NUMBER", profile[2]))
print SerializeProfiles(profiles.values())
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 3,784,438,128,660,155,000 | 4,519,226,245,819,258,400 | 29.302326 | 79 | 0.697237 | false |
stackunderflow-stackptr/stackptr_web | crossbarconnect/client.py | 1 | 8527 | ###############################################################################
##
## Copyright (C) 2012-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['Client']
try:
import ssl
_HAS_SSL = True
except ImportError:
_HAS_SSL = False
import sys
_HAS_SSL_CLIENT_CONTEXT = sys.version_info >= (2,7,9)
import json
import hmac
import hashlib
import base64
import random
from datetime import datetime
import six
from six.moves.urllib import parse
from six.moves.http_client import HTTPConnection, HTTPSConnection
def _utcnow():
"""
Get current time in UTC as ISO 8601 string.
:returns str -- Current time as string in ISO 8601 format.
"""
now = datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
def _parse_url(url):
"""
Parses a Crossbar.io HTTP bridge URL.
"""
parsed = parse.urlparse(url)
if parsed.scheme not in ["http", "https"]:
raise Exception("invalid Push URL scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "http":
port = 80
elif parsed.scheme == "https":
port = 443
else:
raise Exception("logic error")
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid Push URL: non-empty fragment '%s" % parsed.fragment)
if parsed.query is not None and parsed.query != "":
raise Exception("invalid Push URL: non-empty query string '%s" % parsed.query)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = parse.unquote(ppath)
else:
ppath = "/"
path = ppath
return {'secure': parsed.scheme == "https",
'host': parsed.hostname,
'port': port,
'path': path}
class Client:
"""
Crossbar.io HTTP bridge client.
"""
def __init__(self, url, key = None, secret = None, timeout = 5, context = None):
"""
Create a new Crossbar.io push client.
The only mandatory argument is the Push service endpoint of the Crossbar.io
instance to push to.
For signed pushes, provide authentication key and secret. If those are not
given, unsigned pushes are performed.
:param url: URL of the HTTP bridge of Crossbar.io (e.g. http://example.com:8080/push).
:type url: str
:param key: Optional key to use for signing requests.
:type key: str
:param secret: When using signed request, the secret corresponding to key.
:type secret: str
:param timeout: Timeout for requests.
:type timeout: int
:param context: If the HTTP bridge is running on HTTPS (that is securely over TLS),
then the context provides the SSL settings the client should use (e.g. the
certificate chain against which to verify the server certificate). This parameter
is only available on Python 2.7.9+ and Python 3 (otherwise the parameter is silently
ignored!). See: https://docs.python.org/2/library/ssl.html#ssl.SSLContext
:type context: obj or None
"""
if six.PY2:
if type(url) == str:
url = six.u(url)
if type(key) == str:
key = six.u(key)
if type(secret) == str:
secret = six.u(secret)
assert(type(url) == six.text_type)
assert((key and secret) or (not key and not secret))
assert(key is None or type(key) == six.text_type)
assert(secret is None or type(secret) == six.text_type)
assert(type(timeout) == int)
if _HAS_SSL and _HAS_SSL_CLIENT_CONTEXT:
assert(context is None or isinstance(context, ssl.SSLContext))
self._seq = 1
self._key = key
self._secret = secret
self._endpoint = _parse_url(url)
self._endpoint['headers'] = {
"Content-type": "application/json",
"User-agent": "crossbarconnect-python"
}
if self._endpoint['secure']:
if not _HAS_SSL:
raise Exception("Bridge URL is using HTTPS, but Python SSL module is missing")
if _HAS_SSL_CLIENT_CONTEXT:
self._connection = HTTPSConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout, context = context)
else:
self._connection = HTTPSConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout)
else:
self._connection = HTTPConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout)
def publish(self, topic, *args, **kwargs):
"""
Publish an event to subscribers on specified topic via Crossbar.io HTTP bridge.
The event payload (positional and keyword) can be of any type that can be
serialized to JSON.
If `kwargs` contains an `options` attribute, this is expected to
be a dictionary with the following possible parameters:
* `exclude`: A list of WAMP session IDs to exclude from receivers.
* `eligible`: A list of WAMP session IDs eligible as receivers.
:param topic: Topic to push to.
:type topic: str
:param args: Arbitrary application payload for the event (positional arguments).
:type args: list
:param kwargs: Arbitrary application payload for the event (keyword arguments).
:type kwargs: dict
:returns: int -- The event publication ID assigned by the broker.
"""
if six.PY2 and type(topic) == str:
topic = six.u(topic)
assert(type(topic) == six.text_type)
## this will get filled and later serialized into HTTP/POST body
##
event = {
'topic': topic
}
if 'options' in kwargs:
event['options'] = kwargs.pop('options')
assert(type(event['options']) == dict)
if args:
event['args'] = args
if kwargs:
event['kwargs'] = kwargs
try:
body = json.dumps(event, separators = (',',':'))
if six.PY3:
body = body.encode('utf8')
except Exception as e:
raise Exception("invalid event payload - not JSON serializable: {0}".format(e))
params = {
'timestamp': _utcnow(),
'seq': self._seq,
}
if self._key:
## if the request is to be signed, create extra fields and signature
params['key'] = self._key
params['nonce'] = random.randint(0, 9007199254740992)
# HMAC[SHA256]_{secret} (key | timestamp | seq | nonce | body) => signature
hm = hmac.new(self._secret.encode('utf8'), None, hashlib.sha256)
hm.update(params['key'].encode('utf8'))
hm.update(params['timestamp'].encode('utf8'))
hm.update(u"{0}".format(params['seq']).encode('utf8'))
hm.update(u"{0}".format(params['nonce']).encode('utf8'))
hm.update(body)
signature = base64.urlsafe_b64encode(hm.digest())
params['signature'] = signature
self._seq += 1
path = "{0}?{1}".format(parse.quote(self._endpoint['path']), parse.urlencode(params))
## now issue the HTTP/POST
##
self._connection.request('POST', path, body, self._endpoint['headers'])
response = self._connection.getresponse()
response_body = response.read()
if response.status not in [200, 202]:
raise Exception("publication request failed {0} [{1}] - {2}".format(response.status, response.reason, response_body))
try:
res = json.loads(response_body)
except Exception as e:
raise Exception("publication request bogus result - {0}".format(e))
return res['id']
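# Illustrative usage sketch (not part of the original module): the endpoint
# URL, topic and payload below are hypothetical examples; key/secret may be
# passed to Client() for signed pushes.
#
#   client = Client(u"http://127.0.0.1:8080/push")
#   pub_id = client.publish(u"com.myapp.topic1", "hello", count=3,
#                           options={'exclude': [1234]})
#   print("event published with id", pub_id)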
| agpl-3.0 | -4,999,009,419,062,653,000 | 5,783,102,936,989,692,000 | 32.108 | 126 | 0.587194 | false |
lexyan/SickBeard | lib/hachoir_parser/audio/modplug.py | 90 | 10667 | """
Modplug metadata inserted into module files.
Doc:
- http://modplug.svn.sourceforge.net/viewvc/modplug/trunk/modplug/soundlib/
Author: Christophe GISQUET <christophe.gisquet@free.fr>
Creation: 10th February 2007
"""
from lib.hachoir_core.field import (FieldSet,
UInt32, UInt16, UInt8, Int8, Float32,
RawBytes, String, GenericVector, ParserError)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
MAX_ENVPOINTS = 32
def parseComments(parser):
size = parser["block_size"].value
if size > 0:
yield String(parser, "comment", size)
class MidiOut(FieldSet):
static_size = 9*32*8
def createFields(self):
for name in ("start", "stop", "tick", "noteon", "noteoff",
"volume", "pan", "banksel", "program"):
yield String(self, name, 32, strip='\0')
class Command(FieldSet):
static_size = 32*8
def createFields(self):
start = self.absolute_address
size = self.stream.searchBytesLength("\0", False, start)
if size > 0:
self.info("Command: %s" % self.stream.readBytes(start, size))
yield String(self, "command", size, strip='\0')
yield RawBytes(self, "parameter", (self._size//8)-size)
class MidiSFXExt(FieldSet):
static_size = 16*32*8
def createFields(self):
for index in xrange(16):
yield Command(self, "command[]")
class MidiZXXExt(FieldSet):
static_size = 128*32*8
def createFields(self):
for index in xrange(128):
yield Command(self, "command[]")
def parseMidiConfig(parser):
yield MidiOut(parser, "midi_out")
yield MidiSFXExt(parser, "sfx_ext")
yield MidiZXXExt(parser, "zxx_ext")
def parseChannelSettings(parser):
size = parser["block_size"].value//4
if size > 0:
yield GenericVector(parser, "settings", size, UInt32, "mix_plugin")
def parseEQBands(parser):
size = parser["block_size"].value//4
if size > 0:
yield GenericVector(parser, "gains", size, UInt32, "band")
class SoundMixPluginInfo(FieldSet):
static_size = 128*8
def createFields(self):
yield textHandler(UInt32(self, "plugin_id1"), hexadecimal)
yield textHandler(UInt32(self, "plugin_id2"), hexadecimal)
yield UInt32(self, "input_routing")
yield UInt32(self, "output_routing")
yield GenericVector(self, "routing_info", 4, UInt32, "reserved")
yield String(self, "name", 32, strip='\0')
yield String(self, "dll_name", 64, desc="Original DLL name", strip='\0')
class ExtraData(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
self._size = (4+self["size"].value)*8
def createFields(self):
yield UInt32(self, "size")
size = self["size"].value
if size:
yield RawBytes(self, "data", size)
class XPlugData(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
self._size = (4+self["size"].value)*8
def createFields(self):
yield UInt32(self, "size")
while not self.eof:
yield UInt32(self, "marker")
if self["marker"].value == 'DWRT':
yield Float32(self, "dry_ratio")
elif self["marker"].value == 'PORG':
yield UInt32(self, "default_program")
def parsePlugin(parser):
yield SoundMixPluginInfo(parser, "info")
# Check if VST setchunk present
size = parser.stream.readBits(parser.absolute_address+parser.current_size, 32, LITTLE_ENDIAN)
if 0 < size < parser.current_size + parser._size:
yield ExtraData(parser, "extra_data")
# Check if XPlugData is present
size = parser.stream.readBits(parser.absolute_address+parser.current_size, 32, LITTLE_ENDIAN)
if 0 < size < parser.current_size + parser._size:
yield XPlugData(parser, "xplug_data")
# Format: "XXXX": (type, count, name)
EXTENSIONS = {
# WriteInstrumentHeaderStruct@Sndfile.cpp
"XTPM": {
"..Fd": (UInt32, 1, "Flags"),
"..OF": (UInt32, 1, "Fade out"),
"..VG": (UInt32, 1, "Global Volume"),
"...P": (UInt32, 1, "Panning"),
"..EV": (UInt32, 1, "Volume Envelope"),
"..EP": (UInt32, 1, "Panning Envelope"),
".EiP": (UInt32, 1, "Pitch Envelope"),
".SLV": (UInt8, 1, "Volume Loop Start"),
".ELV": (UInt8, 1, "Volume Loop End"),
".BSV": (UInt8, 1, "Volume Sustain Begin"),
".ESV": (UInt8, 1, "Volume Sustain End"),
".SLP": (UInt8, 1, "Panning Loop Start"),
".ELP": (UInt8, 1, "Panning Loop End"),
".BSP": (UInt8, 1, "Panning Substain Begin"),
".ESP": (UInt8, 1, "Padding Substain End"),
"SLiP": (UInt8, 1, "Pitch Loop Start"),
"ELiP": (UInt8, 1, "Pitch Loop End"),
"BSiP": (UInt8, 1, "Pitch Substain Begin"),
"ESiP": (UInt8, 1, "Pitch Substain End"),
".ANN": (UInt8, 1, "NNA"),
".TCD": (UInt8, 1, "DCT"),
".AND": (UInt8, 1, "DNA"),
"..SP": (UInt8, 1, "Panning Swing"),
"..SV": (UInt8, 1, "Volume Swing"),
".CFI": (UInt8, 1, "IFC"),
".RFI": (UInt8, 1, "IFR"),
"..BM": (UInt32, 1, "Midi Bank"),
"..PM": (UInt8, 1, "Midi Program"),
"..CM": (UInt8, 1, "Midi Channel"),
".KDM": (UInt8, 1, "Midi Drum Key"),
".SPP": (Int8, 1, "PPS"),
".CPP": (UInt8, 1, "PPC"),
".[PV": (UInt32, MAX_ENVPOINTS, "Volume Points"),
".[PP": (UInt32, MAX_ENVPOINTS, "Panning Points"),
"[PiP": (UInt32, MAX_ENVPOINTS, "Pitch Points"),
".[EV": (UInt8, MAX_ENVPOINTS, "Volume Enveloppe"),
".[EP": (UInt8, MAX_ENVPOINTS, "Panning Enveloppe"),
"[EiP": (UInt8, MAX_ENVPOINTS, "Pitch Enveloppe"),
".[MN": (UInt8, 128, "Note Mapping"),
"..[K": (UInt32, 128, "Keyboard"),
"..[n": (String, 32, "Name"),
".[nf": (String, 12, "Filename"),
".PiM": (UInt8, 1, "MixPlug"),
"..RV": (UInt16, 1, "Volume Ramping"),
"...R": (UInt16, 1, "Resampling"),
"..SC": (UInt8, 1, "Cut Swing"),
"..SR": (UInt8, 1, "Res Swing"),
"..MF": (UInt8, 1, "Filter Mode"),
},
# See after "CODE tag dictionary", same place, elements with [EXT]
"STPM": {
"...C": (UInt32, 1, "Channels"),
".VWC": (None, 0, "CreatedWith version"),
".VGD": (None, 0, "Default global volume"),
"..TD": (None, 0, "Default tempo"),
"HIBE": (None, 0, "Embedded instrument header"),
"VWSL": (None, 0, "LastSavedWith version"),
".MMP": (None, 0, "Plugin Mix mode"),
".BPR": (None, 0, "Rows per beat"),
".MPR": (None, 0, "Rows per measure"),
"@PES": (None, 0, "Chunk separator"),
".APS": (None, 0, "Song Pre-amplification"),
"..MT": (None, 0, "Tempo mode"),
"VTSV": (None, 0, "VSTi volume"),
}
}
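# Illustrative lookup sketch (not part of the original parser): the block type
# "XTPM" and code "..VG" are entries from the table above; a parsed field's
# 4-byte code selects its (field class, element count, description) triple.
#
#   cls, count, comment = EXTENSIONS["XTPM"]["..VG"]   # -> (UInt32, 1, "Global Volume")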
class MPField(FieldSet):
def __init__(self, parent, name, ext, desc=None):
FieldSet.__init__(self, parent, name, desc)
self.ext = ext
self.info(self.createDescription())
self._size = (6+self["data_size"].value)*8
def createFields(self):
# Identify tag
code = self.stream.readBytes(self.absolute_address, 4)
if code in self.ext:
cls, count, comment = self.ext[code]
else:
cls, count, comment = RawBytes, 1, "Unknown tag"
# Header
yield String(self, "code", 4, comment)
yield UInt16(self, "data_size")
# Data
if not cls:
size = self["data_size"].value
if size > 0:
yield RawBytes(self, "data", size)
elif cls in (String, RawBytes):
yield cls(self, "value", count)
else:
if count > 1:
yield GenericVector(self, "values", count, cls, "item")
else:
yield cls(self, "value")
def createDescription(self):
return "Element '%s', size %i" % \
(self["code"]._description, self["data_size"].value)
def parseFields(parser):
# Determine field names
ext = EXTENSIONS.get(parser["block_type"].value)
if ext is None:
raise ParserError("Unknown parent '%s'" % parser["block_type"].value)
# Parse fields
addr = parser.absolute_address + parser.current_size
while not parser.eof and parser.stream.readBytes(addr, 4) in ext:
field = MPField(parser, "field[]", ext)
yield field
addr += field._size
# Abort on unknown codes
parser.info("End of extension '%s' when finding '%s'" %
(parser["block_type"].value, parser.stream.readBytes(addr, 4)))
class ModplugBlock(FieldSet):
BLOCK_INFO = {
"TEXT": ("comment", True, "Comment", parseComments),
"MIDI": ("midi_config", True, "Midi configuration", parseMidiConfig),
"XFHC": ("channel_settings", True, "Channel settings", parseChannelSettings),
"XTPM": ("instrument_ext", False, "Instrument extensions", parseFields),
"STPM": ("song_ext", False, "Song extensions", parseFields),
}
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
self.parseBlock = parsePlugin
t = self["block_type"].value
self.has_size = False
if t in self.BLOCK_INFO:
self._name, self.has_size, desc, parseBlock = self.BLOCK_INFO[t]
if callable(desc):
self.createDescription = lambda: desc(self)
if parseBlock:
self.parseBlock = lambda: parseBlock(self)
if self.has_size:
self._size = 8*(self["block_size"].value + 8)
def createFields(self):
yield String(self, "block_type", 4)
if self.has_size:
yield UInt32(self, "block_size")
if self.parseBlock:
for field in self.parseBlock():
yield field
if self.has_size:
size = self["block_size"].value - (self.current_size//8)
if size > 0:
yield RawBytes(self, "data", size, "Unknown data")
def ParseModplugMetadata(parser):
while not parser.eof:
block = ModplugBlock(parser, "block[]")
yield block
if block["block_type"].value == "STPM":
break
# More undocumented stuff: date ?
size = (parser._size - parser.absolute_address - parser.current_size)//8
if size > 0:
yield RawBytes(parser, "info", size)
| gpl-3.0 | -6,328,550,716,291,144,000 | 2,101,849,836,004,009,500 | 35.656357 | 97 | 0.56417 | false |
VahidooX/DeepCCA | objectives.py | 1 | 2281 | import theano.tensor as T
def cca_loss(outdim_size, use_all_singular_values):
"""
The main loss function (inner_cca_objective) is wrapped in this function due to
the constraints imposed by Keras on objective functions
"""
def inner_cca_objective(y_true, y_pred):
"""
It is the loss function of CCA as introduced in the original paper. There can be other formulations.
It is implemented by Theano tensor operations, and does not work on Tensorflow backend
y_true is just ignored
"""
r1 = 1e-4
r2 = 1e-4
eps = 1e-12
o1 = o2 = y_pred.shape[1]//2
# unpack (separate) the output of networks for view 1 and view 2
H1 = y_pred[:, 0:o1].T
H2 = y_pred[:, o1:o1+o2].T
m = H1.shape[1]
H1bar = H1 - (1.0 / m) * T.dot(H1, T.ones([m, m]))
H2bar = H2 - (1.0 / m) * T.dot(H2, T.ones([m, m]))
SigmaHat12 = (1.0 / (m - 1)) * T.dot(H1bar, H2bar.T)
SigmaHat11 = (1.0 / (m - 1)) * T.dot(H1bar, H1bar.T) + r1 * T.eye(o1)
SigmaHat22 = (1.0 / (m - 1)) * T.dot(H2bar, H2bar.T) + r2 * T.eye(o2)
# Calculating the root inverse of covariance matrices by using eigen decomposition
[D1, V1] = T.nlinalg.eigh(SigmaHat11)
[D2, V2] = T.nlinalg.eigh(SigmaHat22)
# Added to increase stability
posInd1 = T.gt(D1, eps).nonzero()[0]
D1 = D1[posInd1]
V1 = V1[:, posInd1]
posInd2 = T.gt(D2, eps).nonzero()[0]
D2 = D2[posInd2]
V2 = V2[:, posInd2]
SigmaHat11RootInv = T.dot(T.dot(V1, T.nlinalg.diag(D1 ** -0.5)), V1.T)
SigmaHat22RootInv = T.dot(T.dot(V2, T.nlinalg.diag(D2 ** -0.5)), V2.T)
Tval = T.dot(T.dot(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv)
if use_all_singular_values:
# all singular values are used to calculate the correlation
corr = T.sqrt(T.nlinalg.trace(T.dot(Tval.T, Tval)))
else:
# just the top outdim_size singular values are used
[U, V] = T.nlinalg.eigh(T.dot(Tval.T, Tval))
U = U[T.gt(U, eps).nonzero()[0]]
U = U.sort()
corr = T.sum(T.sqrt(U[0:outdim_size]))
return -corr
return inner_cca_objective
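# Illustrative usage sketch (not part of the original module): assumes a Keras
# model on the Theano backend whose single output concatenates the top layers
# of the two views side by side; the outdim_size value is an arbitrary example.
#
#   model.compile(loss=cca_loss(outdim_size=10, use_all_singular_values=False),
#                 optimizer='rmsprop')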
| mit | -4,617,263,627,075,889,000 | -2,169,575,786,629,578,200 | 34.640625 | 108 | 0.562034 | false |
nagyistoce/nips14-ssl | anglepy/paramgraphics.py | 5 | 6074 | import numpy as np
import os
import PIL.Image
import pylab
def save_images(images, directory, filename):
if not os.path.exists(directory):
os.makedirs(directory)
w = sum(i.size[0] for i in images)
mh = max(i.size[1] for i in images)
result = PIL.Image.new("RGBA", (w, mh))
x = 0
for i in images:
result.paste(i, (x, 0))
x += i.size[0]
result.save(directory+'/'+filename)
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale=True,
output_pixel_vals=True,
colorImg=False):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. uint8
values) or floats
:param scale: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`PIL.Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
X = X * 1.0 # converts ints to floats
if colorImg:
channelSize = X.shape[1]/3
X = (X[:,0:channelSize], X[:,channelSize:2*channelSize], X[:,2*channelSize:3*channelSize], None)
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0] + tile_spacing[0]) * tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1] + tile_spacing[1]) * tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8' if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
xi = X[i]
if scale:
xi = (X[i] - X[i].min()) / (X[i].max() - X[i].min())
out_array[:, :, i] = tile_raster_images(xi, img_shape, tile_shape, tile_spacing, False, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
tmp = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
this_img = scale_to_unit_interval(tmp)
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
out_array[
tile_row * (H+Hs): tile_row * (H + Hs) + H,
tile_col * (W+Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
return out_array
# Matrix to image
def mat_to_img(w, dim_input, scale=False, colorImg=False, tile_spacing=(1,1), tile_shape=0):
if tile_shape == 0:
rowscols = int(w.shape[1]**0.5)
tile_shape = (rowscols,rowscols)
imgs = tile_raster_images(X=w.T, img_shape=dim_input, tile_shape=tile_shape, tile_spacing=tile_spacing, scale=scale, colorImg=colorImg)
return PIL.Image.fromarray(imgs)
# Show filters
def imgshow(plt, w, dim_input, scale=False, colorImg=False, convertImgs=False, tile_spacing=(1,1)):
if convertImgs:
channelSize = w.shape[0]/3
w = tuple([w[channelSize*i:channelSize*(i+1)] for i in range(3)])
plt.axis('Off')
pil_image = mat_to_img(w, dim_input, scale, colorImg, tile_spacing)
plt.imshow(pil_image, cmap=pylab.gray(), origin='upper')
return pil_image
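# Illustrative usage sketch (not part of the original module): the matrix
# shape, image size and filename below are hypothetical examples.
#
#   w = np.random.rand(784, 100)                        # 100 columns of 28x28 images
#   img = mat_to_img(w, dim_input=(28, 28), scale=True)
#   img.save('filters.png')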
| mit | -683,286,421,113,473,200 | 2,388,237,893,426,395,600 | 36.9625 | 139 | 0.557129 | false |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons/EWOCprojects_tools/EWOCprojects_tools/__init__.py | 2 | 6870 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Contributed to by
# meta-androcto #
bl_info = {
"name": "EWOCprojects tools",
"author": "Gert De Roost - paleajed",
"version": (0, 4, 1),
"blender": (2, 65, 0),
"location": "View3D > Toolbar and View3D > Specials (W-key)",
"description": "Edit mode tools - contrib version",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh"}
if "bpy" in locals():
import imp
imp.reload(mesh_edgetune)
imp.reload(mesh_quadder)
imp.reload(mesh_paredge)
imp.reload(mesh_edgegrow)
imp.reload(mesh_fanconnect)
imp.reload(object_fastorigin)
imp.reload(mesh_laprelax)
imp.reload(mesh_polyredux)
imp.reload(mesh_filletplus)
imp.reload(mesh_innerweld)
imp.reload(mesh_straightenplus)
imp.reload(mesh_floodsel)
imp.reload(mesh_deathguppie)
imp.reload(mesh_selproject)
imp.reload(object_creaprim)
imp.reload(object_decouple)
imp.reload(object_keeptrans)
else:
from . import mesh_edgetune
from . import mesh_quadder
from . import mesh_paredge
from . import mesh_edgegrow
from . import mesh_fanconnect
from . import object_fastorigin
from . import mesh_laprelax
from . import mesh_polyredux
from . import mesh_filletplus
from . import mesh_innerweld
from . import mesh_straightenplus
from . import mesh_floodsel
from . import mesh_deathguppie
from . import mesh_selproject
from . import object_creaprim
from . import object_decouple
from . import object_keeptrans
import bpy
from bpy.app.handlers import persistent
class VIEW3D_MT_edit_mesh_paleajed(bpy.types.Menu):
# Define the "Extras" menu
bl_idname = "VIEW3D_MT_edit_mesh_paleajed"
bl_label = "EWOCprojects tools"
def draw(self, context):
layout = self.layout
layout.operator_context = "INVOKE_REGION_WIN"
layout.operator("mesh.edgetune",
text="EdgeTune")
layout.operator("mesh.quadder",
text="Quadder")
layout.operator("mesh.paredge",
text="ParEdge")
layout.operator("mesh.edgegrow",
text="EdgeGrow")
layout.operator("mesh.fanconnect",
text="FanConnect")
layout.operator("object.fastorigin",
text="FastOrigin")
layout.operator("mesh.laprelax",
text="LapRelax")
layout.operator("mesh.polyredux",
text="PolyRedux")
layout.operator("mesh.filletplus",
text="FilletPlus")
layout.operator("mesh.innerweld",
text="InnerWeld")
layout.operator("mesh.straightenplus",
text="StraightenPlus")
layout.operator("mesh.floodsel",
text="FloodSel")
layout.operator("mesh.deathguppie",
text="DeathGuppie")
layout.operator("mesh.selproject",
text="SelProject")
class PaleajedPanel(bpy.types.Panel):
bl_label = "EWOCprojects tools"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'Tools'
def draw(self, context):
scn = bpy.context.scene
layout = self.layout
layout.operator("mesh.edgetune")
layout.operator("mesh.quadder")
layout.operator("mesh.paredge")
if mesh_paredge.started:
layout.prop(mesh_paredge.mainop, "Distance")
layout.prop(mesh_paredge.mainop, "Both")
if mesh_paredge.mainop.Both:
layout.prop(mesh_paredge.mainop, "Cap")
layout.operator("mesh.edgegrow")
layout.operator("mesh.fanconnect")
layout.operator("object.fastorigin")
layout.operator("mesh.laprelax")
layout.operator("mesh.polyredux")
layout.operator("mesh.filletplus")
layout.operator("mesh.innerweld")
if not(mesh_straightenplus.started):
layout.operator("mesh.straightenplus")
else:
layout.operator("mesh.straightenplus")
msop = mesh_straightenplus.mainop
layout.prop(msop, "Percentage")
if mesh_straightenplus.started and msop.Percentage != msop.oldperc:
msop.do_straighten()
msop.oldperc = msop.Percentage
layout.prop(msop, "CancelAxis")
layout.operator("mesh.floodsel", text="Flood Sel")
if mesh_floodsel.started:
layout.prop(mesh_floodsel.mainop, "SelectMode")
layout.prop(mesh_floodsel.mainop, "Multiple")
layout.prop(mesh_floodsel.mainop, "Preselection")
layout.prop(mesh_floodsel.mainop, "Diagonal")
layout.operator("mesh.deathguppie")
layout.prop(scn, "Smooth")
layout.prop(scn, "Inner")
if not(mesh_selproject.started):
self.layout.operator("mesh.selproject", text="SelProject")
if context.mode == 'EDIT_MESH':
self.layout.prop(scn, "UseSel")
if not(scn.UseSel):
self.layout.prop(scn, "FromObject")
else:
scn.FromObject = bpy.context.active_object.name
context.region.tag_redraw()
else:
self.layout.prop(scn, "FromObject")
self.layout.prop(scn, "ToObject")
else:
self.layout.label(text="ENTER to confirm")
self.layout.operator("object.creaprim")
self.layout.prop(scn, "Name")
self.layout.prop(scn, "Apply")
if not(object_decouple.unparented):
layout.operator("object.decouple",
text="DeCouple")
else:
layout.operator("object.recouple",
text="ReCouple")
layout.operator("object.keeptrans")
# Register all operators and panels
# Define "Extras" menu
def menu_func(self, context):
self.layout.menu("VIEW3D_MT_edit_mesh_paleajed", icon='PLUGIN')
def register():
bpy.app.handlers.scene_update_post.append(sceneupdate_handler)
bpy.utils.register_module(__name__)
# Add "Extras" menu to the "Add Mesh" menu
bpy.types.VIEW3D_MT_edit_mesh_specials.prepend(menu_func)
def unregister():
bpy.app.handlers.scene_update_post.remove(sceneupdate_handler)
bpy.utils.unregister_module(__name__)
# Remove "Extras" menu from the "Add Mesh" menu.
bpy.types.VIEW3D_MT_edit_mesh_specials.remove(menu_func)
if __name__ == "__main__":
register()
@persistent
def sceneupdate_handler(dummy):
scn = bpy.context.scene
if not(list(scn.objects) == mesh_selproject.oldobjs):
itemlist = []
objs = list(scn.objects)
for ob in objs:
if ob.type == 'MESH':
itemlist.append((ob.name, ob.name, "Set From:"))
bpy.types.Scene.FromObject = bpy.props.EnumProperty(
items = itemlist,
name = "From",
description = "Object to project")
bpy.types.Scene.ToObject = bpy.props.EnumProperty(
items = itemlist,
name = "To",
description = "Object to project onto")
mesh_selproject.oldobjs = list(scn.objects)
| gpl-2.0 | -7,978,270,275,408,563,000 | -5,456,785,936,967,054,000 | 27.155738 | 74 | 0.714265 | false |
bdh1011/cupeye | venv/lib/python2.7/site-packages/werkzeug/utils.py | 148 | 23063 | # -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import pkgutil
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
from werkzeug._compat import unichr, text_type, string_types, iteritems, \
reraise, PY2
from werkzeug._internal import _DictAccessorProperty, \
_parse_signature, _missing
_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
_entity_re = re.compile(r'&([^;]+);')
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
'LPT2', 'LPT3', 'PRN', 'NUL')
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one chooses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
class environ_property(_DictAccessorProperty):
"""Maps request attributes to environment variables. This works not only
for the Werkzeug request object, but also any other class with an
environ attribute:
>>> class Test(object):
... environ = {'key': 'value'}
... test = environ_property('key')
>>> var = Test()
>>> var.test
'value'
If you pass it a second value it's used as default if the key does not
exist, the third one can be a converter that takes a value and converts
it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
is used. If no default value is provided `None` is used.
Per default the property is read only. You have to explicitly enable it
by passing ``read_only=False`` to the constructor.
"""
read_only = True
def lookup(self, obj):
return obj.environ
class header_property(_DictAccessorProperty):
"""Like `environ_property` but for headers."""
def lookup(self, obj):
return obj.headers
class HTMLBuilder(object):
"""Helper object for HTML generation.
Per default there are two instances of that class. The `html` one, and
the `xhtml` one for those two dialects. The class uses keyword parameters
and positional parameters to generate small snippets of HTML.
Keyword parameters are converted to XML/SGML attributes, positional
arguments are used as children. Because Python accepts positional
arguments before keyword arguments it's a good idea to use a list with the
star-syntax for some children:
>>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
... html.a('bar', href='bar.html')])
u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
This class works around some browser limitations and can not be used for
arbitrary SGML/XML generation. For that purpose lxml and similar
libraries exist.
Calling the builder escapes the string passed:
>>> html.p(html("<foo>"))
u'<p><foo></p>'
"""
_entity_re = re.compile(r'&([^;]+);')
_entities = name2codepoint.copy()
_entities['apos'] = 39
_empty_elements = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param',
'source', 'wbr'
])
_boolean_attributes = set([
'selected', 'checked', 'compact', 'declare', 'defer', 'disabled',
'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'
])
_plaintext_elements = set(['textarea'])
_c_like_cdata = set(['script', 'style'])
def __init__(self, dialect):
self._dialect = dialect
def __call__(self, s):
return escape(s)
def __getattr__(self, tag):
if tag[:2] == '__':
raise AttributeError(tag)
def proxy(*children, **arguments):
buffer = '<' + tag
for key, value in iteritems(arguments):
if value is None:
continue
if key[-1] == '_':
key = key[:-1]
if key in self._boolean_attributes:
if not value:
continue
if self._dialect == 'xhtml':
value = '="' + key + '"'
else:
value = ''
else:
value = '="' + escape(value) + '"'
buffer += ' ' + key + value
if not children and tag in self._empty_elements:
if self._dialect == 'xhtml':
buffer += ' />'
else:
buffer += '>'
return buffer
buffer += '>'
children_as_string = ''.join([text_type(x) for x in children
if x is not None])
if children_as_string:
if tag in self._plaintext_elements:
children_as_string = escape(children_as_string)
elif tag in self._c_like_cdata and self._dialect == 'xhtml':
children_as_string = '/*<![CDATA[*/' + \
children_as_string + '/*]]>*/'
buffer += children_as_string + '</' + tag + '>'
return buffer
return proxy
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__,
self._dialect
)
html = HTMLBuilder('html')
xhtml = HTMLBuilder('xhtml')
def get_content_type(mimetype, charset):
"""Returns the full content type string with charset for a mimetype.
If the mimetype represents text the charset will be appended as charset
parameter, otherwise the mimetype is returned unchanged.
:param mimetype: the mimetype to be used as content type.
:param charset: the charset to be appended in case it was a text mimetype.
:return: the content type.
"""
if mimetype.startswith('text/') or \
mimetype == 'application/xml' or \
(mimetype.startswith('application/') and
mimetype.endswith('+xml')):
mimetype += '; charset=' + charset
return mimetype
def format_string(string, context):
"""String-template format a string:
>>> format_string('$foo and ${foo}s', dict(foo=42))
'42 and 42s'
This does not do any attribute lookup etc. For more advanced string
formatting have a look at the `werkzeug.template` module.
:param string: the format string.
:param context: a dict with the variables to insert.
"""
def lookup_arg(match):
x = context[match.group(1) or match.group(2)]
if not isinstance(x, string_types):
x = type(string)(x)
return x
return _format_re.sub(lookup_arg, string)
def secure_filename(filename):
r"""Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate a random
filename if the function returned an empty one.
.. versionadded:: 0.5
:param filename: the filename to secure
"""
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def escape(s, quote=None):
"""Replace special characters "&", "<", ">" and (") to HTML-safe sequences.
There is a special handling for `None` which escapes to an empty string.
.. versionchanged:: 0.9
`quote` is now implicitly on.
:param s: the string to escape.
:param quote: ignored.
"""
if s is None:
return ''
elif hasattr(s, '__html__'):
return text_type(s.__html__())
elif not isinstance(s, string_types):
s = text_type(s)
if quote is not None:
from warnings import warn
warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2)
s = s.replace('&', '&').replace('<', '<') \
.replace('>', '>').replace('"', """)
return s
def unescape(s):
"""The reverse function of `escape`. This unescapes all the HTML
entities, not only the XML entities inserted by `escape`.
:param s: the string to unescape.
"""
def handle_match(m):
name = m.group(1)
if name in HTMLBuilder._entities:
return unichr(HTMLBuilder._entities[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, s)
def redirect(location, code=302, Response=None):
"""Returns a response object (a WSGI application) that, if called,
redirects the client to the target location. Supported codes are 301,
302, 303, 305, and 307. 300 is not supported because it's not a real
redirect and 304 because it's the answer for a request with defined
If-Modified-Since headers.
.. versionadded:: 0.6
The location can now be a unicode string that is encoded using
the :func:`iri_to_uri` function.
.. versionadded:: 0.10
The class used for the Response object can now be passed in.
:param location: the location the response should redirect to.
:param code: the redirect status code. defaults to 302.
:param class Response: a Response class to use when instantiating a
response. The default is :class:`werkzeug.wrappers.Response` if
unspecified.
"""
if Response is None:
from werkzeug.wrappers import Response
display_location = escape(location)
if isinstance(location, text_type):
# Safe conversion is necessary here as we might redirect
# to a broken URI scheme (for instance itms-services).
from werkzeug.urls import iri_to_uri
location = iri_to_uri(location, safe_conversion=True)
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(escape(location), display_location), code, mimetype='text/html')
response.headers['Location'] = location
return response
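# Illustrative example (not part of the original module; the URL is hypothetical):
#
#   return redirect('https://example.com/login', code=302)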
def append_slash_redirect(environ, code=301):
"""Redirects to the same URL but with a slash appended. The behavior
of this function is undefined if the path ends with a slash already.
:param environ: the WSGI environment for the request that triggers
the redirect.
:param code: the status code for the redirect.
"""
new_path = environ['PATH_INFO'].strip('/') + '/'
query_string = environ.get('QUERY_STRING')
if query_string:
new_path += '?' + query_string
return redirect(new_path, code)
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
import_name = str(import_name).replace(':', '.')
try:
try:
__import__(import_name)
except ImportError:
if '.' not in import_name:
raise
else:
return sys.modules[import_name]
module_name, obj_name = import_name.rsplit('.', 1)
try:
module = __import__(module_name, None, None, [obj_name])
except ImportError:
# support importing modules not yet set up by the parent module
# (or package for that matter)
module = import_string(module_name)
try:
return getattr(module, obj_name)
except AttributeError as e:
raise ImportError(e)
except ImportError as e:
if not silent:
reraise(
ImportStringError,
ImportStringError(import_name, e),
sys.exc_info()[2])
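# Illustrative examples (not part of the original module; the paths refer to
# the standard library):
#
#   import_string('os.path.join')                   # dotted notation
#   import_string('os.path:join')                   # colon as object delimiter
#   import_string('no.such.module', silent=True)    # -> None instead of raising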
def find_modules(import_path, include_packages=False, recursive=False):
"""Finds all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
:param import_path: the dotted name for the package to find child modules.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
"""
module = import_string(import_path)
path = getattr(module, '__path__', None)
if path is None:
raise ValueError('%r is not a package' % import_path)
basename = module.__name__ + '.'
for importer, modname, ispkg in pkgutil.iter_modules(path):
modname = basename + modname
if ispkg:
if include_packages:
yield modname
if recursive:
for item in find_modules(modname, include_packages, True):
yield item
else:
yield modname
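# Illustrative example (not part of the original module; the package name is
# hypothetical):
#
#   for name in find_modules('myapp.views', include_packages=True, recursive=True):
#       import_string(name)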
def validate_arguments(func, args, kwargs, drop_extra=True):
"""Checks if the function accepts the arguments and keyword arguments.
Returns a new ``(args, kwargs)`` tuple that can safely be passed to
the function without causing a `TypeError` because the function signature
is incompatible. If `drop_extra` is set to `True` (which is the default)
any extra positional or keyword arguments are dropped automatically.
The exception raised provides three attributes:
`missing`
A set of argument names that the function expected but were
missing.
`extra`
A dict of keyword arguments that the function can not handle but
were provided.
`extra_positional`
A list of values that were given by positional argument but the
function cannot accept.
This can be useful for decorators that forward user submitted data to
a view function::
from werkzeug.utils import ArgumentValidationError, validate_arguments
def sanitize(f):
def proxy(request):
data = request.values.to_dict()
try:
args, kwargs = validate_arguments(f, (request,), data)
except ArgumentValidationError:
raise BadRequest('The browser failed to transmit all '
'the data expected.')
return f(*args, **kwargs)
return proxy
:param func: the function the validation is performed against.
:param args: a tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:param drop_extra: set to `False` if you don't want extra arguments
to be silently dropped.
:return: tuple in the form ``(args, kwargs)``.
"""
parser = _parse_signature(func)
args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
if missing:
raise ArgumentValidationError(tuple(missing))
elif (extra or extra_positional) and not drop_extra:
raise ArgumentValidationError(None, extra, extra_positional)
return tuple(args), kwargs
def bind_arguments(func, args, kwargs):
"""Bind the arguments provided into a dict. When passed a function,
a tuple of arguments and a dict of keyword arguments `bind_arguments`
returns a dict of names as the function would see it. This can be useful
to implement a cache decorator that uses the function arguments to build
the cache key based on the values of the arguments.
:param func: the function the arguments should be bound for.
:param args: tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:return: a :class:`dict` of bound keyword arguments.
"""
args, kwargs, missing, extra, extra_positional, \
arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
values = {}
for (name, has_default, default), value in zip(arg_spec, args):
values[name] = value
if vararg_var is not None:
values[vararg_var] = tuple(extra_positional)
elif extra_positional:
raise TypeError('too many positional arguments')
if kwarg_var is not None:
multikw = set(extra) & set([x[0] for x in arg_spec])
if multikw:
raise TypeError('got multiple values for keyword argument ' +
repr(next(iter(multikw))))
values[kwarg_var] = extra
elif extra:
raise TypeError('got unexpected keyword argument ' +
repr(next(iter(extra))))
return values
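# Illustrative example (not part of the original module):
#
#   def add(a, b, c=3):
#       return a + b + c
#
#   bind_arguments(add, (1, 2, 10), {})   # -> {'a': 1, 'b': 2, 'c': 10}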
class ArgumentValidationError(ValueError):
"""Raised if :func:`validate_arguments` fails to validate"""
def __init__(self, missing=None, extra=None, extra_positional=None):
self.missing = set(missing or ())
self.extra = extra or {}
self.extra_positional = extra_positional or []
ValueError.__init__(self, 'function arguments invalid. ('
'%d missing, %d additional)' % (
len(self.missing),
len(self.extra) + len(self.extra_positional)
))
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
# circular dependencies
from werkzeug.http import quote_header_value, unquote_header_value, \
cookie_date
# DEPRECATED
# these objects were previously in this module as well. we import
# them here for backwards compatibility with old pickles.
from werkzeug.datastructures import MultiDict, CombinedMultiDict, \
Headers, EnvironHeaders
from werkzeug.http import parse_cookie, dump_cookie
| bsd-3-clause | -6,275,994,589,413,769,000 | -1,477,958,765,962,136,600 | 35.959936 | 83 | 0.608334 | false |
tersmitten/ansible | lib/ansible/modules/cloud/google/gcp_compute_interconnect_attachment.py | 4 | 18037 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_interconnect_attachment
description:
- Represents an InterconnectAttachment (VLAN attachment) resource. For more information,
see Creating VLAN Attachments.
short_description: Creates a GCP InterconnectAttachment
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
interconnect:
description:
- URL of the underlying Interconnect object that this attachment's traffic will
traverse through. Required if type is DEDICATED, must not be set if type is
PARTNER.
required: false
description:
description:
- An optional description of this resource.
required: false
edge_availability_domain:
description:
- Desired availability domain for the attachment. Only available for type PARTNER,
at creation time. For improved reliability, customers should configure a pair
of attachments with one per availability domain. The selected availability domain
will be provided to the Partner via the pairing key so that the provisioned
circuit will lie in the specified domain. If not specified, the value will default
to AVAILABILITY_DOMAIN_ANY.
required: false
type:
description:
- The type of InterconnectAttachment you wish to create. Defaults to DEDICATED.
required: false
choices:
- DEDICATED
- PARTNER
- PARTNER_PROVIDER
router:
description:
- URL of the cloud router to be used for dynamic routing. This router must be
in the same region as this InterconnectAttachment. The InterconnectAttachment
will automatically connect the Interconnect to the network & region within which
the Cloud Router is configured.
- 'This field represents a link to a Router resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_router task and then set this router field to "{{ name-of-resource
}}"'
required: true
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
candidate_subnets:
description:
- Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress
and customerRouterIpAddress for this attachment.
- All prefixes must be within link-local address space (169.254.0.0/16) and must
be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29
from the supplied candidate prefix(es). The request will fail if all possible
/29s are in use on Google's edge. If not supplied, Google will randomly select
an unused /29 from all of link-local space.
required: false
vlan_tag8021q:
description:
- The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094.
required: false
region:
description:
- Region where the regional interconnect attachment resides.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a interconnect attachment
gcp_compute_interconnect_attachment:
name: test_object
region: us-central1
project: test_project
auth_kind: serviceaccount
interconnect: https://googleapis.com/compute/v1/projects/test_project/global/interconnects/...
router: https://googleapis.com/compute/v1/projects/test_project/regions/us-central1/routers/...
service_account_file: "/tmp/auth.pem"
state: present
register: disk
'''
RETURN = '''
cloudRouterIpAddress:
description:
- IPv4 address + prefix length to be configured on Cloud Router Interface for this
interconnect attachment.
returned: success
type: str
customerRouterIpAddress:
description:
- IPv4 address + prefix length to be configured on the customer router subinterface
for this interconnect attachment.
returned: success
type: str
interconnect:
description:
- URL of the underlying Interconnect object that this attachment's traffic will
traverse through. Required if type is DEDICATED, must not be set if type is PARTNER.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
edgeAvailabilityDomain:
description:
- Desired availability domain for the attachment. Only available for type PARTNER,
at creation time. For improved reliability, customers should configure a pair
of attachments with one per availability domain. The selected availability domain
will be provided to the Partner via the pairing key so that the provisioned circuit
will lie in the specified domain. If not specified, the value will default to
AVAILABILITY_DOMAIN_ANY.
returned: success
type: str
pairingKey:
description:
- '[Output only for type PARTNER. Not present for DEDICATED]. The opaque identifier
of an PARTNER attachment used to initiate provisioning with a selected partner.
Of the form "XXXXX/region/domain" .'
returned: success
type: str
partnerAsn:
description:
- "[Output only for type PARTNER. Not present for DEDICATED]. Optional BGP ASN for
the router that should be supplied by a layer 3 Partner if they configured BGP
on behalf of the customer."
returned: success
type: str
privateInterconnectInfo:
description:
- Information specific to an InterconnectAttachment. This property is populated
if the interconnect that this is attached to is of type DEDICATED.
returned: success
type: complex
contains:
tag8021q:
description:
- 802.1q encapsulation tag to be used for traffic between Google and the customer,
going to and from this network and region.
returned: success
type: int
type:
description:
- The type of InterconnectAttachment you wish to create. Defaults to DEDICATED.
returned: success
type: str
state:
description:
- "[Output Only] The current state of this attachment's functionality."
returned: success
type: str
googleReferenceId:
description:
- Google reference ID, to be used when raising support tickets with Google or otherwise
to debug backend connectivity issues.
returned: success
type: str
router:
description:
- URL of the cloud router to be used for dynamic routing. This router must be in
the same region as this InterconnectAttachment. The InterconnectAttachment will
automatically connect the Interconnect to the network & region within which the
Cloud Router is configured.
returned: success
type: dict
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
candidateSubnets:
description:
- Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress
and customerRouterIpAddress for this attachment.
- All prefixes must be within link-local address space (169.254.0.0/16) and must
be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29
from the supplied candidate prefix(es). The request will fail if all possible
/29s are in use on Google's edge. If not supplied, Google will randomly select
an unused /29 from all of link-local space.
returned: success
type: list
vlanTag8021q:
description:
- The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094.
returned: success
type: int
region:
description:
- Region where the regional interconnect attachment resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
interconnect=dict(type='str'),
description=dict(type='str'),
edge_availability_domain=dict(type='str'),
type=dict(type='str', choices=['DEDICATED', 'PARTNER', 'PARTNER_PROVIDER']),
router=dict(required=True, type='dict'),
name=dict(required=True, type='str'),
candidate_subnets=dict(type='list', elements='str'),
vlan_tag8021q=dict(type='int'),
region=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#interconnectAttachment'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
delete(module, self_link(module), kind)
create(module, collection(module), kind)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#interconnectAttachment',
u'interconnect': module.params.get('interconnect'),
u'description': module.params.get('description'),
u'edgeAvailabilityDomain': module.params.get('edge_availability_domain'),
u'type': module.params.get('type'),
u'router': replace_resource_dict(module.params.get(u'router', {}), 'selfLink'),
u'name': module.params.get('name'),
u'candidateSubnets': module.params.get('candidate_subnets'),
u'vlanTag8021q': module.params.get('vlan_tag8021q'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/interconnectAttachments".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'cloudRouterIpAddress': response.get(u'cloudRouterIpAddress'),
u'customerRouterIpAddress': response.get(u'customerRouterIpAddress'),
u'interconnect': response.get(u'interconnect'),
u'description': response.get(u'description'),
u'edgeAvailabilityDomain': response.get(u'edgeAvailabilityDomain'),
u'pairingKey': response.get(u'pairingKey'),
u'partnerAsn': response.get(u'partnerAsn'),
u'privateInterconnectInfo': InterconnectAttachmentPrivateinterconnectinfo(response.get(u'privateInterconnectInfo', {}), module).from_response(),
u'type': response.get(u'type'),
u'state': response.get(u'state'),
u'googleReferenceId': response.get(u'googleReferenceId'),
u'router': response.get(u'router'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'id': response.get(u'id'),
u'name': response.get(u'name'),
u'candidateSubnets': response.get(u'candidateSubnets'),
u'vlanTag8021q': response.get(u'vlanTag8021q'),
}
def region_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1/projects/.*/regions/[a-z1-9\-]*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name
return name
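# For illustration (the project id below is hypothetical): region_selflink() leaves a
# full self-link untouched and expands a bare region name, e.g.
#   region_selflink('us-central1', {'project': 'my-project'})
#   -> 'https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1'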
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#interconnectAttachment')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class InterconnectAttachmentPrivateinterconnectinfo(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({})
def from_response(self):
return remove_nones_from_dict({})
if __name__ == '__main__':
main()
| gpl-3.0 | -1,013,111,257,229,280,900 | 8,711,365,324,471,904,000 | 35.364919 | 152 | 0.663858 | false |
openstack/ironic | ironic/common/release_mappings.py | 1 | 12857 | # Copyright 2016 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.common.i18n import _
# NOTE(xek): This decides the version cap of RPC messages sent to conductor
# and objects during rolling upgrades, when [DEFAULT]/pin_release_version
# configuration is set.
#
# Remember to add a new entry for the new version that is shipping in a new
# release.
#
# We support a rolling upgrade between adjacent named releases, as well as
# between a release and master, so old, unsupported releases can be removed,
# together with the supporting code, which is typically found in an object's
# make_compatible methods and RPC client code.
# NOTE(xek): The format of this dict is:
# { '<release version>': {
# 'api': '<Bare Metal API version>',
# 'rpc': '<RPC API version>',
# 'objects': {
# '<object class name>': ['<object version>'],
# }
# },
# }
# The list should contain all objects which are persisted in the database and
# sent over RPC. Notifications/Payloads are not being included here since we
# don't need to pin them during rolling upgrades.
#
# For each object, list the versions that the object can be in for a particular
# release. That is, any new versions that were added in that release. If there
# were no new versions, it should have the same (latest) version as the
# previous release.
# NOTE(rloo): We need a list, not just the latest version, for the DB queries
# that filter for objects that are not in particular versions; for more info,
# see comments after L1128 of
# https://review.opendev.org/#/c/408556/52/ironic/db/sqlalchemy/api.py.
#
# There should always be a 'master' entry that reflects the objects in the
# master branch.
#
# Just before doing a release, copy the 'master' entry, and rename the first
# 'master' entry to the (semver) version being released.
#
# Just after doing a named release, delete any entries associated with the
# oldest named release.
RELEASE_MAPPING = {
'9.2': {
'rpc': '1.41',
'api': '1.35',
'objects': {
'Node': ['1.21'],
'Conductor': ['1.2'],
'Chassis': ['1.3'],
'Port': ['1.7'],
'Portgroup': ['1.3'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'10.0': {
'api': '1.36',
'rpc': '1.42',
'objects': {
'Node': ['1.22'],
'Conductor': ['1.2'],
'Chassis': ['1.3'],
'Port': ['1.7'],
'Portgroup': ['1.3'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'10.1': {
'api': '1.38',
'rpc': '1.44',
'objects': {
'Node': ['1.23'],
'Conductor': ['1.2'],
'Chassis': ['1.3'],
'Port': ['1.7'],
'Portgroup': ['1.3'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'11.0': {
'api': '1.43',
'rpc': '1.44',
'objects': {
'Node': ['1.25', '1.24'],
'Conductor': ['1.2'],
'Chassis': ['1.3'],
'Port': ['1.8'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'11.1': {
'api': '1.46',
'rpc': '1.47',
'objects': {
'Node': ['1.27', '1.26'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Port': ['1.8'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'12.0': {
'api': '1.49',
'rpc': '1.47',
'objects': {
'Node': ['1.29', '1.28'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Port': ['1.8'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'12.1': {
'api': '1.56',
'rpc': '1.48',
'objects': {
'Allocation': ['1.0'],
'Node': ['1.32', '1.31', '1.30'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'DeployTemplate': ['1.0', '1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'12.2': {
'api': '1.58',
'rpc': '1.48',
'objects': {
'Allocation': ['1.0'],
'Node': ['1.32'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'DeployTemplate': ['1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'13.0': {
'api': '1.58',
'rpc': '1.48',
'objects': {
'Allocation': ['1.0'],
'Node': ['1.32'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'DeployTemplate': ['1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'14.0': {
'api': '1.61',
'rpc': '1.48',
'objects': {
'Allocation': ['1.1'],
'Node': ['1.33', '1.32'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'DeployTemplate': ['1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'15.0': {
'api': '1.65',
'rpc': '1.50',
'objects': {
'Allocation': ['1.1'],
'Node': ['1.34', '1.33', '1.32'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'DeployTemplate': ['1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'15.1': {
'api': '1.67',
'rpc': '1.50',
'objects': {
'Allocation': ['1.1'],
'Node': ['1.35', '1.34'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'DeployTemplate': ['1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'16.0': {
'api': '1.68',
'rpc': '1.51',
'objects': {
'Allocation': ['1.1'],
'Node': ['1.35'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Deployment': ['1.0'],
'DeployTemplate': ['1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'16.1': {
'api': '1.68',
'rpc': '1.51',
'objects': {
'Allocation': ['1.1'],
'Node': ['1.35'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Deployment': ['1.0'],
'DeployTemplate': ['1.1'],
'Port': ['1.9'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'16.2': {
'api': '1.69',
'rpc': '1.52',
'objects': {
'Allocation': ['1.1'],
'Node': ['1.35'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Deployment': ['1.0'],
'DeployTemplate': ['1.1'],
'Port': ['1.10'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'17.0': {
'api': '1.72',
'rpc': '1.54',
'objects': {
'Allocation': ['1.1'],
'Node': ['1.35'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Deployment': ['1.0'],
'DeployTemplate': ['1.1'],
'Port': ['1.10'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'18.0': {
'api': '1.74',
'rpc': '1.54',
'objects': {
'Allocation': ['1.1'],
'BIOSSetting': ['1.1'],
'Node': ['1.35'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Deployment': ['1.0'],
'DeployTemplate': ['1.1'],
'Port': ['1.10'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
'master': {
'api': '1.74',
'rpc': '1.54',
'objects': {
'Allocation': ['1.1'],
'BIOSSetting': ['1.1'],
'Node': ['1.35'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Deployment': ['1.0'],
'DeployTemplate': ['1.1'],
'Port': ['1.10'],
'Portgroup': ['1.4'],
'Trait': ['1.0'],
'TraitList': ['1.0'],
'VolumeConnector': ['1.0'],
'VolumeTarget': ['1.0'],
}
},
}
# NOTE(xek): Assign each named release to the appropriate semver.
#
# Just before we do a new named release (more specifically, create
# a stable/<release> branch), add a mapping for the new named
# release. This is needed; otherwise CI: a unit test (common.
# ReleaseMappingsTestCase.test_contains_current_release_entry())
# and grenade that tests old/new (new-release -> master) will fail.
#
# Just after we do a new named release, delete the oldest named
# release (that we are no longer supporting for a rolling upgrade).
#
# There should be at most two named mappings here.
# NOTE(mgoddard): remove victoria prior to the xena release.
RELEASE_MAPPING['victoria'] = RELEASE_MAPPING['16.0']
RELEASE_MAPPING['wallaby'] = RELEASE_MAPPING['17.0']
# List of available versions with named versions first; 'master' is excluded.
RELEASE_VERSIONS = sorted(set(RELEASE_MAPPING) - {'master'}, reverse=True)
# List of available (version, description) tuples.
RELEASE_VERSIONS_DESCS = [(v, _('"%s" release') % v) for v in RELEASE_VERSIONS]
def get_object_versions(releases=None, objects=None):
"""Gets the supported versions for all objects.
Supported versions are from the RELEASE_MAPPINGs.
:param releases: a list of release names; if empty/None, versions from all
releases are returned (the default).
:param objects: a list of names of objects of interest. If empty/None,
versions of all objects are returned (the default).
:returns: a dictionary where the key is the object name and the value is
a set of supported versions.
"""
if not releases:
releases = list(RELEASE_MAPPING)
versions = {}
for release in releases:
object_mapping = RELEASE_MAPPING[release]['objects']
for obj, version_list in object_mapping.items():
if not objects or obj in objects:
versions.setdefault(obj, set()).update(version_list)
return versions
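# A short worked example of the helper above (the exact output depends on which
# mappings are present at any given time): with the mappings in this file,
#   get_object_versions(releases=['victoria', 'wallaby'], objects=['Node'])
# returns {'Node': {'1.35'}}, since both named releases pin Node to version 1.35.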
| apache-2.0 | 3,794,942,388,484,504,000 | -1,148,718,736,854,764,400 | 30.435208 | 79 | 0.442327 | false |
p0cisk/Quantum-GIS | python/plugins/processing/algs/grass7/ext/i_gensigset.py | 7 | 1995 | # -*- coding: utf-8 -*-
"""
***************************************************************************
i_gensigset.py
--------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import absolute_import
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .i import regroupRasters, file2Output, moveFile
from os import path
from ..Grass7Utils import Grass7Utils
def processCommand(alg):
# Transform output files in string parameter
signatureFile = alg.getOutputFromName('signaturefile')
origSigFile = signatureFile.value
shortSigFile = path.basename(origSigFile)
alg.setOutputValue('signaturefile', shortSigFile)
signatureFile = file2Output(alg, 'signaturefile')
# Regroup rasters
group, subgroup = regroupRasters(alg, 'input', 'group', 'subgroup')
# Re-add signature files
alg.addOutput(signatureFile)
# Find Grass directory
interSig = path.join(Grass7Utils.grassMapsetFolder(), 'PERMANENT', 'group', group, 'subgroup', subgroup, 'sigset', shortSigFile)
moveFile(alg, interSig, origSigFile)
alg.setOutputValue('signaturefile', origSigFile)
| gpl-2.0 | 1,649,274,113,198,355,500 | -1,007,767,305,565,914,800 | 37.25 | 132 | 0.538462 | false |
childresslab/MicrocavityExp1 | gui/manager/managergui.py | 1 | 25022 | # -*- coding: utf-8 -*-
""" This module contains a GUI through which the Manager core class can be controlled.
It can load and reload modules, show the configuration, and re-open closed windows.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import core.logger
import logging
import numpy as np
import os
from collections import OrderedDict
from core.module import StatusVar
from .errordialog import ErrorDialog
from gui.guibase import GUIBase
from qtpy import QtCore, QtWidgets, uic
from qtpy.QtGui import QPalette
from qtpy.QtWidgets import QWidget
try:
from qtconsole.inprocess import QtInProcessKernelManager
except ImportError:
from IPython.qt.inprocess import QtInProcessKernelManager
try:
from git import Repo
except:
pass
try:
import pyqtgraph as pg
_has_pyqtgraph = True
except:
_has_pyqtgraph = False
# Rather than import the ui*.py file here, the ui*.ui file itself is
# loaded by uic.loadUI in the QtGui classes below.
class ManagerGui(GUIBase):
"""This class provides a GUI to the Qudi manager.
@signal sigStartAll: sent when all modules should be loaded
@signal str str sigStartThis: load a specific module
    @signal str str sigReloadThis: reload a specific module from Python code
@signal str str sigStopThis: stop all actions of a module and remove
references
It supports module loading, reloading, logging and other
administrative tasks.
"""
# status vars
consoleFontSize = StatusVar('console_font_size', 10)
# signals
sigStartAll = QtCore.Signal()
sigStartModule = QtCore.Signal(str, str)
sigReloadModule = QtCore.Signal(str, str)
sigCleanupStatus = QtCore.Signal(str, str)
sigStopModule = QtCore.Signal(str, str)
sigLoadConfig = QtCore.Signal(str, bool)
sigSaveConfig = QtCore.Signal(str)
sigRealQuit = QtCore.Signal()
def __init__(self, **kwargs):
"""Create an instance of the module.
@param object manager:
@param str name:
@param dict config:
"""
super().__init__(**kwargs)
self.modlist = list()
self.modules = set()
def on_activate(self):
""" Activation method called on change to active state.
This method creates the Manager main window.
"""
if _has_pyqtgraph:
# set background of pyqtgraph
testwidget = QWidget()
testwidget.ensurePolished()
bgcolor = testwidget.palette().color(QPalette.Normal,
testwidget.backgroundRole())
# set manually the background color in hex code according to our
# color scheme:
pg.setConfigOption('background', bgcolor)
# opengl usage
if 'useOpenGL' in self._manager.tree['global']:
pg.setConfigOption('useOpenGL',
self._manager.tree['global']['useOpenGL'])
self._mw = ManagerMainWindow()
self.restoreWindowPos(self._mw)
self.errorDialog = ErrorDialog(self)
self._about = AboutDialog()
version = self.getSoftwareVersion()
configFile = self._manager.configFile
self._about.label.setText(
'<a href=\"https://github.com/Ulm-IQO/qudi/commit/{0}\"'
' style=\"color: cyan;\"> {0} </a>, on branch {1}.'.format(
version[0], version[1]))
self.versionLabel = QtWidgets.QLabel()
self.versionLabel.setText(
'<a href=\"https://github.com/Ulm-IQO/qudi/commit/{0}\"'
' style=\"color: cyan;\"> {0} </a>,'
' on branch {1}, configured from {2}'.format(
version[0], version[1], configFile))
self.versionLabel.setOpenExternalLinks(True)
self._mw.statusBar().addWidget(self.versionLabel)
# Connect up the buttons.
self._mw.actionQuit.triggered.connect(self._manager.quit)
self._mw.actionLoad_configuration.triggered.connect(self.getLoadFile)
self._mw.actionReload_current_configuration.triggered.connect(self.reloadConfig)
self._mw.actionSave_configuration.triggered.connect(self.getSaveFile)
self._mw.action_Load_all_modules.triggered.connect(self._manager.startAllConfiguredModules)
self._mw.actionAbout_Qt.triggered.connect(QtWidgets.QApplication.aboutQt)
self._mw.actionAbout_Qudi.triggered.connect(self.showAboutQudi)
self._mw.actionReset_to_default_layout.triggered.connect(self.resetToDefaultLayout)
self._manager.sigShowManager.connect(self.show)
self._manager.sigConfigChanged.connect(self.updateConfigWidgets)
self._manager.sigModulesChanged.connect(self.updateConfigWidgets)
self._manager.sigShutdownAcknowledge.connect(self.promptForShutdown)
# Log widget
self._mw.logwidget.setManager(self._manager)
for loghandler in logging.getLogger().handlers:
if isinstance(loghandler, core.logger.QtLogHandler):
loghandler.sigLoggedMessage.connect(self.handleLogEntry)
# Module widgets
self.sigStartModule.connect(self._manager.startModule)
self.sigReloadModule.connect(self._manager.restartModuleRecursive)
self.sigCleanupStatus.connect(self._manager.removeStatusFile)
self.sigStopModule.connect(self._manager.deactivateModule)
self.sigLoadConfig.connect(self._manager.loadConfig)
self.sigSaveConfig.connect(self._manager.saveConfig)
self.sigRealQuit.connect(self._manager.realQuit)
# Module state display
self.checkTimer = QtCore.QTimer()
self.checkTimer.start(1000)
self.updateGUIModuleList()
# IPython console widget
self.startIPython()
self.updateIPythonModuleList()
self.startIPythonWidget()
# thread widget
self._mw.threadWidget.threadListView.setModel(self._manager.tm)
# remote widget
self._mw.remoteWidget.hostLabel.setText('URL:')
self._mw.remoteWidget.portLabel.setText(
'rpyc://{0}:{1}/'.format(self._manager.rm.host,
self._manager.rm.server.port))
self._mw.remoteWidget.remoteModuleListView.setModel(
self._manager.rm.remoteModules)
self._mw.remoteWidget.sharedModuleListView.setModel(
self._manager.rm.sharedModules)
self._mw.configDisplayDockWidget.hide()
self._mw.remoteDockWidget.hide()
self._mw.threadDockWidget.hide()
self._mw.show()
def on_deactivate(self):
"""Close window and remove connections.
"""
self.stopIPythonWidget()
self.stopIPython()
self.checkTimer.stop()
if len(self.modlist) > 0:
self.checkTimer.timeout.disconnect()
self.sigStartModule.disconnect()
self.sigReloadModule.disconnect()
self.sigStopModule.disconnect()
self.sigLoadConfig.disconnect()
self.sigSaveConfig.disconnect()
self._mw.actionQuit.triggered.disconnect()
self._mw.actionLoad_configuration.triggered.disconnect()
self._mw.actionSave_configuration.triggered.disconnect()
self._mw.action_Load_all_modules.triggered.disconnect()
self._mw.actionAbout_Qt.triggered.disconnect()
self._mw.actionAbout_Qudi.triggered.disconnect()
self.saveWindowPos(self._mw)
self._mw.close()
def show(self):
"""Show the window and bring it t the top.
"""
QtWidgets.QMainWindow.show(self._mw)
self._mw.activateWindow()
self._mw.raise_()
def showAboutQudi(self):
"""Show a dialog with details about Qudi.
"""
self._about.show()
@QtCore.Slot(bool, bool)
def promptForShutdown(self, locked, broken):
""" Display a dialog, asking the user to confirm shutdown. """
text = "Some modules are locked right now, really quit?"
result = QtWidgets.QMessageBox.question(
self._mw,
'Qudi: Really Quit?',
text,
QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No
)
if result == QtWidgets.QMessageBox.Yes:
self.sigRealQuit.emit()
def resetToDefaultLayout(self):
""" Return the dockwidget layout and visibility to its default state """
self._mw.configDisplayDockWidget.setVisible(False)
self._mw.consoleDockWidget.setVisible(True)
self._mw.remoteDockWidget.setVisible(False)
self._mw.threadDockWidget.setVisible(False)
self._mw.logDockWidget.setVisible(True)
self._mw.actionConfigurationView.setChecked(False)
self._mw.actionConsoleView.setChecked(True)
self._mw.actionRemoteView.setChecked(False)
self._mw.actionThreadsView.setChecked(False)
self._mw.actionLogView.setChecked(True)
self._mw.configDisplayDockWidget.setFloating(False)
self._mw.consoleDockWidget.setFloating(False)
self._mw.remoteDockWidget.setFloating(False)
self._mw.threadDockWidget.setFloating(False)
self._mw.logDockWidget.setFloating(False)
self._mw.addDockWidget(QtCore.Qt.DockWidgetArea(8), self._mw.configDisplayDockWidget)
self._mw.addDockWidget(QtCore.Qt.DockWidgetArea(2), self._mw.consoleDockWidget)
self._mw.addDockWidget(QtCore.Qt.DockWidgetArea(8), self._mw.remoteDockWidget)
self._mw.addDockWidget(QtCore.Qt.DockWidgetArea(8), self._mw.threadDockWidget)
self._mw.addDockWidget(QtCore.Qt.DockWidgetArea(8), self._mw.logDockWidget)
def handleLogEntry(self, entry):
""" Forward log entry to log widget and show an error popup if it is
an error message.
@param dict entry: Log entry
"""
self._mw.logwidget.addEntry(entry)
if entry['level'] == 'error' or entry['level'] == 'critical':
self.errorDialog.show(entry)
def startIPython(self):
""" Create an IPython kernel manager and kernel.
Add modules to its namespace.
"""
# make sure we only log errors and above from ipython
logging.getLogger('ipykernel').setLevel(logging.WARNING)
self.log.debug('IPy activation in thread {0}'.format(
QtCore.QThread.currentThreadId()))
self.kernel_manager = QtInProcessKernelManager()
self.kernel_manager.start_kernel()
self.kernel = self.kernel_manager.kernel
self.namespace = self.kernel.shell.user_ns
self.namespace.update({
'np': np,
'config': self._manager.tree['defined'],
'manager': self._manager
})
if _has_pyqtgraph:
self.namespace['pg'] = pg
self.updateIPythonModuleList()
self.kernel.gui = 'qt4'
self.log.info('IPython has kernel {0}'.format(
self.kernel_manager.has_kernel))
self.log.info('IPython kernel alive {0}'.format(
self.kernel_manager.is_alive()))
self._manager.sigModulesChanged.connect(self.updateIPythonModuleList)
def startIPythonWidget(self):
""" Create an IPython console widget and connect it to an IPython
kernel.
"""
if (_has_pyqtgraph):
            banner_modules = 'The numpy and pyqtgraph modules have already ' \
                             "been imported as 'np' and 'pg'."
        else:
            banner_modules = 'The numpy module has already been imported ' \
                             "as 'np'."
banner = """
This is an interactive IPython console. {0}
Configuration is in 'config', the manager is 'manager' and all loaded modules are in this namespace with their configured name.
View the current namespace with dir().
Go, play.
""".format(banner_modules)
self._mw.consolewidget.banner = banner
# font size
self.consoleSetFontSize(self.consoleFontSize)
# settings
self._csd = ConsoleSettingsDialog()
self._csd.accepted.connect(self.consoleApplySettings)
self._csd.rejected.connect(self.consoleKeepSettings)
self._csd.buttonBox.button(
QtWidgets.QDialogButtonBox.Apply).clicked.connect(
self.consoleApplySettings)
self._mw.actionConsoleSettings.triggered.connect(self._csd.exec_)
self.consoleKeepSettings()
self._mw.consolewidget.kernel_manager = self.kernel_manager
self._mw.consolewidget.kernel_client = \
self._mw.consolewidget.kernel_manager.client()
self._mw.consolewidget.kernel_client.start_channels()
# the linux style theme which is basically the monokai theme
self._mw.consolewidget.set_default_style(colors='linux')
def stopIPython(self):
""" Stop the IPython kernel.
"""
self.log.debug('IPy deactivation: {0}'.format(QtCore.QThread.currentThreadId()))
self.kernel_manager.shutdown_kernel()
def stopIPythonWidget(self):
""" Disconnect the IPython widget from the kernel.
"""
self._mw.consolewidget.kernel_client.stop_channels()
def updateIPythonModuleList(self):
"""Remove non-existing modules from namespace,
add new modules to namespace, update reloaded modules
"""
currentModules = set()
newNamespace = dict()
for base in ['hardware', 'logic', 'gui']:
for module in self._manager.tree['loaded'][base]:
currentModules.add(module)
newNamespace[module] = self._manager.tree[
'loaded'][base][module]
discard = self.modules - currentModules
self.namespace.update(newNamespace)
for module in discard:
self.namespace.pop(module, None)
self.modules = currentModules
def consoleKeepSettings(self):
""" Write old values into config dialog.
"""
self._csd.fontSizeBox.setProperty('value', self.consoleFontSize)
def consoleApplySettings(self):
""" Apply values from config dialog to console.
"""
self.consoleSetFontSize(self._csd.fontSizeBox.value())
def consoleSetFontSize(self, fontsize):
self._mw.consolewidget.font_size = fontsize
self.consoleFontSize = fontsize
self._mw.consolewidget.reset_font()
def updateConfigWidgets(self):
""" Clear and refill the tree widget showing the configuration.
"""
self.fillTreeWidget(self._mw.treeWidget, self._manager.tree)
def updateGUIModuleList(self):
""" Clear and refill the module list widget
"""
# self.clearModuleList(self)
self.fillModuleList(self._mw.guilayout, 'gui')
self.fillModuleList(self._mw.logiclayout, 'logic')
self.fillModuleList(self._mw.hwlayout, 'hardware')
def fillModuleList(self, layout, base):
""" Fill the module list widget with module widgets for defined gui
modules.
          @param QLayout layout: layout of the module list widget where
                                 module widgets should be added
@param str base: module category to fill
"""
for module in self._manager.tree['defined'][base]:
            if module not in self._manager.tree['global']['startup']:
widget = ModuleListItem(self._manager, base, module)
self.modlist.append(widget)
layout.addWidget(widget)
widget.sigLoadThis.connect(self.sigStartModule)
widget.sigReloadThis.connect(self.sigReloadModule)
widget.sigDeactivateThis.connect(self.sigStopModule)
widget.sigCleanupStatus.connect(self.sigCleanupStatus)
self.checkTimer.timeout.connect(widget.checkModuleState)
def fillTreeItem(self, item, value):
""" Recursively fill a QTreeWidgeItem with the contents from a
dictionary.
@param QTreeWidgetItem item: the widget item to fill
@param (dict, list, etc) value: value to fill in
"""
item.setExpanded(True)
if type(value) is OrderedDict or type(value) is dict:
for key in value:
child = QtWidgets.QTreeWidgetItem()
child.setText(0, key)
item.addChild(child)
self.fillTreeItem(child, value[key])
elif type(value) is list:
for val in value:
child = QtWidgets.QTreeWidgetItem()
item.addChild(child)
if type(val) is dict:
child.setText(0, '[dict]')
self.fillTreeItem(child, val)
elif type(val) is OrderedDict:
child.setText(0, '[odict]')
self.fillTreeItem(child, val)
elif type(val) is list:
child.setText(0, '[list]')
self.fillTreeItem(child, val)
else:
child.setText(0, str(val))
child.setExpanded(True)
else:
child = QtWidgets.QTreeWidgetItem()
child.setText(0, str(value))
item.addChild(child)
def getSoftwareVersion(self):
""" Try to determine the software version in case the program is in
a git repository.
"""
try:
repo = Repo(self.get_main_dir())
branch = repo.active_branch
rev = str(repo.head.commit)
return (rev, str(branch))
except Exception as e:
print('Could not get git repo because:', e)
return ('unknown', -1)
def fillTreeWidget(self, widget, value):
""" Fill a QTreeWidget with the content of a dictionary
@param QTreeWidget widget: the tree widget to fill
@param dict,OrderedDict value: the dictionary to fill in
"""
widget.clear()
self.fillTreeItem(widget.invisibleRootItem(), value)
def reloadConfig(self):
""" Reload the current config. """
reply = QtWidgets.QMessageBox.question(
self._mw,
'Restart',
'Do you want to restart the current configuration?',
QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No
)
configFile = self._manager._getConfigFile()
restart = (reply == QtWidgets.QMessageBox.Yes)
self.sigLoadConfig.emit(configFile, restart)
def getLoadFile(self):
""" Ask the user for a file where the configuration should be loaded
from
"""
defaultconfigpath = os.path.join(self.get_main_dir(), 'config')
filename = QtWidgets.QFileDialog.getOpenFileName(
self._mw,
            'Load Configuration',
defaultconfigpath,
'Configuration files (*.cfg)')[0]
if filename != '':
reply = QtWidgets.QMessageBox.question(
self._mw,
'Restart',
'Do you want to restart to use the configuration?',
QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No
)
restart = (reply == QtWidgets.QMessageBox.Yes)
self.sigLoadConfig.emit(filename, restart)
def getSaveFile(self):
""" Ask the user for a file where the configuration should be saved
to.
"""
defaultconfigpath = os.path.join(self.get_main_dir(), 'config')
filename = QtWidgets.QFileDialog.getSaveFileName(
self._mw,
            'Save Configuration',
defaultconfigpath,
'Configuration files (*.cfg)')[0]
if filename != '':
self.sigSaveConfig.emit(filename)
class ManagerMainWindow(QtWidgets.QMainWindow):
""" This class represents the Manager Window.
"""
def __init__(self):
""" Create the Manager Window.
"""
# Get the path to the *.ui file
this_dir = os.path.dirname(__file__)
ui_file = os.path.join(this_dir, 'ui_manager_window.ui')
# Load it
super(ManagerMainWindow, self).__init__()
uic.loadUi(ui_file, self)
self.show()
# Set up the layout
# this really cannot be done in Qt designer, you cannot set a layout
# on an empty widget
self.guilayout = QtWidgets.QVBoxLayout(self.guiscroll)
self.logiclayout = QtWidgets.QVBoxLayout(self.logicscroll)
self.hwlayout = QtWidgets.QVBoxLayout(self.hwscroll)
class AboutDialog(QtWidgets.QDialog):
""" This class represents the Qudi About dialog.
"""
def __init__(self):
""" Create Qudi About Dialog.
"""
# Get the path to the *.ui file
this_dir = os.path.dirname(__file__)
ui_file = os.path.join(this_dir, 'ui_about.ui')
# Load it
super().__init__()
uic.loadUi(ui_file, self)
class ConsoleSettingsDialog(QtWidgets.QDialog):
""" Create the SettingsDialog window, based on the corresponding *.ui
file.
"""
def __init__(self):
# Get the path to the *.ui file
this_dir = os.path.dirname(__file__)
ui_file = os.path.join(this_dir, 'ui_console_settings.ui')
# Load it
super().__init__()
uic.loadUi(ui_file, self)
class ModuleListItem(QtWidgets.QFrame):
""" This class represents a module widget in the Qudi module list.
@signal str str sigLoadThis: gives signal with base and name of module
to be loaded
@signal str str sigReloadThis: gives signal with base and name of
module to be reloaded
@signal str str sigStopThis: gives signal with base and name of module
to be deactivated
"""
sigLoadThis = QtCore.Signal(str, str)
sigReloadThis = QtCore.Signal(str, str)
sigDeactivateThis = QtCore.Signal(str, str)
sigCleanupStatus = QtCore.Signal(str, str)
def __init__(self, manager, basename, modulename):
""" Create a module widget.
@param str basename: module category
@param str modulename: unique module name
"""
# Get the path to the *.ui file
this_dir = os.path.dirname(__file__)
ui_file = os.path.join(this_dir, 'ui_module_widget.ui')
# Load it
super().__init__()
uic.loadUi(ui_file, self)
self.manager = manager
self.name = modulename
self.base = basename
self.loadButton.setText('Load {0}'.format(self.name))
# connect buttons
self.loadButton.clicked.connect(self.loadButtonClicked)
self.reloadButton.clicked.connect(self.reloadButtonClicked)
self.deactivateButton.clicked.connect(self.deactivateButtonClicked)
self.cleanupButton.clicked.connect(self.cleanupButtonClicked)
def loadButtonClicked(self):
""" Send signal to load and activate this module.
"""
self.sigLoadThis.emit(self.base, self.name)
if self.base == 'gui':
self.loadButton.setText('Show {0}'.format(self.name))
def reloadButtonClicked(self):
""" Send signal to reload this module.
"""
self.sigReloadThis.emit(self.base, self.name)
def deactivateButtonClicked(self):
""" Send signal to deactivate this module.
"""
self.sigDeactivateThis.emit(self.base, self.name)
def cleanupButtonClicked(self):
""" Send signal to deactivate this module.
"""
self.sigCleanupStatus.emit(self.base, self.name)
def checkModuleState(self):
""" Get the state of this module and display it in the statusLabel
"""
state = ''
if self.statusLabel.text() != 'exception, cannot get state':
try:
if (self.base in self.manager.tree['loaded']
and self.name in self.manager.tree['loaded'][self.base]):
state = self.manager.tree['loaded'][self.base][self.name].getState()
else:
state = 'not loaded'
except:
state = 'exception, cannot get state'
self.statusLabel.setText(state)
| gpl-3.0 | -2,194,907,414,192,798,500 | 8,774,676,485,791,247,000 | 37.975078 | 127 | 0.622812 | false |
meteorcloudy/tensorflow | tensorflow/contrib/all_reduce/__init__.py | 38 | 1466 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""All-reduce implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.all_reduce.python.all_reduce import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
'build_ring_all_reduce',
'build_recursive_hd_all_reduce',
'build_shuffle_all_reduce',
'build_nccl_all_reduce',
'build_nccl_then_ring',
'build_nccl_then_recursive_hd',
'build_nccl_then_shuffle',
'build_shuffle_then_ring',
'build_shuffle_then_shuffle'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| apache-2.0 | -1,616,739,215,081,602,600 | -9,071,975,448,318,683,000 | 36.589744 | 80 | 0.705321 | false |
soylentdeen/BlurryApple | Tools/Gendron/gral_zer.py | 2 | 7113 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 24 16:52:46 2014
Mix of various routines around Zernike modes
@author: tristanbuey
"""
# Load libraries
import numpy as np;
def gammX(n, i0):
"""
gammX(n, i0)
    Computes the Noll matrix Gx of x-derivatives of Zernike modes.
    The matrix applies to a Zernike vector z and produces
    the Zernike decomposition z' of the x-derivative:
    z' = Gx . z
    n = number of Zernike coefficients in the input vector z.
    i0 = Zernike index of the first coefficient of z (1=piston, 2=tip, ...)
    As a result, Gx is a matrix of size (n+i0-1, n).
"""
gg = np.zeros((i0+n-1,n));
# variable i will span Zernike indexes, starting at i0.
for i in range (i0, i0+n):
# variable j spans Zernike indexes, starting at piston
# and stopping at i
for j in range(1, i+1):
gg[j-1,i-i0] = gamX(i,j);
return gg;
def gammY(n, i0):
"""
gammY(n, i0)
    Computes the Noll matrix Gy of y-derivatives of Zernike modes.
    The matrix applies to a Zernike vector z and produces
    the Zernike decomposition z' of the y-derivative:
    z' = Gy . z
    n = number of Zernike coefficients in the input vector z.
    i0 = Zernike index of the first coefficient of z (1=piston, 2=tip, ...)
    As a result, Gy is a matrix of size (n+i0-1, n).
"""
gg = np.zeros((i0+n-1,n));
# variable i will span Zernike indexes, starting at i0.
for i in range(i0, i0+n):
# variable j spans Zernike indexes, starting at piston
# and stopping at i
for j in range(1, i+1):
gg[j-1,i-i0] = gamY(i,j);
return gg;
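# A small usage sketch (sizes chosen arbitrarily): with n=3 modes starting at
# tip (i0=2), gammX/gammY return 4x3 matrices. For instance, the x-derivative of
# defocus (Z4) projects onto tip (Z2) with coefficient 2*sqrt(3) ~ 3.46, which is
# the corresponding entry of Gx:
#   Gx = gammX(3, 2)        # shape (4, 3)
#   Gy = gammY(3, 2)        # shape (4, 3)
#   dzdx = np.dot(Gx, z)    # Zernike coefficients of the x-derivative of z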
"""
A lot of sub-functions to calculate the Noll matrix
"""
def pair(number):
return number % 2 == 0;
def impair(num):
return num % 2 != 0;
def nm(i):
"""
For a given Zernike mode of index <i>, returns the
    radial and azimuthal orders (n,m)
"""
n = int( (-1.+np.sqrt(8*(i-1)+1))/2.);
p = (i-(n*(n+1))/2);
k = n%2;
m = int((p+k)/2)*2 - k;
return (n,m);
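# Worked examples of the Noll index -> (n, m) mapping implemented above:
#   nm(2)  -> (1, 1)   tip/tilt
#   nm(4)  -> (2, 0)   defocus
#   nm(11) -> (4, 0)   spherical aberration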
def gamY(i,j):
"""
Input arguments:
    two scalar ints i and j, which are indexes of Zernike modes.
    Returns the coefficient of the derivative matrix of (Noll R.J., 1976).
    The algorithm coded below is a python translation of the series of
    rules that Noll stated in his 1976 article for differentiating
    Zernike polynomials.
    Warning: unfortunately Noll made a small error in his rules,
    which has been corrected in this program.
"""
    # determine radial and azimuthal orders of Zernike number i
ni,mi = nm(i);
# idem for j
n,m = nm(j);
# Noll's rules :
if(mi==(m-1) or mi==(m+1)):
if(m==0 or mi==0):
if((m==0 and impair(i)) or (mi==0 and impair(j))):
return np.sqrt(2*(n+1)*(ni+1));
else:
return 0.00;
else:
if(impair(i+j)):
if((mi==m+1 and impair(j)) or (mi==m-1 and pair(j))):
return -np.sqrt((n+1)*(ni+1));
else:
return np.sqrt((n+1)*(ni+1));
else:
return 0.0;
else:
return 0.0;
return;
def gamX(i,j):
"""
Input arguments:
    two scalar ints i and j, which are indexes of Zernike modes.
    Returns the coefficient of the derivative matrix of (Noll R.J., 1976).
    The algorithm coded below is a python translation of the series of
    rules that Noll stated in his 1976 article for differentiating
    Zernike polynomials.
    Warning: unfortunately Noll made a small error in his rules,
    which has been corrected in this program.
"""
    # determine radial and azimuthal orders of Zernike number i
ni,mi = nm(i);
# idem for j
n,m = nm(j);
# Noll's rules :
if(mi==m-1 or mi==m+1):
if(m==0 or mi==0):
if((m==0 and pair(i)) or (mi==0 and pair(j))):
return np.sqrt(2*(n+1)*(ni+1));
else:
return 0.00;
else:
if( (j+i)%2==0 ):
return np.sqrt((n+1)*(ni+1));
else:
return 0.00;
else:
return 0.0;
return;
def polyfute(m,n):
"""
    The coefficients of the Zernike polynomials are the K_mn(s).
    The coefficient K_mn(s) weights r^(n-2s).
    They obey the recurrence relation
    K_mn(s+1) = K_mn(s) * ((n+m)/2-s)*((n-m)/2-s)/(s+1)/(n-s)
    together with
    K_mn(0) = n! / ((n+m)/2)! / ((n-m)/2)!
"""
a = np.zeros(n+1)
    # Compute K_mn(0)
st = 2 # start index for dividing by ((n-m)/2)!
coef = 1.00
for i in range((n+m)/2+1, n+1):
if( st<=((n-m)/2) and i%st==0 ) :
j = i/st
st = st+1
coef = coef*j
else:
coef = coef*i
# division by ((n-m)/2)! (has already been partially done)
for i in range(st,(n-m)/2+1):
coef = coef / i
    a[n] = round(coef); # for K_mn(0)
for i in range(1,(n-m)/2+1):
coef = -coef * ((n+m)/2-i+1)*((n-m)/2-i+1);
coef = coef / i;
coef = coef / (n-i+1);
a[n-2*i] = round(coef)
return a
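# A quick sanity check of the recurrence, worked out by hand:
#   polyfute(0, 2) -> [-1., 0., 2.]            i.e. R(r) = 2r^2 - 1
#   polyfute(0, 4) -> [1., 0., -6., 0., 6.]    i.e. R(r) = 6r^4 - 6r^2 + 1
# These are the radial parts of defocus and spherical aberration, before the
# sqrt(n+1) normalisation applied in zer() below.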
def evaluate_poly(n,m,a,r):
"""
evaluate_poly(n,m,a,r)
n is the radial order
    m is the azimuthal order
    a[] is the list of coefficients, with a[i] the coefficient of r^i
r is the variable of the polynomial
"""
if n>1 :
r2 = r*r
p = a[n]
for i in range(n-2,m-1,-2):
p = p*r2 + a[i]
if(m==0): return p
elif(m==1): p*=r
elif(m==2): p*=r2
else: p = p * r**m
return p
def zer(r,t,i):
"""
    Computes the Zernike polynomial of index i, at point (r,t).
    The algorithm uses
    1) a recurrence relation to compute the coefficients of the polynomial,
    so that no factorial of large numbers has to be evaluated (which risks
    roundoff errors and takes more execution time)
    2) a smarter way to compute polynomial expressions such as
    ax^3+bx^2+cx+d = x(x(ax+b)+c)+d
    to avoid roundoff errors and minimize the number of operations
"""
if(i==1):
return np.ones_like(r+t)
    # compute n and m from i
n = int( (-1.+np.sqrt(8*(i-1)+1))/2.)
p = (i-(n*(n+1))/2);
k = n%2;
m = int((p+k)/2)*2 - k;
a = polyfute(m,n)
Z = evaluate_poly(n,m,a,r) * np.sqrt(n+1);
if( m!=0 ):
Z *= np.sqrt(2);
if( i%2 ):
Z *= np.sin(m*t)
else:
Z *= np.cos(m*t)
return Z
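# Spot check, worked out by hand: defocus is Z4(r, t) = sqrt(3)*(2*r**2 - 1), so
#   zer(0.0, 0.0, 4) -> -sqrt(3) ~ -1.732
#   zer(1.0, 0.0, 4) ->  sqrt(3) ~  1.732
# and zer(r, t, 1) is identically 1 (piston), whatever r and t are.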
"""
Defines a meshgrid of npt X npt points, and expresses
it in polar coordinates.
Returns a tuple (r,theta).
"""
def mkxy(npt, center):
# generate an array of coordinates
if center==1:
x = np.linspace(-1,1,npt+1)[0:npt]
else:
x = np.linspace(-1,1,npt)
x,y = np.meshgrid(x,x)
# generates a map of the distance of subapertures to pupil center
r = np.sqrt(x**2 + y**2)
    # generates a map of the azimuth angle of subapertures
theta = np.arctan2(y,x)
return r,theta | gpl-2.0 | -8,310,914,567,057,378,000 | 2,246,355,805,592,243,200 | 23.443299 | 77 | 0.530934 | false |
balloob/home-assistant | homeassistant/components/locative/__init__.py | 10 | 4327 | """Support for Locative."""
import logging
from typing import Dict
from aiohttp import web
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.const import (
ATTR_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_WEBHOOK_ID,
HTTP_OK,
HTTP_UNPROCESSABLE_ENTITY,
STATE_NOT_HOME,
)
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
_LOGGER = logging.getLogger(__name__)
DOMAIN = "locative"
TRACKER_UPDATE = f"{DOMAIN}_tracker_update"
ATTR_DEVICE_ID = "device"
ATTR_TRIGGER = "trigger"
def _id(value: str) -> str:
"""Coerce id by removing '-'."""
return value.replace("-", "")
def _validate_test_mode(obj: Dict) -> Dict:
"""Validate that id is provided outside of test mode."""
if ATTR_ID not in obj and obj[ATTR_TRIGGER] != "test":
raise vol.Invalid("Location id not specified")
return obj
WEBHOOK_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(ATTR_LATITUDE): cv.latitude,
vol.Required(ATTR_LONGITUDE): cv.longitude,
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_TRIGGER): cv.string,
vol.Optional(ATTR_ID): vol.All(cv.string, _id),
},
extra=vol.ALLOW_EXTRA,
),
_validate_test_mode,
)
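# For illustration only (the values are made up), a minimal form payload that
# passes WEBHOOK_SCHEMA: latitude='48.85', longitude='2.35', device='my-phone',
# trigger='enter', id='home'. Only a 'test' trigger may omit 'id'.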
async def async_setup(hass, hass_config):
"""Set up the Locative component."""
hass.data[DOMAIN] = {"devices": set(), "unsub_device_tracker": {}}
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle incoming webhook from Locative."""
try:
data = WEBHOOK_SCHEMA(dict(await request.post()))
except vol.MultipleInvalid as error:
return web.Response(text=error.error_message, status=HTTP_UNPROCESSABLE_ENTITY)
device = data[ATTR_DEVICE_ID]
location_name = data.get(ATTR_ID, data[ATTR_TRIGGER]).lower()
direction = data[ATTR_TRIGGER]
gps_location = (data[ATTR_LATITUDE], data[ATTR_LONGITUDE])
if direction == "enter":
async_dispatcher_send(hass, TRACKER_UPDATE, device, gps_location, location_name)
return web.Response(text=f"Setting location to {location_name}", status=HTTP_OK)
if direction == "exit":
current_state = hass.states.get(f"{DEVICE_TRACKER}.{device}")
if current_state is None or current_state.state == location_name:
location_name = STATE_NOT_HOME
async_dispatcher_send(
hass, TRACKER_UPDATE, device, gps_location, location_name
)
return web.Response(text="Setting location to not home", status=HTTP_OK)
# Ignore the message if it is telling us to exit a zone that we
# aren't currently in. This occurs when a zone is entered
# before the previous zone was exited. The enter message will
# be sent first, then the exit message will be sent second.
return web.Response(
text=f"Ignoring exit from {location_name} (already in {current_state})",
status=HTTP_OK,
)
if direction == "test":
# In the app, a test message can be sent. Just return something to
# the user to let them know that it works.
return web.Response(text="Received test message.", status=HTTP_OK)
_LOGGER.error("Received unidentified message from Locative: %s", direction)
return web.Response(
text=f"Received unidentified message: {direction}",
status=HTTP_UNPROCESSABLE_ENTITY,
)
async def async_setup_entry(hass, entry):
"""Configure based on config entry."""
hass.components.webhook.async_register(
DOMAIN, "Locative", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, DEVICE_TRACKER)
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
hass.data[DOMAIN]["unsub_device_tracker"].pop(entry.entry_id)()
return await hass.config_entries.async_forward_entry_unload(entry, DEVICE_TRACKER)
async_remove_entry = config_entry_flow.webhook_async_remove_entry
| apache-2.0 | -2,973,712,797,579,613,700 | 2,356,233,200,635,843,600 | 32.030534 | 88 | 0.667668 | false |
googleads/googleads-python-lib | examples/ad_manager/v202011/activity_group_service/get_active_activity_groups.py | 1 | 1957 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all active activity groups.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
activity_group_service = client.GetService(
'ActivityGroupService', version='v202011')
# Create a statement to select activity groups.
statement = (ad_manager.StatementBuilder(version='v202011')
.Where('status = :status')
.WithBindVariable('status', 'ACTIVE'))
# Retrieve a small amount of activity groups at a time, paging
# through until all activity groups have been retrieved.
while True:
response = activity_group_service.getActivityGroupsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for activity_group in response['results']:
# Print out some information for each activity group.
print('Activity group with ID "%d" and name "%s" was found.\n' %
(activity_group['id'], activity_group['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 | 8,039,465,386,393,325,000 | 6,658,078,196,392,536,000 | 36.634615 | 74 | 0.709249 | false |
macioosch/dynamo-hard-spheres-sim | convergence-plot.py | 1 | 6346 | #!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from glob import glob
from itertools import izip
from matplotlib import pyplot as plt
import numpy as np
input_files = glob("csv/convergence-256000-0.*.csv")
#input_files = glob("csv/convergence-500000-0.*.csv")
#input_files = glob("csv/convergence-1000188-0.*.csv")
#plotted_parameter = "msds_diffusion"
plotted_parameter = "pressures_collision"
#plotted_parameter = "pressures_virial"
#plotted_parameter = "msds_val"
#plotted_parameter = "times"
legend_names = []
tight_layout = False
show_legend = False
for file_number, file_name in enumerate(sorted(input_files)):
data = np.genfromtxt(file_name, delimiter='\t', names=[
"packings","densities","collisions","n_atoms","pressures_virial",
"pressures_collision","msds_val","msds_diffusion","times",
"std_pressures_virial","std_pressures_collision","std_msds_val",
"std_msds_diffusion","std_times"])
n_atoms = data["n_atoms"][0]
density = data["densities"][0]
equilibrated_collisions = data["collisions"] - 2*data["collisions"][0] \
+ data["collisions"][1]
"""
### 5 graphs: D(CPS) ###
tight_layout = True
skip_points = 0
ax = plt.subplot(3, 2, file_number+1)
plt.fill_between((equilibrated_collisions / n_atoms)[skip_points:],
data[plotted_parameter][skip_points:]
- data["std_" + plotted_parameter][skip_points:],
data[plotted_parameter][skip_points:]
+ data["std_" + plotted_parameter][skip_points:], alpha=0.3)
plt.plot((equilibrated_collisions / n_atoms)[skip_points:],
data[plotted_parameter][skip_points:], lw=2)
if plotted_parameter == "msds_diffusion":
plt.ylim(0.990*data[plotted_parameter][-1],
1.005*data[plotted_parameter][-1])
plt.xlim([0, 1e5])
plt.legend(["Density {}".format(data["densities"][0])], loc="lower right")
ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.4f'))
plt.xlabel("Collisions per sphere")
plt.ylabel("D")
"""
### 5 graphs: relative D(CPS) ###
tight_layout = True
skip_points = 0
ax = plt.subplot(3, 2, file_number+1)
plt.fill_between((equilibrated_collisions / n_atoms)[skip_points:],
-1 + (data[plotted_parameter][skip_points:]
- data["std_" + plotted_parameter][skip_points:])/data[plotted_parameter][-1],
-1 + (data[plotted_parameter][skip_points:]
+ data["std_" + plotted_parameter][skip_points:])/data[plotted_parameter][-1], alpha=0.3)
plt.plot((equilibrated_collisions / n_atoms)[skip_points:],
-1 + data[plotted_parameter][skip_points:]/data[plotted_parameter][-1], lw=2)
plt.ylim(data["std_" + plotted_parameter][-1]*20*np.array([-1, 1])/data[plotted_parameter][-1])
#plt.xscale("log")
plt.xlim([0, 1e5])
plt.legend(["$\\rho\\sigma^3=\\ {}$".format(data["densities"][0])], loc="lower right")
ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.2e'))
plt.xlabel("$C/N$")
plt.ylabel("$[Z_{MD}(C) / Z_{MD}(C=10^5 N)] - 1$")
"""
### 1 graph: D(t) ###
show_legend = True
skip_points = 0
plt.title("D(t) for 5 densities")
plt.loglog(data["times"][skip_points:],
data[plotted_parameter][skip_points:])
legend_names.append(data["densities"][0])
plt.xlabel("Time")
plt.ylabel("D")
"""
"""
### 1 graph: D(t) / Dinf ###
show_legend = True
skip_points = 0
#plt.fill_between(data["times"][skip_points:],
# (data[plotted_parameter] - data["std_" + plotted_parameter])
# / data[plotted_parameter][-1] - 1,
# (data[plotted_parameter] + data["std_" + plotted_parameter])
# / data[plotted_parameter][-1] - 1, color="grey", alpha=0.4)
plt.plot(data["times"][skip_points:],
data[plotted_parameter] / data[plotted_parameter][-1] - 1, lw=1)
legend_names.append(data["densities"][0])
#plt.xscale("log")
plt.xlabel("Time")
plt.ylabel("D / D(t --> inf)")
"""
"""
### 5 graphs: D(1/CPS) ###
tight_layout = True
skip_points = 40
ax = plt.subplot(3, 2, file_number+1)
plt.fill_between((n_atoms / equilibrated_collisions)[skip_points:],
data[plotted_parameter][skip_points:]
- data["std_" + plotted_parameter][skip_points:],
data[plotted_parameter][skip_points:]
+ data["std_" + plotted_parameter][skip_points:], alpha=0.3)
plt.plot((n_atoms / equilibrated_collisions)[skip_points:],
data[plotted_parameter][skip_points:], lw=2)
plt.title("Density {}:".format(data["densities"][0]))
ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.7f'))
plt.xlim(xmin=0)
plt.xlabel("1 / Collisions per sphere")
plt.ylabel("D")
"""
"""
### 1 graph: D(CPS) / Dinf ###
show_legend = True
plt.fill_between(equilibrated_collisions / n_atoms,
(data[plotted_parameter] - data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1,
(data[plotted_parameter] + data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1, color="grey", alpha=0.4)
plt.plot(equilibrated_collisions / n_atoms,
data[plotted_parameter] / data[plotted_parameter][-1] - 1, lw=2)
legend_names.append(data["densities"][0])
plt.xlabel("Collisions per sphere")
plt.ylabel("D / D(t --> inf)")
"""
"""
### 1 graph: D(1/CPS) / Dinf ###
show_legend = True
plt.fill_between(n_atoms / equilibrated_collisions,
(data[plotted_parameter] - data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1,
(data[plotted_parameter] + data["std_" + plotted_parameter])
/ data[plotted_parameter][-1] - 1, color="grey", alpha=0.4)
plt.plot( n_atoms / equilibrated_collisions,
data[plotted_parameter] / data[plotted_parameter][-1] - 1)
legend_names.append(data["densities"][0])
plt.xlabel(" 1 / Collisions per sphere")
plt.ylabel(plotted_parameter)
"""
#if tight_layout:
# plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)
if show_legend:
plt.legend(legend_names, title="Density:", loc="lower right")
plt.show()
| gpl-3.0 | 4,336,289,583,770,098,700 | -5,692,922,065,035,013,000 | 39.941935 | 101 | 0.601954 | false |
piffey/ansible | lib/ansible/modules/storage/netapp/na_cdot_user.py | 23 | 10225 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_user
short_description: useradmin configuration and management
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create or destroy users.
options:
state:
description:
- Whether the specified user should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the user to manage.
required: true
application:
description:
- Applications to grant access to.
required: true
choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']
authentication_method:
description:
- Authentication method for the application.
- Not all authentication methods are valid for an application.
- Valid authentication methods for each application are as denoted in I(authentication_choices_description).
- password for console application
- password, domain, nsswitch, cert for http application.
- password, domain, nsswitch, cert for ontapi application.
- community for snmp application (when creating SNMPv1 and SNMPv2 users).
- usm and community for snmp application (when creating SNMPv3 users).
- password for sp application.
- password for rsh application.
- password for telnet application.
- password, publickey, domain, nsswitch for ssh application.
required: true
choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
set_password:
description:
- Password for the user account.
- It is ignored for creating snmp users, but is required for creating non-snmp users.
- For an existing user, this value will be used as the new password.
role_name:
description:
- The name of the role. Required when C(state=present)
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create User
na_cdot_user:
state: present
name: SampleUser
application: ssh
authentication_method: password
set_password: apn1242183u1298u41
role_name: vsadmin
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTUser(object):
"""
Common operations to manage users and roles.
"""
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
application=dict(required=True, type='str', choices=[
'console', 'http', 'ontapi', 'rsh',
'snmp', 'sp', 'ssh', 'telnet']),
authentication_method=dict(required=True, type='str',
choices=['community', 'password',
'publickey', 'domain',
'nsswitch', 'usm']),
set_password=dict(required=False, type='str', default=None),
role_name=dict(required=False, type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['role_name'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.application = p['application']
self.authentication_method = p['authentication_method']
self.set_password = p['set_password']
self.role_name = p['role_name']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_user(self):
"""
Checks if the user exists.
:return:
True if user found
False if user is not found
:rtype: bool
"""
security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-account-info', **{'vserver': self.vserver,
'user-name': self.name,
'application': self.application,
'authentication-method':
self.authentication_method})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
security_login_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(security_login_get_iter,
enable_tunneling=False)
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
return True
else:
return False
except netapp_utils.zapi.NaApiError as e:
# Error 16034 denotes a user not being found.
if to_native(e.code) == "16034":
return False
else:
self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def create_user(self):
user_create = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-create', **{'vserver': self.vserver,
'user-name': self.name,
'application': self.application,
'authentication-method':
self.authentication_method,
'role-name': self.role_name})
if self.set_password is not None:
user_create.add_new_child('password', self.set_password)
try:
self.server.invoke_successfully(user_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_user(self):
user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-delete', **{'vserver': self.vserver,
'user-name': self.name,
'application': self.application,
'authentication-method':
self.authentication_method})
try:
self.server.invoke_successfully(user_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def change_password(self):
"""
Changes the password
:return:
True if password updated
False if password is not updated
:rtype: bool
"""
self.server.set_vserver(self.vserver)
modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-modify-password', **{
'new-password': str(self.set_password),
'user-name': self.name})
try:
self.server.invoke_successfully(modify_password,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
if to_native(e.code) == '13114':
return False
else:
self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
self.server.set_vserver(None)
return True
def apply(self):
property_changed = False
password_changed = False
user_exists = self.get_user()
if user_exists:
if self.state == 'absent':
property_changed = True
elif self.state == 'present':
if self.set_password is not None:
password_changed = self.change_password()
else:
if self.state == 'present':
# Check if anything needs to be updated
property_changed = True
if property_changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not user_exists:
self.create_user()
# Add ability to update parameters.
elif self.state == 'absent':
self.delete_user()
changed = property_changed or password_changed
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTUser()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | -6,610,385,007,158,626,000 | 7,875,032,192,134,256,000 | 33.083333 | 112 | 0.549927 | false |
amdouglas/OpenPNM | OpenPNM/Geometry/models/throat_misc.py | 1 | 1124 | r"""
===============================================================================
throat_misc -- Miscellaneous and generic functions to apply to throats
===============================================================================
"""
import scipy as _sp
def random(geometry, seed=None, num_range=[0, 1], **kwargs):
r"""
Assign random number to throats
note: should this be called 'poisson'?
"""
range_size = num_range[1] - num_range[0]
range_min = num_range[0]
_sp.random.seed(seed=seed)
value = _sp.random.rand(geometry.num_throats(),)
value = value*range_size + range_min
return value
def neighbor(geometry, network, pore_prop='pore.seed', mode='min', **kwargs):
r"""
Adopt a value based on the neighboring pores
"""
throats = network.throats(geometry.name)
P12 = network.find_connected_pores(throats)
pvalues = network[pore_prop][P12]
if mode == 'min':
value = _sp.amin(pvalues, axis=1)
if mode == 'max':
value = _sp.amax(pvalues, axis=1)
if mode == 'mean':
value = _sp.mean(pvalues, axis=1)
return value
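# Minimal usage sketch (illustrative only; 'geom' and 'pn' stand for an
# OpenPNM Geometry/Network pair that are assumed to exist already):
#
#   seeds = random(geom, seed=0, num_range=[0.1, 0.9])
#   tvals = neighbor(geom, pn, pore_prop='pore.seed', mode='min')
#
# Both calls return one value per throat of the geometry.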
| mit | 4,595,044,792,830,171,600 | 8,586,826,656,675,615,000 | 30.222222 | 79 | 0.536477 | false |
Digmaster/TicTacToe | Agent.py | 1 | 2030 | from random import randint
from random import getrandbits
from copy import deepcopy
# Agent that will either be the human player or a secondary agent for the dual agent play
class DumbAgent:
#initialize the board for the first player
def __init__(self, board):
self.board = board
def __str__(self):
return "Hi, Im dumb agent. I play randomly as player {0}".format(self.player)
    # read in the next move for the human or secondary agent
def getNextMove(self, player):
board = deepcopy(self.board)
if(player!='X' and player!='O'):
raise ValueError('The only valid players are X and O')
while(True):
try:
square = randint(1, 9)
board.setSquare(square, player)
return square
except ValueError:
"""Do nothing"""
# Define the smart agent - uses the minimax algorithm
class SmartAgent:
def __init__(self, board):
self.board = board
self.signal = False
self.bestVal = None
def __str__(self):
return "Hi, Im smart agent. I whatever move will net me the most points, or avail my enemy of points. I'm {0}".format(self.player)
# to get the next move,call the decideMove function
def getNextMove(self, player):
self.decideMove(deepcopy(self.board), player)
return self.bestVal
def decideMove(self, board, player):
if(self.signal):
return 0
winner = board.testWin() # test for a winning solution to the current state
if(winner!='.'):
if(winner=='X'):
return 1.0
elif(winner=='T'):
return 0.0
else:
return -1.0
values = []
moves = {}
for i in range(1,10):
if(self.signal):
return 0
if(board.getSquare(i)=='.'):
nBoard = deepcopy(board)
nBoard.setSquare(i, player)
value = self.decideMove(nBoard, 'X' if player=='O' else 'O')
values.append(value)
moves[value] = i
if(player=='X'and value==1):
break
elif(player=='O' and value==-1):
break
# calculate the highest probability / best move
if(player=='X'):
sum = max(values)
else:
sum = min(values)
self.bestVal = moves[sum]
return sum
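# Minimal usage sketch (kept as comments because the Board class lives in a
# separate module of this project; its import and constructor are assumptions,
# only setSquare/getSquare/testWin are exercised by the agents above):
#
#   from Board import Board
#   board = Board()
#   human = DumbAgent(board)        # stand-in that picks random legal squares
#   computer = SmartAgent(board)    # minimax player
#   square = computer.getNextMove('X')
#   board.setSquare(square, 'X')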
| apache-2.0 | -3,236,340,699,975,577,000 | 7,864,411,717,753,884,000 | 25.363636 | 132 | 0.666995 | false |
boooka/GeoPowerOff | venv/lib/python2.7/site-packages/django/contrib/syndication/views.py | 74 | 8760 | from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils import six
from django.utils.timezone import get_default_timezone, is_naive, make_aware
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
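# For illustration: add_domain('example.com', '/feed/', secure=True) returns
# 'https://example.com/feed/', while a network-path reference such as
# '//cdn.example.com/feed/' only gains the 'https:' protocol prefix.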
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.mime_type)
if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'):
# if item_pubdate or item_updateddate is defined for the feed, set
# header so as ConditionalGetMiddleware is able to send 304 NOT MODIFIED
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as extra context if either
``self.description_template`` or ``self.item_template`` are used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title=self.__get_dynamic_attr('title', obj),
subtitle=self.__get_dynamic_attr('subtitle', obj),
link=link,
description=self.__get_dynamic_attr('description', obj),
language=settings.LANGUAGE_CODE,
feed_url=add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name=self.__get_dynamic_attr('author_name', obj),
author_link=self.__get_dynamic_attr('author_link', obj),
author_email=self.__get_dynamic_attr('author_email', obj),
categories=self.__get_dynamic_attr('categories', obj),
feed_copyright=self.__get_dynamic_attr('feed_copyright', obj),
feed_guid=self.__get_dynamic_attr('feed_guid', obj),
ttl=self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, context))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, context))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url=smart_text(enc_url),
length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
tz = get_default_timezone()
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
pubdate = make_aware(pubdate, tz)
updateddate = self.__get_dynamic_attr('item_updateddate', item)
if updateddate and is_naive(updateddate):
updateddate = make_aware(updateddate, tz)
feed.add_item(
title=title,
link=link,
description=description,
unique_id=self.__get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink=self.__get_dynamic_attr(
'item_guid_is_permalink', item),
enclosure=enc,
pubdate=pubdate,
updateddate=updateddate,
author_name=author_name,
author_email=author_email,
author_link=author_link,
categories=self.__get_dynamic_attr('item_categories', item),
item_copyright=self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
| apache-2.0 | 3,094,510,302,649,954,300 | 9,055,935,862,303,329,000 | 39.555556 | 167 | 0.584932 | false |
mfit/PdfTableAnnotator | script/csv-compare.py | 1 | 8051 | """
Copyright 2014 Matthias Frey
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
CSV-compare
-----------
Compare table data stored in CSV (comma seperated values) format.
"""
import re
import csv
import sys
import os
def _pr_list(l1, l2, replace_chars = '[\n ]'):
""" Calculate precision and recall regarding elements of a list.
When a 1:1 match cannot be achieved, the list pointers will be
moved forward until a match occurs (first of list A, then of list B).
The closest match will count, and matching will continue from those
list positions onwards.
The replace_chars parameter is used to remove characters from the
strings before comparing. The default will remove newlines and spaces.
"""
def _fnext(l, item):
item = re.sub(replace_chars, '', item).strip()
for i, txt in enumerate(l):
txt = re.sub(replace_chars, '', txt).strip()
if txt == item:
return i
return -1
if len(l2)==0 or len(l1)==0:
return 0, 0
i = 0
j = 0
match = 0
while len(l1)>i and len(l2)>j:
t1 = re.sub(replace_chars, '', l1[i]).strip()
t2 = re.sub(replace_chars, '', l2[j]).strip()
if t1 == t2:
match += 1
i += 1
j += 1
else:
ii = _fnext(l1[i:], l2[j])
jj = _fnext(l2[j:], l1[i])
if ii>=0 and (ii<jj or jj<0): i+=ii
elif jj>=0: j+=jj
else:
i+=1
j+=1
return float(match)/len(l2), float(match)/len(l1)
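# Worked example for _pr_list (illustrative values):
#   _pr_list(['a', 'b', 'c'], ['a', 'x', 'c'])
# matches 'a' and 'c'; the mismatch in the middle advances both pointers, so
# the result is (2/3, 2/3) -- 2 matches over len(l2) and 2 matches over len(l1).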
def clean_table(tab):
""" Remove trailing empty cells resulting from the way some
    spreadsheet applications output csv for multi-table documents.
"""
if len(tab) == 0:
return []
n_empty=[]
for row in tab:
for n, val in enumerate(reversed(row)):
if val!='':
break
n_empty.append(n)
strip_cols = min(n_empty)
cleaned = []
for row in tab:
cleaned.append(row[0:len(row)-strip_cols])
return cleaned
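# For illustration: clean_table([['a', 'b', ''], ['c', 'd', '']]) strips the
# trailing empty column shared by all rows and returns [['a', 'b'], ['c', 'd']].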
def compare_tables(tab1, tab2):
""" Compare two tables (2dim lists).
"""
info = {'rows_a':len(tab1),
'rows_b':len(tab2),
'rows_match': 1 if len(tab1) == len(tab2) else 0,
}
sizesA = [len(l) for l in tab1]
sizesB = [len(l) for l in tab2]
info['dim_match'] = 1 if sizesA == sizesB else 0
info['size_a'] = sum(sizesA)
info['size_b'] = sum(sizesA)
if len(sizesA)>0 and len(sizesB)>0:
info['cols_match'] = 1 if min(sizesA) == max(sizesA) and \
min(sizesB) == max(sizesB) and min(sizesA) == min(sizesB) else 0
# 'flatten' tables
cellsA = []
cellsB = []
for r in tab1: cellsA += [c for c in r]
for r in tab2: cellsB += [c for c in r]
info['p'], info['r'] = _pr_list(cellsA, cellsB)
info['F1'] = F1(info['p'], info['r'])
return info
def compare_files_pr(file1, file2):
""" Calculate simple P/R .
Compare lists of cells, left to right , top to bottom.
"""
cells = [[], []]
for i, fname in enumerate([file1, file2]):
with file(fname) as csvfile:
rd = csv.reader(csvfile, delimiter=',', quotechar='"')
for r in rd:
cells[i] += [c for c in r]
return _pr_list(*cells)
def compare_files(file1, file2):
""" Compare two csv files.
"""
groundtruth = read_tables_from_file(file1)
try:
compare = read_tables_from_file(file2)
except:
compare = []
tbs = [groundtruth, compare]
finfo = {'tabcount_a': len(tbs[0]),
'tabcount_b': len(tbs[1]),
'tabcount_match': len(tbs[0]) == len(tbs[1]),
}
finfo['tables']=[]
for n in range(0, len(tbs[0])):
if finfo['tabcount_match']:
comp_info = compare_tables(tbs[0][n], tbs[1][n])
else:
if n < len(tbs[1]):
comp_info = compare_tables(tbs[0][n], tbs[1][n])
else:
comp_info = compare_tables(tbs[0][n], [[]])
comp_info['n']=n
finfo['tables'].append(comp_info)
return finfo
def output_compareinfo_csv(file, info, fields=['p', 'r', 'F1']):
""" Pre-format a row that holds measures about similarity of a table
to the ground truth.
"""
lines = []
tabmatch = 1 if info['tabcount_match'] else 0
for tinfo in info['tables']:
lines.append([file, str(tabmatch)] + [str(tinfo[k]) for k in fields])
return lines
def F1(p, r):
""" Calculate F1 score from precision and recall.
Returns zero if one of p, r is zero.
"""
return (2*p*r/(p+r)) if p != 0 and r != 0 else 0
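# For illustration: F1(0.5, 1.0) == 2 * 0.5 * 1.0 / 1.5 == 0.666..., while a
# zero precision or recall yields 0 rather than a division error.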
def read_tables_from_file(csvfile):
""" Opens csvfile, returns all tables found.
Guesses csv format (delimiter, etc.)
Splits data into different tables at newline (or empty row).
Returns list of tables.
"""
tables=[]
table_id = 0
with file(csvfile) as f:
sniffer = csv.Sniffer()
dialect = sniffer.sniff(f.next())
rd = csv.reader(f, delimiter=dialect.delimiter,
quotechar=dialect.quotechar)
for r in rd:
if len(tables) <= table_id:
tables.append([])
# Begin next table if there is an empty line
if r == [] or sum([len(v) for v in r]) == 0:
if len(tables[table_id])>0:
table_id+=1
else:
tables[table_id].append(r)
return [clean_table(t) for t in tables if t!=[]]
if __name__ == '__main__':
""" Script usage.
"""
fields = [
#'rows_a', 'rows_b',
#'size_a', 'size_b',
'n',
'rows_match', 'cols_match', 'dim_match',
'p', 'r', 'F1',]
limitchar = ' & '
if len(sys.argv) < 3:
print "Specify two (csv-)files or directories"
quit(-1)
# Params 1 + 2 are files or directories
file1 = sys.argv[1]
file2 = sys.argv[2]
srcinfo = [os.path.basename(file1), os.path.basename(file2)]
# 3rd parameter becomes 'tooldef' (text cols to name rows),
# and 4th parameter tells whether to print headers
tooldef = sys.argv[3].split('-') if len(sys.argv) > 3 else ['na', 'na']
print_headers = len(sys.argv) > 4 and sys.argv[4] in ["1", "y", "yes"]
if print_headers:
print ','.join(['name', 'tool', 'src1', 'src2',
'filename', 'tabsmatch',] + fields)
if os.path.isfile(file1) and os.path.isfile(file2):
inf = compare_files(file1, file2)
lines = output_compareinfo_csv(file1, inf, fields)
for l in lines:
print ','.join(tooldef + srcinfo + l)
elif os.path.isdir(file1) and os.path.isdir(file2):
for f in [path for path in os.listdir(file1) if path[-4:]=='.csv']:
if os.path.isfile(file2 + '/' + f):
inf = compare_files(file1 + '/' + f, file2 + '/' + f)
lines = output_compareinfo_csv(f, inf, fields)
for l in lines:
print ','.join(tooldef + srcinfo + l)
else:
print ','.join(['','',] + srcinfo + ['', "Missing {} for {} {}".format(f, *tooldef)]) | apache-2.0 | -6,863,849,236,616,673,000 | 8,213,845,252,431,914,000 | 29.044776 | 101 | 0.527264 | false |
michael-dev2rights/ansible | lib/ansible/modules/database/mssql/mssql_db.py | 29 | 7066 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
# Outline and parts are reused from Mark Theunissen's mysql_db module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mssql_db
short_description: Add or remove MSSQL databases from a remote host.
description:
- Add or remove MSSQL databases from a remote host.
version_added: "2.2"
options:
name:
description:
- name of the database to add or remove
required: true
default: null
aliases: [ db ]
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
login_port:
description:
      - Port of the MSSQL server. Requires login_host to be defined as something other than localhost if login_port is used
required: false
default: 1433
state:
description:
- The database state
required: false
default: present
choices: [ "present", "absent", "import" ]
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
files (C(.sql)) files are supported.
required: false
autocommit:
description:
- Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed
within a transaction.
required: false
default: false
choices: [ "false", "true" ]
notes:
- Requires the pymssql Python package on the remote host. For Ubuntu, this
is as easy as pip install pymssql (See M(pip).)
requirements:
- python >= 2.7
- pymssql
author: Vedit Firat Arig
'''
EXAMPLES = '''
# Create a new database with name 'jackdata'
- mssql_db:
name: jackdata
state: present
# Copy database dump file to remote host and restore it to database 'my_db'
- copy:
src: dump.sql
dest: /tmp
- mssql_db:
name: my_db
state: import
target: /tmp/dump.sql
'''
RETURN = '''
#
'''
import os
try:
import pymssql
except ImportError:
mssql_found = False
else:
mssql_found = True
from ansible.module_utils.basic import AnsibleModule
def db_exists(conn, cursor, db):
cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
conn.commit()
return bool(cursor.rowcount)
def db_create(conn, cursor, db):
cursor.execute("CREATE DATABASE [%s]" % db)
return db_exists(conn, cursor, db)
def db_delete(conn, cursor, db):
try:
cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
except:
pass
cursor.execute("DROP DATABASE [%s]" % db)
return not db_exists(conn, cursor, db)
def db_import(conn, cursor, module, db, target):
if os.path.isfile(target):
backup = open(target, 'r')
try:
sqlQuery = "USE [%s]\n" % db
for line in backup:
if line is None:
break
elif line.startswith('GO'):
cursor.execute(sqlQuery)
sqlQuery = "USE [%s]\n" % db
else:
sqlQuery += line
cursor.execute(sqlQuery)
conn.commit()
finally:
backup.close()
return 0, "import successful", ""
else:
return 1, "cannot find target file", "cannot find target file"
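# Note: db_import above replays the dump in batches, treating any line that
# starts with 'GO' as a batch separator (the convention used by SQL Server
# dump tooling) and re-issuing "USE [<db>]" at the start of each batch.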
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['db']),
login_user=dict(default=''),
login_password=dict(default='', no_log=True),
login_host=dict(required=True),
login_port=dict(default='1433'),
target=dict(default=None),
autocommit=dict(type='bool', default=False),
state=dict(
default='present', choices=['present', 'absent', 'import'])
)
)
if not mssql_found:
module.fail_json(msg="pymssql python module is required")
db = module.params['name']
state = module.params['state']
autocommit = module.params['autocommit']
target = module.params["target"]
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_querystring = login_host
if login_port != "1433":
login_querystring = "%s:%s" % (login_host, login_port)
if login_user != "" and login_password == "":
module.fail_json(msg="when supplying login_user arguments login_password must be provided")
try:
conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
cursor = conn.cursor()
except Exception as e:
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
"@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
conn.autocommit(True)
changed = False
if db_exists(conn, cursor, db):
if state == "absent":
try:
changed = db_delete(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error deleting database: " + str(e))
elif state == "import":
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
else:
if state == "present":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
elif state == "import":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
module.exit_json(changed=changed, db=db)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,994,485,670,958,094,000 | -3,744,632,118,124,510,000 | 28.689076 | 153 | 0.595528 | false |
jshiv/turntable | test/lib/python2.7/site-packages/scipy/lib/lapack/tests/test_gesv.py | 13 | 3510 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, assert_array_almost_equal, dec, \
assert_equal, assert_, run_module_suite
from common import FUNCS_TP, FLAPACK_IS_EMPTY, CLAPACK_IS_EMPTY, FUNCS_FLAPACK, \
FUNCS_CLAPACK, PREC
A = np.array([[1,2,3],[2,2,3],[3,3,6]])
B = np.array([[10,-1,1],[-1,8,-2],[1,-2,6]])
class TestSygv(TestCase):
def _test_base(self, func, lang, itype):
tp = FUNCS_TP[func]
a = A.astype(tp)
b = B.astype(tp)
if lang == 'C':
f = FUNCS_CLAPACK[func]
elif lang == 'F':
f = FUNCS_FLAPACK[func]
else:
raise ValueError("Lang %s ??" % lang)
w, v, info = f(a, b, itype=itype)
assert_(not info, msg=repr(info))
for i in range(3):
if itype == 1:
assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*np.dot(b,v[:,i]),
decimal=PREC[tp])
elif itype == 2:
assert_array_almost_equal(np.dot(a,np.dot(b,v[:,i])), w[i]*v[:,i],
decimal=PREC[tp])
elif itype == 3:
assert_array_almost_equal(np.dot(b,np.dot(a,v[:,i])),
w[i]*v[:,i], decimal=PREC[tp] - 1)
else:
raise ValueError(itype)
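    # The assertions above cover the three generalized eigenvalue problem
    # types accepted by the *sygv routines: itype=1 solves A v = w B v,
    # itype=2 solves A B v = w v, and itype=3 solves B A v = w v.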
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_ssygv_1(self):
self._test_base('ssygv', 'F', 1)
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_ssygv_2(self):
self._test_base('ssygv', 'F', 2)
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_ssygv_3(self):
self._test_base('ssygv', 'F', 3)
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_dsygv_1(self):
self._test_base('dsygv', 'F', 1)
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_dsygv_2(self):
self._test_base('dsygv', 'F', 2)
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_dsygv_3(self):
self._test_base('dsygv', 'F', 3)
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"],
"Clapack empty, skip flapack test")
def test_clapack_ssygv_1(self):
self._test_base('ssygv', 'C', 1)
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"],
"Clapack empty, skip flapack test")
def test_clapack_ssygv_2(self):
self._test_base('ssygv', 'C', 2)
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"],
"Clapack empty, skip flapack test")
def test_clapack_ssygv_3(self):
self._test_base('ssygv', 'C', 3)
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"],
"Clapack empty, skip flapack test")
def test_clapack_dsygv_1(self):
self._test_base('dsygv', 'C', 1)
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"],
"Clapack empty, skip flapack test")
def test_clapack_dsygv_2(self):
self._test_base('dsygv', 'C', 2)
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"],
"Clapack empty, skip flapack test")
def test_clapack_dsygv_3(self):
self._test_base('dsygv', 'C', 3)
if __name__ == "__main__":
run_module_suite()
| mit | 7,048,757,427,930,014,000 | 6,959,063,868,737,633,000 | 35.185567 | 82 | 0.547863 | false |
Garrett-R/scikit-learn | sklearn/datasets/samples_generator.py | 14 | 54612 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
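# For illustration: _generate_hypercube(4, 3, np.random.RandomState(0)) yields
# a (4, 3) array of 0/1 rows, each row a distinct vertex of the unit hypercube.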
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
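# Minimal usage sketch (parameter values are illustrative only):
#   X, y = make_classification(n_samples=100, n_features=20, n_informative=2,
#                              n_redundant=2, random_state=0)
#   # X.shape == (100, 20) and y holds the two class labels 0 and 1.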
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
    return_indicator : bool, optional (default=False)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
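# Minimal usage sketch (illustrative values); with return_indicator=True the
# labels come back as a binary indicator matrix of shape (n_samples, n_classes):
#   X, Y = make_multilabel_classification(n_samples=10, n_classes=5,
#                                         return_indicator=True, random_state=0)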
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
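# Minimal usage sketch (illustrative values); with coef=True the coefficients
# of the underlying linear model are returned as well:
#   X, y, w = make_regression(n_samples=50, n_features=10, n_informative=3,
#                             noise=1.0, coef=True, random_state=0)
#   # y is close to np.dot(X, w) up to the added noise (bias defaults to 0).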
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
    shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
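# Editor's illustrative sketch (not part of the original module):
# make_friedman2 and make_friedman3 share this signature and both return a
# four-feature design matrix.
def _example_make_friedman2():
    X, y = make_friedman2(n_samples=150, noise=0.05, random_state=0)
    assert X.shape == (150, 4) and y.shape == (150,)
    return X, y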
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
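# Editor's illustrative sketch (not part of the original module): the singular
# values of the generated matrix fall off quickly after `effective_rank`
# (uses the module-level numpy import).
def _example_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
                             tail_strength=0.2, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    assert s.shape == (30,) and s[0] > 5 * s[-1]
    return s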
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
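# Editor's illustrative sketch (not part of the original module): each column
# of the returned code X carries exactly n_nonzero_coefs active entries.
def _example_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert Y.shape == (10, 5) and D.shape == (10, 8) and X.shape == (8, 5)
    assert np.all((X != 0).sum(axis=0) == 3)
    return Y, D, X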
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
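# Editor's illustrative sketch (not part of the original module): only the
# first four of the ten features are informative in this design.
def _example_make_sparse_uncorrelated():
    X, y = make_sparse_uncorrelated(n_samples=100, n_features=10,
                                    random_state=0)
    assert X.shape == (100, 10) and y.shape == (100,)
    return X, y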
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
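# Editor's illustrative sketch (not part of the original module): the result is
# symmetric with strictly positive eigenvalues.
def _example_make_spd_matrix():
    A = make_spd_matrix(n_dim=4, random_state=0)
    assert np.allclose(A, A.T)
    assert np.all(np.linalg.eigvalsh(A) > 0)
    return A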
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
        The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
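# Editor's illustrative sketch (not part of the original module): a larger
# alpha zeroes more Cholesky coefficients, so the precision matrix is sparser.
def _example_make_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.98, smallest_coef=0.4,
                                  largest_coef=0.7, random_state=0)
    assert prec.shape == (10, 10) and np.allclose(prec, prec.T)
    return prec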
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
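# Editor's illustrative sketch (not part of the original module): the returned
# t is the position along the roll; make_s_curve below follows the same
# calling convention.
def _example_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=300, noise=0.05, random_state=0)
    assert X.shape == (300, 3) and t.shape == (300,)
    return X, t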
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
    The dataset is from Zhu et al. [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
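# Editor's illustrative sketch (not part of the original module): samples are
# binned into (nearly) equally populated classes by distance from the mean.
def _example_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2)
    assert np.all(np.bincount(y) == 30)
    return X, y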
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
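# Editor's illustrative sketch (not part of the original module): `rows` and
# `cols` are boolean indicator matrices with one row per bicluster.
def _example_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       noise=0.5, random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    return data, rows, cols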
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
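# Editor's illustrative sketch (not part of the original module): with a scalar
# n_clusters the indicators cover n_clusters * n_clusters checkerboard blocks.
def _example_make_checkerboard():
    data, rows, cols = make_checkerboard(shape=(30, 20), n_clusters=2,
                                         noise=0.5, random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (4, 30) and cols.shape == (4, 20)
    return data, rows, cols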
| bsd-3-clause | -6,279,763,379,905,299,000 | -2,861,419,228,821,455,000 | 33.412098 | 79 | 0.614828 | false |
plotly/python-api | packages/python/plotly/plotly/graph_objs/scatterpolargl/hoverlabel/_font.py | 2 | 11245 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolargl.hoverlabel"
_path_str = "scatterpolargl.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolargl
.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolargl.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
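# Editor's illustrative sketch (not part of the generated plotly module): the
# Font object defined above is what a scatterpolargl trace's hoverlabel.font
# property accepts; to_plotly_json() shows the resulting plain-dict form.
def _example_hoverlabel_font():
    font = Font(family="Arial, sans-serif", size=13, color="#444")
    return font.to_plotly_json()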
| mit | 2,543,047,827,447,217,700 | -6,567,917,946,596,954,000 | 33.075758 | 82 | 0.552334 | false |
syci/ingadhoc-odoo-addons | product_price_currency/product.py | 2 | 2492 | # -*- coding: utf-8 -*-
from openerp import fields, models, api
import openerp.addons.decimal_precision as dp
class product_template(models.Model):
_inherit = 'product.template'
@api.model
def get_currency_id(self):
price_type_obj = self.env['product.price.type']
price_type_ids = price_type_obj.search([('field', '=', 'list_price')])
if not price_type_ids.currency_id:
return self.env.user.company_id.currency_id
return price_type_ids.currency_id
sale_price_currency_id = fields.Many2one(
'res.currency', 'Sale Price Currency',
required=True, default=get_currency_id,
help="Currency used for the Currency List Price."
)
cia_currency_list_price = fields.Float(
'Company Currency Sale Price',
digits=dp.get_precision('Product Price'),
compute='get_cia_currency_list_price',
        help="Base price in company currency at the current exchange rate",
)
@api.multi
@api.depends('list_price', 'sale_price_currency_id')
def get_cia_currency_list_price(self):
company_currency = self.env.user.company_id.currency_id
for product in self:
if product.sale_price_currency_id != company_currency:
cia_currency_list_price = product.sale_price_currency_id.compute(
product.list_price, company_currency)
else:
cia_currency_list_price = product.list_price
product.cia_currency_list_price = cia_currency_list_price
def _price_get(self, cr, uid, products, ptype='list_price', context=None):
if not context:
context = {}
res = super(product_template, self)._price_get(
cr, uid, products, ptype=ptype, context=context)
if ptype == 'list_price':
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(
cr, uid, [('field', '=', ptype)])[0]
price_type_currency_id = pricetype_obj.browse(
cr, uid, price_type_id).currency_id.id
for product in products:
if product.sale_price_currency_id.id != price_type_currency_id:
res[product.id] = self.pool.get('res.currency').compute(
cr, uid, product.sale_price_currency_id.id,
price_type_currency_id, res[product.id],
context=context)
return res
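# Editor's illustrative sketch (hypothetical names; needs a running Odoo
# environment, so it is kept as a comment):
#
#   product = env['product.template'].browse(product_id)
#   # list_price is expressed in product.sale_price_currency_id; the computed
#   # field converts it to the company currency at the current rate.
#   company_currency_price = product.cia_currency_list_price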
| agpl-3.0 | 4,227,982,220,788,372,500 | 1,021,912,609,808,678,100 | 41.237288 | 81 | 0.597111 | false |
lmprice/ansible | lib/ansible/plugins/lookup/k8s.py | 2 | 11120 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: k8s
version_added: "2.5"
short_description: Query the K8s API
description:
- Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
namespace, or all matching objects for all namespaces, as well as information about the cluster.
      - Provides access to the full range of K8s APIs.
- Enables authentication via config file, certificates, password or token.
options:
cluster_info:
description:
- Use to specify the type of cluster information you are attempting to retrieve. Will take priority
over all the other options.
api_version:
description:
- Use to specify the API version. If I(resource definition) is provided, the I(apiVersion) from the
I(resource_definition) will override this option.
default: v1
kind:
description:
- Use to specify an object model. If I(resource definition) is provided, the I(kind) from a
I(resource_definition) will override this option.
required: true
resource_name:
description:
- Fetch a specific object by name. If I(resource definition) is provided, the I(metadata.name) value
from the I(resource_definition) will override this option.
namespace:
description:
- Limit the objects returned to a specific namespace. If I(resource definition) is provided, the
I(metadata.namespace) value from the I(resource_definition) will override this option.
label_selector:
description:
- Additional labels to include in the query. Ignored when I(resource_name) is provided.
field_selector:
description:
- Specific fields on which to query. Ignored when I(resource_name) is provided.
resource_definition:
description:
- "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name),
and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
src:
description:
          - "Provide a path to a file containing a valid YAML definition of an object. Mutually
exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace)
will be overwritten by corresponding values found in the configuration read in from the I(src) file."
- Reads from the local file system. To read from the Ansible controller's file system, use the file lookup
plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
I(resource_definition). See Examples below.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment
variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
variable.
cert_file:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment
variable.
key_file:
description:
          - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
variable.
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
environment variable.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
environment variable.
type: bool
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
notes:
- "The OpenShift Python client wraps the K8s Python client, providing full access to
all of the APIS and models available on both platforms. For API version details and
additional information visit https://github.com/openshift/openshift-restclient-python"
"""
EXAMPLES = """
- name: Fetch a list of namespaces
set_fact:
projects: "{{ lookup('k8s', api_version='v1', kind='Namespace') }}"
- name: Fetch all deployments
set_fact:
    deployments: "{{ lookup('k8s', kind='Deployment') }}"
- name: Fetch all deployments in a namespace
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}"
- name: Fetch a specific deployment by name
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}"
- name: Fetch with label selector
set_fact:
service: "{{ lookup('k8s', kind='Service', label_selector='app=galaxy') }}"
# Use parameters from a YAML config
- name: Load config from the Ansible controller filesystem
set_fact:
config: "{{ lookup('file', 'service.yml') | from_yaml }}"
- name: Using the config (loaded from a file in prior task), fetch the latest version of the object
set_fact:
service: "{{ lookup('k8s', resource_definition=config) }}"
- name: Use a config from the local filesystem
set_fact:
service: "{{ lookup('k8s', src='service.yml') }}"
"""
RETURN = """
_list:
description:
    - One or more object definitions returned from the API.
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
"""
from ansible.plugins.lookup import LookupBase
import os
from ansible.module_utils.six import iteritems
from ansible.module_utils.k8s.common import K8sAnsibleMixin
try:
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import NotFoundError
HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
HAS_K8S_MODULE_HELPER = False
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
class KubernetesLookup(K8sAnsibleMixin):
def __init__(self):
if not HAS_K8S_MODULE_HELPER:
raise Exception(
"Requires the OpenShift Python client. Try `pip install openshift`"
)
if not HAS_YAML:
raise Exception(
"Requires PyYAML. Try `pip install PyYAML`"
)
self.kind = None
self.name = None
self.namespace = None
self.api_version = None
self.label_selector = None
self.field_selector = None
self.include_uninitialized = None
self.resource_definition = None
self.helper = None
self.connection = {}
def run(self, terms, variables=None, **kwargs):
self.params = kwargs
self.client = self.get_api_client()
cluster_info = kwargs.get('cluster_info')
if cluster_info == 'version':
return [self.client.version]
if cluster_info == 'api_groups':
return [self.client.resources.api_groups]
self.kind = kwargs.get('kind')
self.name = kwargs.get('resource_name')
self.namespace = kwargs.get('namespace')
self.api_version = kwargs.get('api_version', 'v1')
self.label_selector = kwargs.get('label_selector')
self.field_selector = kwargs.get('field_selector')
self.include_uninitialized = kwargs.get('include_uninitialized', False)
resource_definition = kwargs.get('resource_definition')
src = kwargs.get('src')
if src:
resource_definition = self.load_resource_definitions(src)[0]
if resource_definition:
self.kind = resource_definition.get('kind', self.kind)
self.api_version = resource_definition.get('apiVersion', self.api_version)
self.name = resource_definition.get('metadata', {}).get('name', self.name)
self.namespace = resource_definition.get('metadata', {}).get('namespace', self.namespace)
if not self.kind:
raise Exception(
"Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
"using the 'resource_definition' parameter."
)
resource = self.client.resources.get(kind=self.kind, api_version=self.api_version)
try:
k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
except NotFoundError:
return []
if self.name:
return [k8s_obj.to_dict()]
return k8s_obj.to_dict().get('items')
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
return KubernetesLookup().run(terms, variables=variables, **kwargs)
| gpl-3.0 | 5,002,527,723,172,759,000 | -1,353,177,564,722,572,300 | 37.344828 | 148 | 0.660612 | false |
jalilag/apspir | objdictgen/gnosis/xml/pickle/ext/_mutators.py | 3 | 7670 | from _mutate import XMLP_Mutator, XMLP_Mutated
import _mutate
import sys, string
from types import *
from gnosis.util.introspect import isInstanceLike, attr_update, \
data2attr, attr2data, getCoreData, setCoreData, isinstance_any
from gnosis.xml.pickle.util import _klass, _module, obj_from_name
from gnosis.util.XtoY import aton
import gnosis.pyconfig
class _EmptyClass: pass
class mutate_builtin_wrapper(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,None,'builtin_wrapper')
def mutate(self,obj):
wrap = _EmptyClass()
wrap.__toplevel__ = obj
return XMLP_Mutated(wrap)
def unmutate(self,mobj):
return mobj.obj.__toplevel__
_mutate.add_mutator(mutate_builtin_wrapper())
# We pickle array.array() as type "array" and Numeric.array as
# type "Numpy_array" (this is really what earlier xml_pickles did,
# except you had to use EITHER array.array() or Numeric.array() -
# you couldn't mix them (in fact, you couldn't pickle array.array()
# types if Numeric was installed).
import array
#-- array.array --
class mutate_array(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,array.ArrayType,'array',0)
def mutate(self,obj):
list = []
for item in obj:
list.append(item)
return XMLP_Mutated(list)
def unmutate(self,mobj):
obj = mobj.obj
as_int = 1
for item in obj:
if type(item) == type(1.0):
as_int = 0
if as_int:
return array.array('b',obj)
else:
return array.array('d',obj) # double precision
_mutate.add_mutator(mutate_array())
#-- Numeric.array --
try:
import Numeric
HAS_NUMERIC = 1
except:
HAS_NUMERIC = 0
class mutate_numpy(XMLP_Mutator):
def __init__(self):
# note, Numeric.ArrayType != array.ArrayType, which is good :-)
XMLP_Mutator.__init__(self,Numeric.ArrayType,'NumPy_array',0)
def mutate(self,obj):
list = []
for item in obj:
list.append(item)
return XMLP_Mutated(list)
def unmutate(self,mobj):
return Numeric.array(mobj.obj)
if HAS_NUMERIC:
_mutate.add_mutator(mutate_numpy())
#-- SREs --
# save the RE pattern in the element body
import re
SRE_Pattern_type = type(re.compile(''))
class mutate_sre(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,SRE_Pattern_type,'SRE',paranoia=0,
in_body=1)
def mutate(self,obj):
return XMLP_Mutated(obj.pattern)
def unmutate(self,mobj):
return re.compile(mobj.obj)
_mutate.add_mutator(mutate_sre())
#-- rawpickles --
# save the pickle in the element body
try: import cPickle as pickle
except: import pickle
class mutate_rawpickle(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,None,'rawpickle',0)
def mutate(self,obj): return XMLP_Mutated(pickle.dumps(obj))
def unmutate(self,mobj): return pickle.loads(str(mobj.obj))
_mutate.add_mutator(mutate_rawpickle())
#-- mx.DateTime --
# see test_mutators.py for an alternate way to pickle these
try:
import mx.DateTime
mxDateTime_type = type(mx.DateTime.localtime())
except:
mxDateTime_type = None
class mutate_mxdatetime(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,mxDateTime_type,'mxDateTime',
paranoia=0,in_body=1)
def mutate(self,obj):
# (I avoided using strftime(), for portability reasons.)
# Pickle seconds as a float to save full precision.
s = "YMD = %d/%d/%d, HMS = %d:%d:%.17g" % \
(obj.year,obj.month,obj.day,\
obj.hour,obj.minute,obj.second)
return XMLP_Mutated(s)
def unmutate(self,mobj):
obj = mobj.obj
# is this forgiving enough? :-)
fmt = 'YMD\s*=\s*([0-9]+)\s*/\s*([0-9]+)\s*/\s*([0-9]+)\s*,\s*'
fmt += 'HMS\s*=\s*([0-9]+)\s*:\s*([0-9]+)\s*:\s*([0-9\.]+)'
m = re.match(fmt,obj)
# this started giving a deprecation warning about passing a
# float where an int was expected
#return apply(mx.DateTime.DateTime,map(float,m.groups()))
args = map(int,m.groups()[:5]) + [float(m.group(6))]
return apply(mx.DateTime.DateTime,args)
if mxDateTime_type is not None:
_mutate.add_mutator(mutate_mxdatetime())
#-- mutator + support functions for handling objects subclassed
#-- from builtin types (Python >= 2.2)
def newdata_to_olddata(o):
"""Given o, an object subclassed from a builtin type with no attributes,
return a tuple containing the raw data and a string containing
a tag to save in the extra= field"""
return (getCoreData(o),"%s %s"%(_module(o),_klass(o)))
def olddata_to_newdata(data,extra,paranoia):
"""Given raw data, the extra= tag, and paranoia setting,
recreate the object that was passed to newdata_to_olddata."""
(module,klass) = extra.split()
o = obj_from_name(klass,module,paranoia)
#if isinstance(o,ComplexType) and \
# type(data) in [StringType,UnicodeType]:
# # yuck ... have to strip () from complex data before
# # passing to __init__ (ran into this also in one of the
# # parsers ... maybe the () shouldn't be in the XML at all?)
# if data[0] == '(' and data[-1] == ')':
# data = data[1:-1]
if isinstance_any(o,(IntType,FloatType,ComplexType,LongType)) and \
type(data) in [StringType,UnicodeType]:
data = aton(data)
o = setCoreData(o,data)
return o
# my semantic preferences, of the moment :-)
newinst_to_oldinst = data2attr
oldinst_to_newinst = attr2data
def hasPickleFuncs(obj):
"Does obj define the special pickling functions?"
return (hasattr(obj,'__getstate__') or \
hasattr(obj,'__setstate__') or \
hasattr(obj,'__getinitargs__'))
class mutate_bltin_instances(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,None,'__compound__',0)
def mutate(self,obj):
if isinstance(obj,UnicodeType):
# unicode strings are required to be placed in the body
# (by our encoding scheme)
self.in_body = 1
else:
# XXX really should check getInBody(), but we'd have
# to do isinstance() for each type ... maybe do later
self.in_body = 0
if isInstanceLike(obj) or hasPickleFuncs(obj):
# obj has data items (list,dict,tuple) *AND* attributes.
# mutate to an oldstyle object, turning the data items into
# a special attribute (eg. __items__, __entries__).
#
# also, if obj defines the special pickling functions, we treat
# it as an instance so we don't have to duplicate all the
# protocol logic here.
return XMLP_Mutated(newinst_to_oldinst(obj))
else:
# obj has only data items (list,dict,tuple,etc.)
# convert to the raw datatype and remember the
# module.class of obj for unpickling.
(o,t) = newdata_to_olddata(obj)
return XMLP_Mutated(o,t)
def unmutate(self,mobj):
obj = mobj.obj
if not mobj.extra:
# converting obj with __coredata__ + attrs
return oldinst_to_newinst(obj)
else:
# converting obj with __coredata__ but no attrs
return olddata_to_newdata(obj,mobj.extra,self.paranoia)
# add mutator for instances of builtin classes (int, dict, object, etc.)
if gnosis.pyconfig.Have_ObjectClass():
_mutate.add_mutator(mutate_bltin_instances())
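# Editor's illustrative sketch (commented out so it does not register anything
# at import time): a custom mutator follows the same pattern as the classes
# above -- subclass XMLP_Mutator, convert to/from a picklable form, then
# register it. decimal.Decimal is used purely as an example type.
#
# import decimal
#
# class mutate_decimal(XMLP_Mutator):
#     def __init__(self):
#         XMLP_Mutator.__init__(self, decimal.Decimal, 'decimal',
#                               paranoia=0, in_body=1)
#     def mutate(self, obj):
#         return XMLP_Mutated(str(obj))
#     def unmutate(self, mobj):
#         return decimal.Decimal(str(mobj.obj))
#
# _mutate.add_mutator(mutate_decimal())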
| lgpl-2.1 | 2,041,296,515,769,213,700 | 8,699,253,710,631,373,000 | 30.052632 | 76 | 0.614993 | false |
xq262144/hue | desktop/core/ext-py/Django-1.6.10/tests/validators/tests.py | 38 | 9616 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import types
from datetime import datetime, timedelta
from django.core.exceptions import ValidationError
from django.core.validators import *
from django.test.utils import str_prefix
from django.utils.unittest import TestCase
NOW = datetime.now()
TEST_DATA = (
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, None),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_email, 'email@here.com', None),
(validate_email, 'weirder-email@here.and.there.com', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'example@valid-----hyphens.com', None),
(validate_email, 'example@valid-with-hyphens.com', None),
(validate_email, 'test@domain.with.idn.tld.उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, 'abc@.com', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, 'email@127.0.0.1', ValidationError),
(validate_email, 'example@invalid-.com', ValidationError),
(validate_email, 'example@-invalid.com', ValidationError),
(validate_email, 'example@invalid.com-', ValidationError),
(validate_email, 'example@inv-.alid-.com', ValidationError),
(validate_email, 'example@inv-.-alid.com', ValidationError),
(validate_email, 'test@example.com\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, 'some@mail.com', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in it's own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '1,2,3,', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10*'x', None),
(MaxLengthValidator(10), 15*'x', ValidationError),
(MinLengthValidator(10), 15*'x', None),
(MinLengthValidator(10), 10*'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(), 'http://www.djangoproject.com/', None),
(URLValidator(), 'http://localhost/', None),
(URLValidator(), 'http://example.com/', None),
(URLValidator(), 'http://www.example.com/', None),
(URLValidator(), 'http://www.example.com:8000/test', None),
(URLValidator(), 'http://valid-with-hyphens.com/', None),
(URLValidator(), 'http://subdomain.example.com/', None),
(URLValidator(), 'http://200.8.9.10/', None),
(URLValidator(), 'http://200.8.9.10:8000/test', None),
(URLValidator(), 'http://valid-----hyphens.com/', None),
(URLValidator(), 'http://example.com?something=value', None),
(URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),
(URLValidator(), 'foo', ValidationError),
(URLValidator(), 'http://', ValidationError),
(URLValidator(), 'http://example', ValidationError),
(URLValidator(), 'http://example.', ValidationError),
(URLValidator(), 'http://.com', ValidationError),
(URLValidator(), 'http://invalid-.com', ValidationError),
(URLValidator(), 'http://-invalid.com', ValidationError),
(URLValidator(), 'http://invalid.com-', ValidationError),
(URLValidator(), 'http://inv-.alid-.com', ValidationError),
(URLValidator(), 'http://inv-.-alid.com', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
)
def create_simple_test_method(validator, expected, value, num):
if expected is not None and issubclass(expected, Exception):
test_mask = 'test_%s_raises_error_%d'
def test_func(self):
# assertRaises not used, so as to be able to produce an error message
# containing the tested value
try:
validator(value)
except expected:
pass
else:
self.fail("%s not raised when validating '%s'" % (
expected.__name__, value))
else:
test_mask = 'test_%s_%d'
def test_func(self):
try:
self.assertEqual(expected, validator(value))
except ValidationError as e:
self.fail("Validation of '%s' failed. Error message was: %s" % (
value, str(e)))
if isinstance(validator, types.FunctionType):
val_name = validator.__name__
else:
val_name = validator.__class__.__name__
test_name = test_mask % (val_name, num)
return test_name, test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(TestCase):
def test_single_message(self):
v = ValidationError('Not Valid')
self.assertEqual(str(v), str_prefix("[%(_)s'Not Valid']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'Not Valid'])"))
def test_message_list(self):
v = ValidationError(['First Problem', 'Second Problem'])
self.assertEqual(str(v), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))
def test_message_dict(self):
v = ValidationError({'first': ['First Problem']})
self.assertEqual(str(v), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
self.assertEqual(repr(v), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))
test_counter = 0
for validator, value, expected in TEST_DATA:
name, method = create_simple_test_method(validator, expected, value, test_counter)
setattr(TestSimpleValidators, name, method)
test_counter += 1
| apache-2.0 | -2,559,156,650,503,663,600 | 1,133,712,972,287,762,800 | 41.229075 | 111 | 0.63864 | false |
ychen820/microblog | y/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/updater/local_state.py | 2 | 26812 | # Copyright 2013 Google Inc. All Rights Reserved.
"""Manages the state of what is installed in the cloud SDK.
This tracks the installed modules along with the files they created. It also
provides functionality like extracting tar files into the installation and
tracking when we check for updates.
"""
import errno
import json
import logging
import os
import shutil
import sys
import time
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.updater import installers
from googlecloudsdk.core.updater import snapshots
from googlecloudsdk.core.util import console_io
from googlecloudsdk.core.util import files as file_utils
class Error(exceptions.Error):
"""Base exception for the local_state module."""
pass
class InvalidSDKRootError(Error):
"""Error for when the root of the Cloud SDK is invalid or cannot be found."""
def __init__(self):
super(InvalidSDKRootError, self).__init__(
'The update action could not be performed because the installation root'
' of the Cloud SDK could not be located. Please re-install the Cloud '
'SDK and try again.')
class InvalidDownloadError(Error):
"""Exception for when the SDK that was download was invalid."""
def __init__(self):
super(InvalidDownloadError, self).__init__(
'The Cloud SDK download was invalid.')
class PermissionsError(Error):
"""Error for when a file operation cannot complete due to permissions."""
def __init__(self, message, path):
"""Initialize a PermissionsError.
Args:
message: str, The message from the underlying error.
path: str, The absolute path to a file or directory that needs to be
operated on, but can't because of insufficient permissions.
"""
super(PermissionsError, self).__init__(
'{message}: [{path}]\n\nEnsure you have the permissions to access the '
'file and that the file is not in use.'
.format(message=message, path=path))
def _RaisesPermissionsError(func):
"""Use this decorator for functions that deal with files.
If an exception indicating file permissions is raised, this decorator will
raise a PermissionsError instead, so that the caller only has to watch for
one type of exception.
Args:
func: The function to decorate.
Returns:
A decorator.
"""
def _TryFunc(*args, **kwargs):
try:
return func(*args, **kwargs)
except (OSError, IOError) as e:
if e.errno == errno.EACCES:
new_exc = PermissionsError(
message=e.strerror, path=os.path.abspath(e.filename))
# Maintain original stack trace.
raise new_exc, None, sys.exc_info()[2]
raise
except shutil.Error as e:
args = e.args[0][0]
# unfortunately shutil.Error *only* has formatted strings to inspect.
# Looking for this substring is looking for errno.EACCES, which has
# a numeric value of 13.
if args[2].startswith('[Errno 13]'):
new_exc = PermissionsError(
message=args[2], path=os.path.abspath(args[0]))
# Maintain original stack trace.
raise new_exc, None, sys.exc_info()[2]
raise
return _TryFunc
class InstallationState(object):
"""The main class for checking / updating local installation state."""
STATE_DIR_NAME = config.Paths.CLOUDSDK_STATE_DIR
BACKUP_DIR_NAME = '.backup'
TRASH_DIR_NAME = '.trash'
STAGING_ROOT_SUFFIX = '.staging'
COMPONENT_SNAPSHOT_FILE_SUFFIX = '.snapshot.json'
@staticmethod
def ForCurrent():
"""Gets the installation state for the SDK that this code is running in.
Returns:
InstallationState, The state for this area.
Raises:
InvalidSDKRootError: If this code is not running under a valid SDK.
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise InvalidSDKRootError()
return InstallationState(os.path.realpath(sdk_root))
@staticmethod
def VersionForInstalledComponent(component_id):
"""Gets the version string for the given installed component.
This function is to be used to get component versions for metrics reporting.
If it fails in any way or if the component_id is unknown, it will return
None. This prevents errors from surfacing when the version is needed
strictly for reporting purposes.
Args:
component_id: str, The component id of the component you want the version
for.
Returns:
str, The installed version of the component, or None if it is not
installed or if an error occurs.
"""
try:
state = InstallationState.ForCurrent()
# pylint: disable=protected-access, This is the same class.
return InstallationManifest(
state._state_directory, component_id).VersionString()
# pylint: disable=bare-except, We never want to fail because of metrics.
except:
logging.debug('Failed to get installed version for component [%s]: [%s]',
component_id, sys.exc_info())
return None
@_RaisesPermissionsError
def __init__(self, sdk_root):
"""Initializes the installation state for the given sdk install.
Args:
sdk_root: str, The file path of the root of the SDK installation.
Raises:
ValueError: If the given SDK root does not exist.
"""
if not os.path.isdir(sdk_root):
raise ValueError('The given Cloud SDK root does not exist: [{0}]'
.format(sdk_root))
self.__sdk_root = sdk_root
self._state_directory = os.path.join(sdk_root,
InstallationState.STATE_DIR_NAME)
self.__backup_directory = os.path.join(self._state_directory,
InstallationState.BACKUP_DIR_NAME)
self.__trash_directory = os.path.join(self._state_directory,
InstallationState.TRASH_DIR_NAME)
self.__sdk_staging_root = (os.path.normpath(self.__sdk_root) +
InstallationState.STAGING_ROOT_SUFFIX)
for d in [self._state_directory]:
if not os.path.isdir(d):
file_utils.MakeDir(d)
@property
def sdk_root(self):
"""Gets the root of the SDK that this state corresponds to.
Returns:
str, the path to the root directory.
"""
return self.__sdk_root
def _FilesForSuffix(self, suffix):
"""Returns the files in the state directory that have the given suffix.
Args:
suffix: str, The file suffix to match on.
Returns:
list of str, The file names that match.
"""
files = os.listdir(self._state_directory)
matching = [f for f in files
if os.path.isfile(os.path.join(self._state_directory, f))
and f.endswith(suffix)]
return matching
@_RaisesPermissionsError
def InstalledComponents(self):
"""Gets all the components that are currently installed.
Returns:
A dictionary of component id string to InstallationManifest.
"""
snapshot_files = self._FilesForSuffix(
InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)
manifests = {}
for f in snapshot_files:
component_id = f[:-len(InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)]
manifests[component_id] = InstallationManifest(self._state_directory,
component_id)
return manifests
@_RaisesPermissionsError
def Snapshot(self):
"""Generates a ComponentSnapshot from the currently installed components."""
return snapshots.ComponentSnapshot.FromInstallState(self)
def LastUpdateCheck(self):
"""Gets a LastUpdateCheck object to check update status."""
return LastUpdateCheck(self)
def DiffCurrentState(self, latest_snapshot, platform_filter=None):
"""Generates a ComponentSnapshotDiff from current state and the given state.
Args:
latest_snapshot: snapshots.ComponentSnapshot, The current state of the
world to diff against.
platform_filter: platforms.Platform, A platform that components must
match in order to be considered for any operations.
Returns:
A ComponentSnapshotDiff.
"""
return self.Snapshot().CreateDiff(latest_snapshot,
platform_filter=platform_filter)
@_RaisesPermissionsError
def CloneToStaging(self, progress_callback=None):
"""Clones this state to the temporary staging area.
This is used for making temporary copies of the entire Cloud SDK
installation when doing updates. The entire installation is cloned, but
doing so removes any backups and trash from this state before doing the
copy.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
Returns:
An InstallationState object for the cloned install.
"""
(rm_staging_cb, rm_backup_cb, rm_trash_cb, copy_cb) = (
console_io.ProgressBar.SplitProgressBar(progress_callback,
[1, 1, 1, 7]))
self._ClearStaging(progress_callback=rm_staging_cb)
self.ClearBackup(progress_callback=rm_backup_cb)
self.ClearTrash(progress_callback=rm_trash_cb)
class Counter(object):
def __init__(self, progress_callback, total):
self.count = 0
self.progress_callback = progress_callback
self.total = float(total)
# This function must match the signature that shutil expects for the
# ignore function.
def Tick(self, *unused_args):
self.count += 1
self.progress_callback(self.count / self.total)
return []
if progress_callback:
# This takes a little time, so only do it if we are going to report
# progress.
dirs = set()
for _, manifest in self.InstalledComponents().iteritems():
dirs.update(manifest.InstalledDirectories())
# There is always the root directory itself and the .install directory.
      # In general, there could be other directories in the SDK (if people just
      # put stuff in there) but this is fine for an estimate. The progress bar
      # will at worst stay
# at 100% for slightly longer.
total_dirs = len(dirs) + 2
ticker = Counter(copy_cb, total_dirs).Tick if total_dirs else None
else:
ticker = None
shutil.copytree(self.__sdk_root, self.__sdk_staging_root, symlinks=True,
ignore=ticker)
return InstallationState(self.__sdk_staging_root)
@_RaisesPermissionsError
def CreateStagingFromDownload(self, url, progress_callback=None):
"""Creates a new staging area from a fresh download of the Cloud SDK.
Args:
url: str, The url to download the new SDK from.
progress_callback: f(float), A function to call with the fraction of
completeness.
Returns:
An InstallationState object for the new install.
Raises:
installers.URLFetchError: If the new SDK could not be downloaded.
InvalidDownloadError: If the new SDK was malformed.
"""
self._ClearStaging()
with file_utils.TemporaryDirectory() as t:
download_dir = os.path.join(t, '.download')
extract_dir = os.path.join(t, '.extract')
installers.ComponentInstaller.DownloadAndExtractTar(
url, download_dir, extract_dir, progress_callback=progress_callback)
files = os.listdir(extract_dir)
if len(files) != 1:
raise InvalidDownloadError()
sdk_root = os.path.join(extract_dir, files[0])
file_utils.MoveDir(sdk_root, self.__sdk_staging_root)
staging_sdk = InstallationState(self.__sdk_staging_root)
self.CopyMachinePropertiesTo(staging_sdk)
return staging_sdk
@_RaisesPermissionsError
def ReplaceWith(self, other_install_state):
"""Replaces this installation with the given other installation.
This moves the current installation to the backup directory of the other
installation. Then, it moves the entire second installation to replace
this one on the file system. The result is that the other installation
completely replaces the current one, but the current one is snapshotted and
stored as a backup under the new one (and can be restored later).
Args:
other_install_state: InstallationState, The other state with which to
replace this one.
"""
self.ClearBackup()
self.ClearTrash()
other_install_state.ClearBackup()
# pylint: disable=protected-access, This is an instance of InstallationState
file_utils.MoveDir(self.__sdk_root, other_install_state.__backup_directory)
file_utils.MoveDir(other_install_state.__sdk_root, self.__sdk_root)
@_RaisesPermissionsError
def RestoreBackup(self):
"""Restore the backup from this install state if it exists.
    If this installation has a backup stored in it (created by an update that
used ReplaceWith(), above), it replaces this installation with the backup,
using a temporary staging area. This installation is moved to the trash
directory under the installation that exists after this is done. The trash
directory can be removed at any point in the future. We just don't want to
delete code that is running since some platforms have a problem with that.
Returns:
bool, True if there was a backup to restore, False otherwise.
"""
if not self.HasBackup():
return False
self._ClearStaging()
file_utils.MoveDir(self.__backup_directory, self.__sdk_staging_root)
staging_state = InstallationState(self.__sdk_staging_root)
staging_state.ClearTrash()
# pylint: disable=protected-access, This is an instance of InstallationState
file_utils.MoveDir(self.__sdk_root, staging_state.__trash_directory)
file_utils.MoveDir(staging_state.__sdk_root, self.__sdk_root)
return True
def HasBackup(self):
"""Determines if this install has a valid backup that can be restored.
Returns:
bool, True if there is a backup, False otherwise.
"""
return os.path.isdir(self.__backup_directory)
def BackupDirectory(self):
"""Gets the backup directory of this installation if it exists.
Returns:
str, The path to the backup directory or None if it does not exist.
"""
if self.HasBackup():
return self.__backup_directory
return None
@_RaisesPermissionsError
def _ClearStaging(self, progress_callback=None):
"""Deletes the current staging directory if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.exists(self.__sdk_staging_root):
file_utils.RmTree(self.__sdk_staging_root)
if progress_callback:
progress_callback(1)
@_RaisesPermissionsError
def ClearBackup(self, progress_callback=None):
"""Deletes the current backup if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.isdir(self.__backup_directory):
file_utils.RmTree(self.__backup_directory)
if progress_callback:
progress_callback(1)
@_RaisesPermissionsError
def ClearTrash(self, progress_callback=None):
"""Deletes the current trash directory if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.isdir(self.__trash_directory):
file_utils.RmTree(self.__trash_directory)
if progress_callback:
progress_callback(1)
def _GetInstaller(self, snapshot):
"""Gets a component installer based on the given snapshot.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
component to install.
Returns:
The installers.ComponentInstaller.
"""
return installers.ComponentInstaller(self.__sdk_root,
self._state_directory,
snapshot)
@_RaisesPermissionsError
def Install(self, snapshot, component_id, progress_callback=None):
"""Installs the given component based on the given snapshot.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
component to install.
component_id: str, The component to install from the given snapshot.
progress_callback: f(float), A function to call with the fraction of
completeness.
Raises:
installers.URLFetchError: If the component associated with the provided
component ID has a URL that is not fetched correctly.
"""
files = self._GetInstaller(snapshot).Install(
component_id, progress_callback=progress_callback)
manifest = InstallationManifest(self._state_directory, component_id)
manifest.MarkInstalled(snapshot, files)
@_RaisesPermissionsError
def Uninstall(self, component_id, progress_callback=None):
"""Uninstalls the given component.
Deletes all the files for this component and marks it as no longer being
installed.
Args:
component_id: str, The id of the component to uninstall.
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
manifest = InstallationManifest(self._state_directory, component_id)
paths = manifest.InstalledPaths()
total_paths = float(len(paths))
root = self.__sdk_root
dirs_to_remove = set()
for num, p in enumerate(paths, start=1):
path = os.path.join(root, p)
if os.path.isfile(path) or os.path.islink(path):
os.remove(path)
# Clean up the pyc files that correspond to any py files being removed.
if p.endswith('.py'):
pyc_path = path + 'c'
if os.path.isfile(pyc_path):
os.remove(pyc_path)
dir_path = os.path.dirname(path)
if dir_path:
dirs_to_remove.add(os.path.normpath(dir_path))
elif os.path.isdir(path):
dirs_to_remove.add(os.path.normpath(path))
if progress_callback:
progress_callback(num / total_paths)
# Remove dirs from the bottom up. Subdirs will always have a longer path
    # than its parent.
for d in sorted(dirs_to_remove, key=len, reverse=True):
if os.path.isdir(d) and not os.path.islink(d) and not os.listdir(d):
os.rmdir(d)
manifest.MarkUninstalled()
def CopyMachinePropertiesTo(self, other_state):
"""Copy this state's properties file to another state.
This is primarily intended to be used to maintain the machine properties
file during a schema-change-induced reinstall.
Args:
other_state: InstallationState, The installation state of the fresh
Cloud SDK that needs the properties file mirrored in.
"""
my_properties = os.path.join(
self.sdk_root, config.Paths.CLOUDSDK_PROPERTIES_NAME)
other_properties = os.path.join(
other_state.sdk_root, config.Paths.CLOUDSDK_PROPERTIES_NAME)
if not os.path.exists(my_properties):
return
shutil.copyfile(my_properties, other_properties)
class InstallationManifest(object):
"""Class to encapsulate the data stored in installation manifest files."""
MANIFEST_SUFFIX = '.manifest'
def __init__(self, state_dir, component_id):
"""Creates a new InstallationManifest.
Args:
state_dir: str, The directory path where install state is stored.
component_id: str, The component id that you want to get the manifest for.
"""
self.state_dir = state_dir
self.id = component_id
self.snapshot_file = os.path.join(
self.state_dir,
component_id + InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)
self.manifest_file = os.path.join(
self.state_dir,
component_id + InstallationManifest.MANIFEST_SUFFIX)
def MarkInstalled(self, snapshot, files):
"""Marks this component as installed with the given snapshot and files.
This saves the ComponentSnapshot and writes the installed files to a
manifest so they can be removed later.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that was the source
of the install.
files: list of str, The files that were created by the installation.
"""
with open(self.manifest_file, 'w') as fp:
for f in files:
fp.write(f + '\n')
snapshot.WriteToFile(self.snapshot_file)
def MarkUninstalled(self):
"""Marks this component as no longer being installed.
This does not actually uninstall the component, but rather just removes the
snapshot and manifest.
"""
for f in [self.manifest_file, self.snapshot_file]:
if os.path.isfile(f):
os.remove(f)
def ComponentSnapshot(self):
"""Loads the local ComponentSnapshot for this component.
Returns:
The snapshots.ComponentSnapshot for this component.
"""
return snapshots.ComponentSnapshot.FromFile(self.snapshot_file)
def ComponentDefinition(self):
"""Loads the ComponentSnapshot and get the schemas.Component this component.
Returns:
The schemas.Component for this component.
"""
return self.ComponentSnapshot().ComponentFromId(self.id)
def VersionString(self):
"""Gets the version string of this component as it was installed.
Returns:
str, The installed version of this component.
"""
return self.ComponentDefinition().version.version_string
def InstalledPaths(self):
"""Gets the list of files and dirs created by installing this component.
Returns:
list of str, The files and directories installed by this component.
"""
with open(self.manifest_file) as f:
files = [line.rstrip() for line in f]
return files
def InstalledDirectories(self):
"""Gets the set of directories created by installing this component.
Returns:
set(str), The directories installed by this component.
"""
with open(self.manifest_file) as f:
dirs = set()
for line in f:
fixed = line.rstrip()
if fixed.endswith('/'):
dirs.add(fixed)
return dirs
class LastUpdateCheck(object):
"""A class to encapsulate information on when we last checked for updates."""
LAST_UPDATE_CHECK_FILE = 'last_update_check.json'
DATE = 'date'
LAST_NAG_DATE = 'last_nag_date'
REVISION = 'revision'
UPDATES_AVAILABLE = 'updates_available'
def __init__(self, install_state):
self.__install_state = install_state
# pylint: disable=protected-access, These classes work together
self.__last_update_check_file = os.path.join(
install_state._state_directory, LastUpdateCheck.LAST_UPDATE_CHECK_FILE)
self._LoadData()
def _LoadData(self):
"""Deserializes data from the json file."""
self.__dirty = False
if not os.path.isfile(self.__last_update_check_file):
data = {}
else:
with open(self.__last_update_check_file) as fp:
data = json.loads(fp.read())
self.__last_update_check_date = data.get(LastUpdateCheck.DATE, 0)
self.__last_nag_date = data.get(LastUpdateCheck.LAST_NAG_DATE, 0)
self.__last_update_check_revision = data.get(LastUpdateCheck.REVISION, 0)
self.__updates_available = data.get(LastUpdateCheck.UPDATES_AVAILABLE,
False)
def _SaveData(self):
"""Serializes data to the json file."""
if not self.__dirty:
return
data = {LastUpdateCheck.DATE: self.__last_update_check_date,
LastUpdateCheck.LAST_NAG_DATE: self.__last_nag_date,
LastUpdateCheck.REVISION: self.__last_update_check_revision,
LastUpdateCheck.UPDATES_AVAILABLE: self.__updates_available}
with open(self.__last_update_check_file, 'w') as fp:
fp.write(json.dumps(data))
self.__dirty = False
def __enter__(self):
return self
def __exit__(self, *args):
self._SaveData()
def UpdatesAvailable(self):
"""Returns whether we already know about updates that are available.
Returns:
bool, True if we know about updates, False otherwise.
"""
return self.__updates_available
def LastUpdateCheckRevision(self):
"""Gets the revision of the snapshot from the last update check.
Returns:
int, The revision of the last checked snapshot.
"""
return self.__last_update_check_revision
def LastUpdateCheckDate(self):
"""Gets the time of the last update check as seconds since the epoch.
Returns:
int, The time of the last update check.
"""
return self.__last_update_check_date
def LastNagDate(self):
"""Gets the time when the last nag was printed as seconds since the epoch.
Returns:
int, The time of the last nag.
"""
return self.__last_nag_date
def SecondsSinceLastUpdateCheck(self):
"""Gets the number of seconds since we last did an update check.
Returns:
int, The amount of time in seconds.
"""
return time.time() - self.__last_update_check_date
def SecondsSinceLastNag(self):
"""Gets the number of seconds since we last printed that there were updates.
Returns:
int, The amount of time in seconds.
"""
return time.time() - self.__last_nag_date
@_RaisesPermissionsError
def SetFromSnapshot(self, snapshot, force=False):
"""Sets that we just did an update check and found the given snapshot.
    If the given snapshot is different than the last one we saw, this will also
diff the new snapshot with the current install state to refresh whether
there are components available for update.
You must call Save() to persist these changes.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot pulled from the
server.
force: bool, True to force a recalculation of whether there are available
updates, even if the snapshot revision has not changed.
Returns:
bool, True if there are now components to update, False otherwise.
"""
if force or self.__last_update_check_revision != snapshot.revision:
diff = self.__install_state.DiffCurrentState(snapshot)
self.__updates_available = bool(diff.AvailableUpdates())
self.__last_update_check_revision = snapshot.revision
self.__last_update_check_date = time.time()
self.__dirty = True
return self.__updates_available
def SetFromIncompatibleSchema(self):
"""Sets that we just did an update check and found a new schema version.
You must call Save() to persist these changes.
"""
self.__updates_available = True
self.__last_update_check_revision = 0 # Doesn't matter
self.__last_update_check_date = time.time()
self.__dirty = True
def SetNagged(self):
"""Sets that we printed the update nag."""
self.__last_nag_date = time.time()
self.__dirty = True
def Save(self):
"""Saves the changes we made to this object."""
self._SaveData()
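# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal way to drive
# the classes above. Assumes this file is run inside a real Cloud SDK
# installation; otherwise InstallationState.ForCurrent() raises
# InvalidSDKRootError.
if __name__ == '__main__':
  try:
    state = InstallationState.ForCurrent()
    # List every component recorded in the local install state.
    for cid, manifest in state.InstalledComponents().iteritems():
      print cid, manifest.VersionString()
    # Report how long ago an update check last ran.
    print state.LastUpdateCheck().SecondsSinceLastUpdateCheck()
  except InvalidSDKRootError:
    print 'Not running inside a Cloud SDK installation.'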
| bsd-3-clause | -8,158,004,777,159,290,000 | -2,086,401,390,481,443,600 | 33.507079 | 80 | 0.676339 | false |
psykidellic/appengine-flask-skeleton | lib/pyasn1_modules/rfc2560.py | 127 | 7821 | #
# OCSP request/response syntax
#
# Derived from a minimal OCSP library (RFC2560) code written by
# Bud P. Bruegger <bud@ancitel.it>
# Copyright: Ancitel, S.p.a, Rome, Italy
# License: BSD
#
#
# current limitations:
# * request and response work only for a single certificate
# * only some values are parsed out of the response
# * the request doesn't set a nonce or a signature
# * there is no signature validation of the response
# * dates are left as strings in GeneralizedTime format -- datetime.datetime
# would be nicer
#
from pyasn1.type import tag, namedtype, namedval, univ, constraint, useful
from pyasn1_modules import rfc2459
# Start of OCSP module definitions
# This should be in directory Authentication Framework (X.509) module
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
# end of directory Authentication Framework (X.509) module
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString): pass
# end of PKIX Certificate Extensions module
id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
class AcceptableResponses(univ.SequenceOf):
componentType = univ.ObjectIdentifier()
class ArchiveCutoff(useful.GeneralizedTime): pass
class UnknownInfo(univ.Null): pass
class RevokedInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class CertID(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('issuerNameHash', univ.OctetString()),
namedtype.NamedType('issuerKeyHash', univ.OctetString()),
namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
)
class CertStatus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('good', univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('revoked', RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('unknown', UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class SingleResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certID', CertID()),
namedtype.NamedType('certStatus', CertStatus()),
namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class KeyHash(univ.OctetString): pass
class ResponderID(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('byName', rfc2459.Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('byKey', KeyHash().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class Version(univ.Integer):
namedValues = namedval.NamedValues(('v1', 0))
class ResponseData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('responderID', ResponderID()),
namedtype.NamedType('producedAt', useful.GeneralizedTime()),
namedtype.NamedType('responses', univ.SequenceOf(SingleResponse())),
namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class BasicOCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsResponseData', ResponseData()),
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class ResponseBytes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseType', univ.ObjectIdentifier()),
namedtype.NamedType('response', univ.OctetString())
)
class OCSPResponseStatus(univ.Enumerated):
namedValues = namedval.NamedValues(
('successful', 0),
('malformedRequest', 1),
('internalError', 2),
('tryLater', 3),
('undefinedStatus', 4), # should never occur
('sigRequired', 5),
('unauthorized', 6)
)
class OCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseStatus', OCSPResponseStatus()),
namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Request(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('reqCert', CertID()),
namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Signature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class TBSRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('requestorName', GeneralName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('requestList', univ.SequenceOf(Request())),
namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class OCSPRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsRequest', TBSRequest()),
namedtype.OptionalNamedType('optionalSignature', Signature().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
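# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): decoding a DER-encoded
# OCSP response with the standard pyasn1 decoder. 'der_bytes' is a hypothetical
# byte string as returned by an OCSP responder.
#
#   from pyasn1.codec.der import decoder
#   response, _ = decoder.decode(der_bytes, asn1Spec=OCSPResponse())
#   print response.getComponentByName('responseStatus')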
| apache-2.0 | 7,204,109,742,983,788,000 | 8,340,062,385,328,884,000 | 44.736842 | 158 | 0.71129 | false |
IronLanguages/ironpython3 | Src/StdLib/Lib/csv.py | 90 | 16185 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
from io import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = next(self.reader)
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
% extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join([repr(x) for x in wrong_fields]))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error("Could not determine delimiter")
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = max(quotes, key=quotes.get)
if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(
r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = list(filter(None, data.split('\n')))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = next(rdr) # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in list(columnTypes.keys()):
for thisType in [int, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
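# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): sniffing the dialect of
# a small in-memory sample and reading it back with reader().
if __name__ == '__main__':
    _sample = 'a,b,c\n1,2,3\n4,5,6\n'
    _dialect = Sniffer().sniff(_sample)
    for _row in reader(StringIO(_sample), _dialect):
        print(_row)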
| apache-2.0 | 2,108,630,418,715,456,800 | -2,070,635,695,983,158,800 | 35.046771 | 131 | 0.526475 | false |
3dfxmadscientist/odoo-infrastructure | addons/infrastructure/hostname.py | 1 | 1468 | # -*- coding: utf-8 -*-
##############################################################################
#
# Infrastructure
# Copyright (C) 2014 Ingenieria ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class hostname(osv.osv):
""""""
_name = 'infrastructure.hostname'
_description = 'hostname'
_columns = {
'name': fields.char(string='name', required=True),
'server_id': fields.many2one('infrastructure.server', string='Server', ondelete='cascade', required=True),
}
_defaults = {
}
_constraints = [
]
hostname()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,518,633,610,384,951,000 | -7,900,098,675,806,680,000 | 27.784314 | 115 | 0.608311 | false |
neurotechuoft/MindType | Code/V1/src/deprecated/pyqtgraph/tests/test_srttransform3d.py | 51 | 1339 | import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_almost_equal
testPoints = np.array([
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[-1, -1, 0],
[0, -1, -1]])
def testMatrix():
"""
SRTTransform3D => Transform3D => SRTTransform3D
"""
tr = pg.SRTTransform3D()
tr.setRotate(45, (0, 0, 1))
tr.setScale(0.2, 0.4, 1)
tr.setTranslate(10, 20, 40)
assert tr.getRotation() == (45, QtGui.QVector3D(0, 0, 1))
assert tr.getScale() == QtGui.QVector3D(0.2, 0.4, 1)
assert tr.getTranslation() == QtGui.QVector3D(10, 20, 40)
tr2 = pg.Transform3D(tr)
assert np.all(tr.matrix() == tr2.matrix())
# This is the most important test:
# The transition from Transform3D to SRTTransform3D is a tricky one.
tr3 = pg.SRTTransform3D(tr2)
assert_array_almost_equal(tr.matrix(), tr3.matrix())
assert_almost_equal(tr3.getRotation()[0], tr.getRotation()[0])
assert_array_almost_equal(tr3.getRotation()[1], tr.getRotation()[1])
assert_array_almost_equal(tr3.getScale(), tr.getScale())
assert_array_almost_equal(tr3.getTranslation(), tr.getTranslation())
| agpl-3.0 | -8,638,555,694,701,312,000 | -6,491,198,285,511,263,000 | 33.333333 | 72 | 0.584018 | false |
devendermishrajio/nova_test_latest | nova/tests/unit/scheduler/filters/test_json_filters.py | 63 | 11677 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova.scheduler.filters import json_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestJsonFilter(test.NoDBTestCase):
def setUp(self):
super(TestJsonFilter, self).setUp()
self.filt_cls = json_filter.JsonFilter()
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024]])
def test_json_filter_passes(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes_with_no_query(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_memory(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_disk(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_service_disabled(self):
json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024],
['not', '$service.disabled']])
filter_properties = {'instance_type': {'memory_mb': 1024,
'local_gb': 200},
'scheduler_hints': {'query': json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
# Test json filter more thoroughly.
raw = ['and',
'$capabilities.enabled',
['=', '$capabilities.opt1', 'match'],
['or',
['and',
['<', '$free_ram_mb', 30],
['<', '$free_disk_mb', 300]],
['and',
['>', '$free_ram_mb', 30],
['>', '$free_disk_mb', 300]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
# Passes
capabilities = {'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
'service': service})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
# Passes
capabilities = {'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_basic_operators(self):
host = fakes.FakeHostState('host1', 'node1', {})
# (operator, arguments, expected_result)
ops_to_test = [
['=', [1, 1], True],
['=', [1, 2], False],
['<', [1, 2], True],
['<', [1, 1], False],
['<', [2, 1], False],
['>', [2, 1], True],
['>', [2, 2], False],
['>', [2, 3], False],
['<=', [1, 2], True],
['<=', [1, 1], True],
['<=', [2, 1], False],
['>=', [2, 1], True],
['>=', [2, 2], True],
['>=', [2, 3], False],
['in', [1, 1], True],
['in', [1, 1, 2, 3], True],
['in', [4, 1, 2, 3], False],
['not', [True], False],
['not', [False], True],
['or', [True, False], True],
['or', [False, False], False],
['and', [True, True], True],
['and', [False, False], False],
['and', [True, False], False],
# Nested ((True or False) and (2 > 1)) == Passes
['and', [['or', True, False], ['>', 2, 1]], True]]
for (op, args, expected) in ops_to_test:
raw = [op] + args
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertEqual(expected,
self.filt_cls.host_passes(host, filter_properties))
# This results in [False, True, False, True] and if any are True
# then it passes...
raw = ['not', True, False, True, False]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
# This results in [False, False, False] and if any are True
# then it passes...which this doesn't
raw = ['not', True, True, True]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_operator_raises(self):
raw = ['!=', 1, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
host = fakes.FakeHostState('host1', 'node1',
{})
self.assertRaises(KeyError,
self.filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
host = fakes.FakeHostState('host1', 'node1',
{})
raw = []
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
raw = {}
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_invalid_num_arguments_fails(self):
host = fakes.FakeHostState('host1', 'node1',
{})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
raw = ['>', 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_variable_ignored(self):
host = fakes.FakeHostState('host1', 'node1',
{})
raw = ['=', '$........', 1, 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
raw = ['=', '$foo', 2, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
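# Illustrative note (not part of the original tests): the query grammar exercised
# above is a JSON-encoded nested list whose first element is an operator, e.g.
#   ['and', ['>=', '$free_ram_mb', 1024], ['not', '$service.disabled']]
# and it reaches the filter through the 'query' scheduler hint, serialized with
# jsonutils.dumps() exactly as in setUp().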
| apache-2.0 | 7,277,047,055,752,989,000 | -4,262,919,172,204,826,000 | 39.404844 | 78 | 0.473581 | false |
shashank971/edx-platform | openedx/core/djangoapps/credit/routers.py | 138 | 1338 | """ DRF routers. """
from rest_framework import routers
class SimpleRouter(routers.SimpleRouter):
""" Simple DRF router. """
# Note (CCB): This is a retrofit of a DRF 2.4 feature onto DRF 2.3. This is, sadly, simpler than
# updating edx-ora2 to work with DRF 2.4. See https://github.com/tomchristie/django-rest-framework/pull/1333
# for details on this specific DRF 2.4 feature.
def get_lookup_regex(self, viewset, lookup_prefix=''):
"""
Given a viewset, return the portion of URL regex that is used
to match against a single instance.
        Note that lookup_prefix is not used directly inside REST framework
itself, but is required in order to nicely support nested router
implementations, such as drf-nested-routers.
https://github.com/alanjds/drf-nested-routers
"""
base_regex = '(?P<{lookup_prefix}{lookup_field}>{lookup_value})'
lookup_field = getattr(viewset, 'lookup_field', 'pk')
try:
lookup_value = viewset.lookup_value_regex
except AttributeError:
# Don't consume `.json` style suffixes
lookup_value = '[^/.]+'
return base_regex.format(
lookup_prefix=lookup_prefix,
lookup_field=lookup_field,
lookup_value=lookup_value
)
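# Illustrative note (not part of the original module): with the defaults above
# (lookup_field 'pk', no lookup_value_regex on the viewset, empty lookup_prefix)
# get_lookup_regex() returns the URL fragment '(?P<pk>[^/.]+)'.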
| agpl-3.0 | -3,746,220,683,715,027,000 | -4,440,480,908,540,571,000 | 40.8125 | 112 | 0.636771 | false |
baiyunping333/BurpSuite-Plugins | Sqlmap/thirdparty/chardet/sbcsgroupprober.py | 235 | 3127 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from charsetgroupprober import CharSetGroupProber
from sbcharsetprober import SingleByteCharSetProber
from langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model
from langgreekmodel import Latin7GreekModel, Win1253GreekModel
from langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from langthaimodel import TIS620ThaiModel
from langhebrewmodel import Win1255HebrewModel
from hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.True, hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber])
self.reset()
| gpl-2.0 | -269,911,258,023,170,530 | -4,352,816,645,766,064,000 | 47.859375 | 127 | 0.745763 | false |
alianmohammad/pd-gem5-latest | tests/quick/se/70.tgen/test.py | 74 | 2122 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
maxtick = 100000000000
| bsd-3-clause | 2,308,458,096,866,157,600 | 4,013,598,476,213,444,600 | 54.842105 | 72 | 0.797361 | false |
brian-l/django-1.4.10 | tests/regressiontests/admin_filters/tests.py | 6 | 35308 | from __future__ import absolute_import
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_unicode
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1/0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__in' # Ends with '__in"
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__isnull' # Ends with '__isnull"
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
title = 'department'
parameter_name = 'department'
def lookups(self, request, model_admin):
return sorted(set([
(employee.department.id, # Intentionally not a string (Refs #19318)
employee.department.code)
for employee in model_admin.queryset(request).all()
]))
def queryset(self, request, queryset):
if self.value():
return queryset.filter(department__id=self.value())
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithNonStringValue, ]
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
self.bob = User.objects.create_user('bob', 'bob@example.com')
self.lisa = User.objects.create_user('lisa', 'lisa@example.com')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
# Departments
self.dev = Department.objects.create(code='DEV', description='Development')
self.design = Department.objects.create(code='DSN', description='Design')
# Employees
self.john = Employee.objects.create(name='John Blue', department=self.dev)
self.jack = Employee.objects.create(name='Jack Red', department=self.design)
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today, self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(month=1, day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow)})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (str(self.one_week_ago), str(self.tomorrow)))
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_unicode(filterspec.title), u'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_unicode(filterspec.title), u'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_unicode(filterspec.title), u'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], u'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], u'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], u'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], u'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
self.assertRaisesRegexp(ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
self.assertRaisesRegexp(ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
        A SimpleListFilter's lookups method can return None, which disables the
        filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], u'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
        Ensure that list_filter works with two-character long field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], u'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], u'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
"""
        Ensure that a choice is marked as selected when using
        non-string values for lookups in SimpleListFilters.
Refs #19318
"""
modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department': self.john.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.pk)
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.jack, self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], u'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], u'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
| bsd-3-clause | -5,389,368,281,445,485,000 | 7,439,283,214,177,787,000 | 46.649123 | 168 | 0.655829 | false |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/traitlets/tests/_warnings.py | 17 | 4019 | # From scikit-image: https://github.com/scikit-image/scikit-image/blob/c2f8c4ab123ebe5f7b827bc495625a32bb225c10/skimage/_shared/_warnings.py
# Licensed under modified BSD license
__all__ = ['all_warnings', 'expected_warnings']
from contextlib import contextmanager
import sys
import warnings
import inspect
import re
@contextmanager
def all_warnings():
"""
Context for use in testing to ensure that all warnings are raised.
Examples
--------
>>> import warnings
>>> def foo():
... warnings.warn(RuntimeWarning("bar"))
We raise the warning once, while the warning filter is set to "once".
Hereafter, the warning is invisible, even with custom filters:
>>> with warnings.catch_warnings():
... warnings.simplefilter('once')
... foo()
We can now run ``foo()`` without a warning being raised:
>>> from numpy.testing import assert_warns
>>> foo()
To catch the warning, we call in the help of ``all_warnings``:
>>> with all_warnings():
... assert_warns(RuntimeWarning, foo)
"""
# Whenever a warning is triggered, Python adds a __warningregistry__
    # member to the *calling* module. The exercise here is to find
# and eradicate all those breadcrumbs that were left lying around.
#
# We proceed by first searching all parent calling frames and explicitly
# clearing their warning registries (necessary for the doctests above to
# pass). Then, we search for all submodules of skimage and clear theirs
# as well (necessary for the skimage test suite to pass).
frame = inspect.currentframe()
if frame:
for f in inspect.getouterframes(frame):
f[0].f_locals['__warningregistry__'] = {}
del frame
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
try:
mod.__warningregistry__.clear()
except AttributeError:
pass
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
yield w
@contextmanager
def expected_warnings(matching):
"""Context for use in testing to catch known warnings matching regexes
Parameters
----------
matching : list of strings or compiled regexes
Regexes for the desired warning to catch
Examples
--------
>>> from skimage import data, img_as_ubyte, img_as_float
>>> with expected_warnings(['precision loss']):
... d = img_as_ubyte(img_as_float(data.coins()))
Notes
-----
Uses `all_warnings` to ensure all warnings are raised.
Upon exiting, it checks the recorded warnings for the desired matching
pattern(s).
Raises a ValueError if any match was not found or an unexpected
warning was raised.
Allows for three types of behaviors: "and", "or", and "optional" matches.
    This is done to accommodate different build environments or loop conditions
that may produce different warnings. The behaviors can be combined.
If you pass multiple patterns, you get an orderless "and", where all of the
warnings must be raised.
If you use the "|" operator in a pattern, you can catch one of several warnings.
Finally, you can use "|\A\Z" in a pattern to signify it as optional.
"""
with all_warnings() as w:
# enter context
yield w
# exited user context, check the recorded warnings
remaining = [m for m in matching if not '\A\Z' in m.split('|')]
for warn in w:
found = False
for match in matching:
if re.search(match, str(warn.message)) is not None:
found = True
if match in remaining:
remaining.remove(match)
if not found:
raise ValueError('Unexpected warning: %s' % str(warn.message))
if len(remaining) > 0:
msg = 'No warning raised matching:\n%s' % '\n'.join(remaining)
raise ValueError(msg)
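# Illustrative sketch (hypothetical warning messages, not part of the original
# module) of the "or" and "optional" behaviours described in the docstring:
#
#     with expected_warnings(['precision loss|sign loss']):
#         foo()  # passes if either a "precision loss" or a "sign loss"
#                # warning is raised
#
#     with expected_warnings(['precision loss', 'low contrast|\A\Z']):
#         bar()  # "precision loss" is required; the '|\A\Z' alternative
#                # marks the "low contrast" pattern as optional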
| apache-2.0 | 4,570,499,017,652,856,000 | 4,180,365,947,981,664,000 | 36.560748 | 140 | 0.639214 | false |
locke105/mclib | examples/wsgi.py | 1 | 1781 |
import cgi
import json
from wsgiref import simple_server
import falcon
from mclib import mc_info
class MCInfo(object):
def on_get(self, req, resp):
host = req.get_param('host', required=True)
        port = req.get_param_as_int('port', min=1024,
                                    max=65535)
try:
if port is not None:
info = mc_info.get_info(host=host,
port=port)
else:
info = mc_info.get_info(host=host)
except Exception:
raise Exception('Couldn\'t retrieve info.')
if '.json' in req.uri:
resp.body = self.get_json(info)
return
preferred = req.client_prefers(['application/json', 'text/html'])
if 'html' in preferred:
resp.content_type = 'text/html'
resp.body = self.get_html(info)
else:
resp.body = self.get_json(info)
def get_html(self, info):
html = """<body>
<style>
table,th,td
{
border:1px solid black;
border-collapse:collapse
}
th,td
{
padding: 5px
}
</style>
<table>
"""
for k,v in info.iteritems():
items = {'key': cgi.escape(k)}
if isinstance(v, basestring):
items['val'] = cgi.escape(v)
else:
items['val'] = v
html = html + '<tr><td>%(key)s</td><td>%(val)s</td></tr>' % items
html = html + '</table></body>'
return html
def get_json(self, info):
return json.dumps(info)
app = falcon.API()
mcinfo = MCInfo()
app.add_route('/mcinfo', mcinfo)
app.add_route('/mcinfo.json', mcinfo)
if __name__ == '__main__':
httpd = simple_server.make_server('0.0.0.0', 3000, app)
httpd.serve_forever()
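# Example requests against this app (hypothetical Minecraft server address;
# illustrative only):
#
#     curl 'http://localhost:3000/mcinfo?host=mc.example.com&port=25565'
#     curl 'http://localhost:3000/mcinfo.json?host=mc.example.com'
#
# The first route returns HTML or JSON depending on the client's Accept header
# (via req.client_prefers); the '.json' route always returns JSON.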
| apache-2.0 | 5,668,512,917,991,380,000 | -7,540,774,517,685,403,000 | 21.2625 | 77 | 0.521617 | false |
Meertecha/LearnPythonTheGame | pyGameEngine.py | 1 | 3565 | ### Imports
import pickle, os, platform, random
### Functions
def main():
curPlayer = loadPlayer( 'Tory' )
curGame = loadGame( 'Python_Tutorial' )
startGame(curPlayer, curGame)
def banner():
'''
if platform.system() == "Windows":
clearCmd = "cls"
elif platform.system() == "Linux":
clearCmd = "clear"
else:
print ("Unknown operating system detected. Some operations may not perform correctly!\n")
os.system(clearCmd)
'''
version = 0.1
banner = (" **Welcome to the Python Learning Environment\n\
**Written by Tory Clasen - Version: " + str(version) + " \n\
**For help at any time please type '?' or 'help' \n\
**To exit the program type 'exit' or 'quit' \n\n")
print banner
def startGame(curPlayer, curGame):
try:
curScore = curPlayer['score'][curGame['gameName']]
except:
curScore = 0
while True:
#banner()
print '----------------------------------------\n' + curGame['gameName'] + ' has been loaded'
print curGame['banner'] + '\n----------------------------------------'
try:
pickle.dump( curPlayer, open( ( str(curPlayer['Name']) + ".plep"), "wb" ) )
except:
print "Error! Unable to save player profile at current location!"
print 'Your current score is: ' + str(curScore) + ' out of a total possible score of: ' + str(len(curGame['gameData']))
print "Question " + str(curScore) + ": \n" + str(curGame['gameData'][curScore]["Q"]) + "\n"
temp = curGame['gameData'][curScore]["D"]
data = eval(str(curGame['gameData'][curScore]["D"]))
print "Data " + str(curScore) + ": \n" + data
print '----------------------------------------\n'
try:
myAnswer = eval(str(getInput('What command do you want to submit? ')))
if myAnswer == (eval(str(curGame['gameData'][curScore]["A"]))):
print "Correct!"
curScore = curScore + 1
else:
print "Incorrect!"
except:
print 'The answer you submitted crashed the program, so it was probably wrong'
#break
def getInput(prompt):
theInput = raw_input( str(prompt) + "\n" )
if theInput == '?' or theInput.lower() == 'help':
print "HELP! HELP!"
elif theInput.lower() == 'exit' or theInput.lower() == 'quit':
raise SystemExit
else:
return theInput
def loadPlayer(playerName = ''):
#banner()
curPlayer = {}
if playerName == '':
playerName = getInput("I would like to load your profile. \nWhat is your name? ")
try:
# Attempt to load the player file.
curPlayer = pickle.load( open( ( str(playerName) + ".plep"), "rb" ) )
print "Player profile found... loading player data..."
except:
# Ask the player if they want to try to create a new profile file.
createNew = getInput( "Player profile not found for '" + str(playerName) + "'\nWould you like to create a new one? [Y/N]").lower()
curPlayer = {'Name':playerName}
if createNew == "y":
try:
pickle.dump( curPlayer, open( ( str(playerName) + ".plep"), "wb" ) )
print "Player profile successfully created!"
except:
print "Error! Unable to create player profile at current location!"
else:
print "Progress will not be saved for you..."
return curPlayer
def loadGame(gameName = ''):
banner()
curGame = {}
while True:
if gameName == '':
gameName = getInput("What game would you like to load? ")
try:
      # Attempt to load the game file.
curGame = pickle.load( open( ( str(gameName) + ".pleg"), "rb" ) )
print "Game module found... loading game data..."
gameName = ''
break
except:
gameName = ''
print "Game module not found... please try again..."
return curGame
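# Illustrative sketch (hypothetical, not shipped with this engine) of the
# structure loadGame() expects to find pickled in a '<name>.pleg' file. Each
# gameData entry holds a question "Q", a data expression "D" that is eval'd
# and shown to the player, and an answer expression "A" that is eval'd and
# compared with the eval'd player input:
#
#     exampleGame = {
#         'gameName': 'Python_Tutorial',
#         'banner': 'Welcome to the Python tutorial!',
#         'gameData': [
#             {"Q": "Return the length of the data string.",
#              "D": "'hello'",
#              "A": "len(data)"},
#         ],
#     }
#     pickle.dump(exampleGame, open('Python_Tutorial.pleg', 'wb'))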
main()
| mit | 2,450,407,264,510,128,000 | -4,810,859,592,862,259,000 | 31.409091 | 133 | 0.615708 | false |
openbig/odoo-contract | partner_billing/wizard/sale_make_invoice_advance.py | 1 | 1615 | # -*- encoding: utf-8 -*-
##############################################################################
#
# partner_billing
# (C) 2015 Mikołaj Dziurzyński, Grzegorz Grzelak, Thorsten Vocks (big-consulting GmbH)
# All Rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp import fields, models
import logging
_logger = logging.getLogger(__name__)
class sale_advance_payment_inv(osv.osv_memory):
_inherit = "sale.advance.payment.inv"
def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
res = super(sale_advance_payment_inv,self)._prepare_advance_invoice_vals(cr, uid, ids, context=context)
sale_order_obj = self.pool.get('sale.order')
for pair in res:
for sale in sale_order_obj.browse(cr, uid, [pair[0]]):
pair[1]['associated_partner'] = sale.associated_partner and sale.associated_partner.id or False
return res
| agpl-3.0 | -7,322,380,040,655,112,000 | -8,807,736,039,183,865,000 | 39.325 | 105 | 0.651581 | false |
ingo-m/pyprf | pyprf/analysis/pyprf_main.py | 2 | 14174 | # -*- coding: utf-8 -*-
"""Find best fitting model time courses for population receptive fields.
Use `import pRF_config as cfg` for static pRF analysis.
Use `import pRF_config_motion as cfg` for pRF analysis with motion stimuli.
"""
# Part of py_pRF_mapping library
# Copyright (C) 2016 Ingo Marquardt
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import time
import numpy as np
import nibabel as nb
import h5py
from pyprf.analysis.load_config import load_config
from pyprf.analysis.utilities import cls_set_config
from pyprf.analysis.model_creation_main import model_creation
from pyprf.analysis.preprocessing_main import pre_pro_models
from pyprf.analysis.preprocessing_main import pre_pro_func
from pyprf.analysis.preprocessing_hdf5 import pre_pro_models_hdf5
from pyprf.analysis.preprocessing_hdf5 import pre_pro_func_hdf5
from pyprf.analysis.find_prf import find_prf
def pyprf(strCsvCnfg, lgcTest=False): #noqa
"""
Main function for pRF mapping.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
lgcTest : Boolean
        Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
"""
# *************************************************************************
# *** Check time
print('---pRF analysis')
varTme01 = time.time()
# *************************************************************************
# *************************************************************************
# *** Preparations
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# Convert preprocessing parameters (for temporal and spatial smoothing)
# from SI units (i.e. [s] and [mm]) into units of data array (volumes and
# voxels):
cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)
cfg.varSdSmthSpt = np.divide(cfg.varSdSmthSpt, cfg.varVoxRes)
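    # Illustrative example (hypothetical config values): with a TR of 2.0 s and
    # a voxel size of 0.8 mm, a temporal smoothing SD of 4.0 s becomes
    # 4.0 / 2.0 = 2.0 volumes, and a spatial smoothing SD of 1.6 mm becomes
    # 1.6 / 0.8 = 2.0 voxels, matching the units of the data array.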
    # For the GPU version, we need to reduce the parallelisation factor to 1 now,
# because no separate CPU threads are to be created. We may still use CPU
# parallelisation for preprocessing, which is why the parallelisation
# factor is only reduced now, not earlier.
if cfg.strVersion == 'gpu':
cfg.varPar = 1
# *************************************************************************
# *************************************************************************
# *** Create or load pRF time course models
# In case of a multi-run experiment, the data may not fit into memory.
# (Both pRF model time courses and the fMRI data may be large in this
# case.) Therefore, we switch to hdf5 mode, where model time courses and
    # fMRI data are held in hdf5 files (on disk). The location of the hdf5 file
    # for model time courses is specified by 'strPathMdl' (in the config file).
    # The hdf5 files with fMRI data are stored at the same location as the input
# nii files.
# Array with pRF time course models, shape:
# aryPrfTc[x-position, y-position, SD, condition, volume].
# If in hdf5 mode, `aryPrfTc` is `None`.
aryPrfTc = model_creation(dicCnfg, lgcHdf5=cfg.lgcHdf5)
# *************************************************************************
# *************************************************************************
# *** Preprocessing
if cfg.lgcHdf5:
print('---Hdf5 mode.')
# Preprocessing of functional data:
vecLgcMsk, hdrMsk, aryAff, vecLgcVar, tplNiiShp, strPthHdf5Func = \
pre_pro_func_hdf5(cfg.strPathNiiMask,
cfg.lstPathNiiFunc,
lgcLinTrnd=cfg.lgcLinTrnd,
varSdSmthTmp=cfg.varSdSmthTmp,
varSdSmthSpt=cfg.varSdSmthSpt)
# Preprocessing of pRF model time courses:
strPrfTc, aryLgcMdlVar = \
pre_pro_models_hdf5(cfg.strPathMdl,
varSdSmthTmp=cfg.varSdSmthTmp,
strVersion=cfg.strVersion,
varPar=cfg.varPar)
# Dummy pRF time courses (for compatibility with regular mode):
aryPrfTc = None
# ---Makeshift solution for small data after masking---
# TODO: IMPLEMENT FULL HDF5 MODE FOR READING OF FUNCTIONAL DATA.
# Read hdf5 file (masked timecourses of current run):
fleHdfFunc = h5py.File(strPthHdf5Func, 'r')
# Access dataset in current hdf5 file:
dtsFunc = fleHdfFunc['func']
aryFunc = dtsFunc[:, :]
aryFunc = np.copy(aryFunc)
fleHdfFunc.close()
else:
# Preprocessing of pRF model time courses:
aryPrfTc = pre_pro_models(aryPrfTc,
varSdSmthTmp=cfg.varSdSmthTmp,
varPar=cfg.varPar)
# Preprocessing of functional data:
vecLgcMsk, hdrMsk, aryAff, vecLgcVar, aryFunc, tplNiiShp = \
pre_pro_func(cfg.strPathNiiMask,
cfg.lstPathNiiFunc,
lgcLinTrnd=cfg.lgcLinTrnd,
varSdSmthTmp=cfg.varSdSmthTmp,
varSdSmthSpt=cfg.varSdSmthSpt,
varPar=cfg.varPar)
# Dummy variables (for compatibility with hdf5 mode):
strPrfTc = None
aryLgcMdlVar = None
# *************************************************************************
# *************************************************************************
# *** Find pRF models for voxel time courses.
lstPrfRes = find_prf(dicCnfg, aryFunc, aryPrfTc=aryPrfTc,
aryLgcMdlVar=aryLgcMdlVar, strPrfTc=strPrfTc)
# *************************************************************************
# *************************************************************************
# *** Merge results from parallel processes
print('---------Prepare pRF finding results for export')
# Create list for vectors with fitting results, in order to put the results
# into the correct order:
lstResXpos = [None] * cfg.varPar
lstResYpos = [None] * cfg.varPar
lstResSd = [None] * cfg.varPar
lstResR2 = [None] * cfg.varPar
lstResPe = [None] * cfg.varPar
# Put output into correct order:
for idxRes in range(cfg.varPar):
# Index of results (first item in output list):
varTmpIdx = lstPrfRes[idxRes][0]
# Put fitting results into list, in correct order:
lstResXpos[varTmpIdx] = lstPrfRes[idxRes][1]
lstResYpos[varTmpIdx] = lstPrfRes[idxRes][2]
lstResSd[varTmpIdx] = lstPrfRes[idxRes][3]
lstResR2[varTmpIdx] = lstPrfRes[idxRes][4]
lstResPe[varTmpIdx] = lstPrfRes[idxRes][5]
# Concatenate output vectors (into the same order as the voxels that were
# included in the fitting):
aryBstXpos = np.concatenate(lstResXpos, axis=0).astype(np.float32)
aryBstYpos = np.concatenate(lstResYpos, axis=0).astype(np.float32)
aryBstSd = np.concatenate(lstResSd, axis=0).astype(np.float32)
aryBstR2 = np.concatenate(lstResR2, axis=0).astype(np.float32)
# aryBstXpos = np.zeros(0, dtype=np.float32)
# aryBstYpos = np.zeros(0, dtype=np.float32)
# aryBstSd = np.zeros(0, dtype=np.float32)
# aryBstR2 = np.zeros(0, dtype=np.float32)
# for idxRes in range(0, cfg.varPar):
# aryBstXpos = np.append(aryBstXpos, lstResXpos[idxRes])
# aryBstYpos = np.append(aryBstYpos, lstResYpos[idxRes])
# aryBstSd = np.append(aryBstSd, lstResSd[idxRes])
# aryBstR2 = np.append(aryBstR2, lstResR2[idxRes])
# Concatenate PEs, shape: aryBstPe[varNumVox, varNumCon].
aryBstPe = np.concatenate(lstResPe, axis=0).astype(np.float32)
varNumCon = aryBstPe.shape[1]
# Delete unneeded large objects:
del(lstPrfRes)
del(lstResXpos)
del(lstResYpos)
del(lstResSd)
del(lstResR2)
del(lstResPe)
# *************************************************************************
# *************************************************************************
# *** Reshape spatial parameters
    # Put results from pRF finding into array (they originally needed to be
# saved in a list due to parallelisation). Voxels were selected for pRF
# model finding in two stages: First, a mask was applied. Second, voxels
# with low variance were removed. Voxels are put back into the original
# format accordingly.
# Number of voxels that were included in the mask:
varNumVoxMsk = np.sum(vecLgcMsk)
# Array for pRF finding results, of the form aryPrfRes[voxel-count, 0:3],
# where the 2nd dimension contains the parameters of the best-fitting pRF
# model for the voxel, in the order (0) pRF-x-pos, (1) pRF-y-pos, (2)
# pRF-SD, (3) pRF-R2. At this step, only the voxels included in the mask
# are represented.
aryPrfRes01 = np.zeros((varNumVoxMsk, 6), dtype=np.float32)
    # Place voxels based on low-variance exclusion:
aryPrfRes01[vecLgcVar, 0] = aryBstXpos
aryPrfRes01[vecLgcVar, 1] = aryBstYpos
aryPrfRes01[vecLgcVar, 2] = aryBstSd
aryPrfRes01[vecLgcVar, 3] = aryBstR2
# Total number of voxels:
varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])
# Place voxels based on mask-exclusion:
aryPrfRes02 = np.zeros((varNumVoxTlt, 6), dtype=np.float32)
aryPrfRes02[vecLgcMsk, 0] = aryPrfRes01[:, 0]
aryPrfRes02[vecLgcMsk, 1] = aryPrfRes01[:, 1]
aryPrfRes02[vecLgcMsk, 2] = aryPrfRes01[:, 2]
aryPrfRes02[vecLgcMsk, 3] = aryPrfRes01[:, 3]
# Reshape pRF finding results into original image dimensions:
aryPrfRes = np.reshape(aryPrfRes02,
[tplNiiShp[0],
tplNiiShp[1],
tplNiiShp[2],
6])
del(aryPrfRes01)
del(aryPrfRes02)
# *************************************************************************
# *************************************************************************
# *** Reshape parameter estimates (betas)
# Bring PEs into original data shape. First, account for binary (brain)
# mask:
aryPrfRes01 = np.zeros((varNumVoxMsk, varNumCon), dtype=np.float32)
    # Place voxels based on low-variance exclusion:
aryPrfRes01[vecLgcVar, :] = aryBstPe
# Place voxels based on mask-exclusion:
aryPrfRes02 = np.zeros((varNumVoxTlt, varNumCon), dtype=np.float32)
aryPrfRes02[vecLgcMsk, :] = aryPrfRes01
# Reshape pRF finding results into original image dimensions:
aryBstPe = np.reshape(aryPrfRes02,
[tplNiiShp[0],
tplNiiShp[1],
tplNiiShp[2],
varNumCon])
# New shape: aryBstPe[x, y, z, varNumCon]
del(aryPrfRes01)
del(aryPrfRes02)
# *************************************************************************
# *************************************************************************
# *** Export results
# The nii header of the mask will be used for creation of result nii files.
# Set dtype to float32 to avoid precision loss (in case mask is int).
hdrMsk.set_data_dtype(np.float32)
# Calculate polar angle map:
aryPrfRes[:, :, :, 4] = np.arctan2(aryPrfRes[:, :, :, 1],
aryPrfRes[:, :, :, 0])
# Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
aryPrfRes[:, :, :, 5] = np.sqrt(np.add(np.power(aryPrfRes[:, :, :, 0],
2.0),
np.power(aryPrfRes[:, :, :, 1],
2.0)))
# List with name suffices of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity']
print('---------Exporting results')
# Save spatial pRF parameters to nii:
for idxOut in range(6):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes[:, :, :, idxOut],
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = (cfg.strPathOut + lstNiiNames[idxOut] + '.nii.gz')
nb.save(niiOut, strTmp)
# Save PEs to nii (not implemented for gpu mode):
if cfg.strVersion != 'gpu':
for idxCon in range(varNumCon):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryBstPe[:, :, :, idxCon],
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = (cfg.strPathOut
+ '_PE_'
+ str(idxCon + 1).zfill(2)
+ '.nii.gz')
nb.save(niiOut, strTmp)
# *************************************************************************
# *************************************************************************
# *** Report time
varTme02 = time.time()
varTme03 = varTme02 - varTme01
print('---Elapsed time: ' + str(varTme03) + ' s')
print('---Done.')
# *************************************************************************
| gpl-3.0 | -6,134,122,255,251,097,000 | -3,364,617,049,194,272,000 | 38.814607 | 79 | 0.543742 | false |