repo_name (string, len 5-100) | path (string, len 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, len 0-8.16k) | middle (string, len 3-512) | suffix (string, len 0-8.17k)
---|---|---|---|---|---|---|---|---|
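Each row below is one fill-in-the-middle (FIM) sample: prefix, middle and suffix partition a single contiguous excerpt of the file named in path, so concatenating the three spans in order reconstructs that excerpt (size and score are per-sample metadata). A minimal reassembly sketch, assuming rows are exposed as dicts keyed by the column names above (the function name is ours):

def reassemble(row):
    # The three text spans partition the excerpt, so simple
    # concatenation in prefix-middle-suffix order restores it.
    return row["prefix"] + row["middle"] + row["suffix"]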
contentful/contentful.py | contentful/locale.py | Python | mit | 1,165 | 0.002575
"""
contentful.locale
~~~~~~~~~~~~~~~~~
This module implements the Locale class.
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/localization
:copyright: (c) 2016 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
from .resource import Resource
class Locale(Resource):
"""
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/localization
"""
def __init__(self, item, **kwargs):
super(Locale, self).__init__(item, **kwargs)
self.code = item.get('code', '')
self.name = item.get('name', '')
self.fallback_code = item.get('fallbackCode', '')
self.default = item.get('default', False)
self.optional = item.get('optional', False)
def __repr__(self):
return "<Locale[{0}] code='{1}' default={2} fallback_code={3} optional={4}>".format(
self.name,
self.code,
self.default,
"'{0}'".format(
self.fallback_code
) if self.fallback_code is not None else 'None',
self.optional
)
updownlife/multipleK | dependencies/biopython-1.65/Tests/test_KEGG.py | Python | gpl-2.0 | 2,438 | 0
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# Revisions copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests the basic functionality of the KEGG parsers."""
from __future__ import print_function
import os
from Bio.KEGG import Enzyme
from Bio.KEGG import Compound
from Bio.KEGG import Map
from Bio.Pathway import System
# TODO - use unittest instead of print-and-compare testing
test_KEGG_Enzyme_files = ["enzyme.sample", "enzyme.irregular", "enzyme.new"]
test_KEGG_Compound_files = ["compound.sample", "compound.irregular"]
test_KEGG_Map_files = ["map00950.rea"]
def t_KEGG_Enzyme(testfiles):
"""Tests Bio.KEGG.Enzyme functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Te
|
sting Bio.KEGG.Enzyme on " + file + "\n\n")
records = Enzyme.parse(fh)
for record in records:
print(record)
print("\n")
fh.close()
def t_KEGG_Compound(testfiles):
"""Tests Bio.KEGG.Compound functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Compound on " + file + "\n\n")
records = Compound.parse(fh)
for record in records:
print(record)
print("\n")
fh.close()
def t_KEGG_Map(testfiles):
"""Tests Bio.KEGG.Map functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Map on " + file + "\n\n")
reactions = Map.parse(fh)
system = System()
for reaction in reactions:
system.add_reaction(reaction)
# sort the reaction output by the string names, so that the
# output will be consistent between python versions
# def str_cmp(first, second):
# return cmp(str(first), str(second))
rxs = system.reactions()
# sort: key instead of compare function (for py3 support)
# The function str_cmp above can be removed if the
# solution below proves resilient
rxs.sort(key=lambda x: str(x))
for x in rxs:
print(str(x))
fh.close()
t_KEGG_Enzyme(test_KEGG_Enzyme_files)
t_KEGG_Compound(test_KEGG_Compound_files)
t_KEGG_Map(test_KEGG_Map_files)
sapfo/medeas | src/old_scripts/main_simul_eigenvalues_distribution.py | Python | gpl-3.0 | 7,741 | 0.027774
#!/usr/bin/env python
"""
Created Wed Oct 7 15:04:36 CEST 2015
@author: sapfo
"""
import matplotlib
#matplotlib.use('Agg')
import simul_ms
import python_cmdscale
#import python_pca
import exp
import sys
import numpy as np
import pylab as py
from scipy.stats import norm
'''
We want to pick n1, n2, D, T?
Simulate data
Compute the distance matrix
MDS the distance matrix
Get coordinates
Get eigenvalues, eigenvectors
Plot comparing with the other eigenvalues
'''
#################### FIXED #############
n = 30
n1 = 5
n2 = 5
n3 = 5
D = 0.4
D1 = 0.1
#(D1<D)
nreps = 1000
## simulate data
rescaling = 2.0
verbose = False
########### 1 population ##############
print "##########
|
# 1 population ##############"
## expected tree length for one population
exp_tree_length = 0
for i in range(2,n+1):
exp_tree_length += 2./(i-1)
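# exp_tree_length now equals the standard coalescent expectation E[total length] = 2 * sum_{i=2}^{n} 1/(i-1)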
nsnps = [100]
T_mds = {}
T_pca = {}
Eigenvalues_mds = []
Distances_noise = []
Expected_Delta = np.zeros((n,n))
for kk in range(1,n):
Expected_Delta += np.eye(n,k=kk)
Expected_Delta += np.eye(n,k=-kk)
Expected_Delta *= 2./exp_tree_length
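# Expected_Delta now has 2/E[tree length] in every off-diagonal entry and zeros on the diagonal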
print Expected_Delta
for nsnp in nsnps:
T_mds[nsnp] = []
T_pca[nsnp] = []
for iteration in range(nreps):
params,data,tree_lengths = simul_ms.ms_one_pops(n=n,nreps=nsnp,verbose=0)
Delta = simul_ms.distance_matrix(data=data,verbose=0)
if verbose: print "Delta: ",Delta
Diff = Delta - Expected_Delta
if verbose: print "Diff: ",Diff
Distances_noise += list(Diff.flatten())
#Expected_Delta = zeros
evals_mds, evecs_mds, Y_mds = python_cmdscale.cmdscale(Delta)
Eigenvalues_mds += list(evals_mds[:-1])
#evals_pca, evecs_pca, Y_pca = python_pca.PCA(data.T)
#print "params: ",params
if verbose: print "average tree length (computed with ms): ",rescaling*np.average(tree_lengths)
if verbose: print "expected tree length (analytical coal): ",exp_tree_length
# mds expected total tree length, bias, rmse
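# (the model predicts lambda ~ 2/T^2, so T is recovered as sqrt(2 / mean eigenvalue))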
t_mds = (2./(np.average(evals_mds[:-1])))**(1/2.)
T_mds[nsnp].append(t_mds)
if verbose: print "expected T (mds) from eigenvalues: ",T_mds
# pca expected tree length, bias, rmse
#t_pca = 1./np.average(evals_pca[:-1])
#T_pca[nsnp].append(t_pca)
#if verbose: print "expected T (pca) from eigenvalues: ",T_pca
print "expected lambda1 (mds) for (Ivan analytical): ",2./((exp_tree_length)**2)
#print "expected lambda1 (pca) for (Ivan analytical): ",1./((exp_tree_length))
#print "observed lambda1 (mds procedure): ",evals_mds[0]
#print "observed lambda1 (pca procedure): ",evals_pca[0]
#print "observed average lambdas (mds): ",np.average(evals_mds[:-1])
#print "observed average lambdas (pca): ",np.average(evals_pca[:-1])
#print "evals (first 10): ",evals_mds[:10]
mu1,std1 = norm.fit(Eigenvalues_mds)
mu2,std2 = norm.fit(Distances_noise)
fig = py.figure()
py.suptitle("1 population, %s snps, %s rep"%(nsnp,nreps))
ax1 = fig.add_subplot(2,1,1)
py.title("Eigenvalues")
py.hist(Eigenvalues_mds,normed=True,alpha=0.5)
py.vlines(2./((exp_tree_length)**2),0,10,color='red')
xmin,xmax=py.xlim()
x = np.linspace(xmin,xmax,100)
p = norm.pdf(x,mu1,std1)
py.plot(x,p,'k',linewidth=2)
ax1 = fig.add_subplot(2,1,2)
py.title("Distances")
py.hist(Distances_noise,normed=True,alpha=0.5)
xmin,xmax=py.xlim()
x = np.linspace(xmin,xmax,100)
p = norm.pdf(x,mu2,std2)
py.plot(x,p,'k',linewidth=2)
#py.savefig("Eigenvalues_mds_1pop.pdf")
py.show()
sys.exit()
### plotting one population ###
py.plot(Y[:,0],(Y[:,1]),'o',color='blue')
py.title("simulations 1 population n = %s, nreps = %s "%(n,nreps))
py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1])))
py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1])))
########### 2 populations ##############
print "########### 2 populations ##############"
#ms simul
params_2pops,data_2pops,tree_lengths_2pops = simul_ms.ms_two_pops(n1=n1, n2=n2, D=1./rescaling*D,nreps=nreps,verbose=0)
avg_tree_length_2pops = rescaling*np.average(tree_lengths_2pops)
Delta_2pops = simul_ms.distance_matrix(data=data_2pops,verbose=0)
#cmdscale
evals_2pops, evecs_2pops, Y_2pops = python_cmdscale.cmdscale(Delta_2pops)
exp.T_D_two_pops(eigenvalues = evals_2pops,n1=n1,n2=n2,diploid=2)
# analytical
params_exp_2pops,evals_exp_2pops, evec_exp_2pops = exp.two_pops(n1=n1, n2=n2, D=D, T=avg_tree_length_2pops)
print "params_2pops (ms): ",params_2pops
print "params_exp_2pops: ",params_exp_2pops
print "average tree length (ms): ",rescaling*np.average(tree_lengths_2pops)
#print "expected tree length (coal): ",exp_tree_length
print "expected lambda1 (analytical): ",evals_exp_2pops[0]
print "observed lambda1 (cmdscale): ",evals_2pops[0]
print "expected lambda2 (analytical): ",evals_exp_2pops[1]
print "observed lambda2 (cmdscale): ",evals_2pops[1]
print "average observed lambda2...n-1 (cmdscale): ",np.average(evals_2pops[1:-1])
print evals_exp_2pops[:10]
print evals_2pops[:10]
#print "observed lambda1 (mds): ",evals[0]
#print "observed average lambdas (mds): ",np.average(evals[:-1])
### plotting two population ###
py.figure()
py.plot(Y_2pops[:,0][:n1],Y_2pops[:,1][:n1],'x',color='orange')
py.plot(Y_2pops[:,0][n1:],Y_2pops[:,1][n1:],'o',color='blue')
py.title("simulations 2 pops n1 = %s, n2 = %s, D = %s, nreps = %s "%(n1,n2,D,nreps))
py.xlabel("dim 1")
py.ylabel("dim 2")
#py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1])))
#py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1])))
#py.show()
########### 3 populations ##############
print "########### 3 populations ##############"
nreps = 100
#ms simul
params_3pops,data_3pops,tree_lengths_3pops = simul_ms.ms_three_pops(n1=n1, n2=n2, n3=n3, D=1./rescaling*D, D1 = 1./rescaling*D1,nreps=nreps,verbose=0)
avg_tree_length_3pops = rescaling*np.average(tree_lengths_3pops)
Delta_3pops = simul_ms.distance_matrix(data=data_3pops,verbose=0)
#cmdscale
evals_3pops, evecs_3pops, Y_3pops = python_cmdscale.cmdscale(Delta_3pops)
try:
Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp = exp.T_D_D1_three_pops(eigenvalues = evals_3pops,n1=n1,n2=n2,n3=n3,diploid=2)
except:
Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp= 1,1,1,1,1
print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops)
print "params_3pops (ms): ",params_3pops
# analytical
params_exp_3pops,evals_exp_3pops, evec_exp_3pops = exp.three_pops(n1=n1, n2=n2, n3=n3, D=D, D1=D1, T=avg_tree_length_3pops)
print "params_3pops (ms): ",params_3pops
print "params_exp_3pops: ",params_exp_3pops
print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops)
#print "expected tree length (coal): ",exp_tree_length
print "expected lambda1 (analytical): ",evals_exp_3pops[0]
print "observed lambda1 (cmdscale): ",evals_3pops[0]
print ""
print "expected lambda2 (analytical): ",evals_exp_3pops[1]
print "observed lambda2 (cmdscale): ",evals_3pops[1]
print ""
print "expected lambda3 (analytical): ",evals_exp_3pops[2]
print "observed lambda3 (cmdscale): ",evals_3pops[2]
print "average observed lambda3...n-1 (cmdscale): ",np.average(evals_3pops[2:-1])
print evals_exp_3pops[:10]
print evals_3pops[:10]
sys.exit()
### plotting three population ###
py.figure()
py.plot(Y_3pops[:,0][:n1],Y_3pops[:,1][:n1],'D',color='orange')
py.plot(Y_3pops[:,0][n1:n1+n2],Y_3pops[:,1][n1:n1+n2],'o',color='blue')
py.plot(Y_3pops[:,0][n1+n2:],Y_3pops[:,1][n1+n2:],'v',color='green')
py.title("simulations 3 pops n1 = %(n1)s, n2 = %(n2)s, n3 = %(n3)s, D = %(D)s, D1 = %(D1)s, nreps = %(nreps)s "%params_3pops)
py.xlabel("dim 1")
py.ylabel("dim 2")
py.show()
########### 4 populations and above ##############
xxdede/SoCo | soco/core.py | Python | mit | 72,281 | 0
# -*- coding: utf-8 -*-
# pylint: disable=C0302,fixme, protected-access
""" The core module contains the SoCo class that implements
the main entry to the SoCo functionality
"""
from __future__ import unicode_literals
import socket
import logging
import re
import requests
from .services import DeviceProperties, ContentDirectory
from .services import RenderingControl, AVTransport, ZoneGroupTopology
from .services import AlarmClock
from .groups import ZoneGroup
from .exceptions import DIDLMetadataError, SoCoUPnPException
from .data_structures import DidlPlaylistContainer,\
SearchResult, Queue, DidlObject, DidlMusicAlbum,\
from_didl_string, to_didl_string, DidlResource
from .utils import really_utf8, camel_to_underscore, really_unicode,\
url_escape_path
from .xml import XML
from soco import config
_LOG = logging.getLogger(__name__)
class _ArgsSingleton(type):
""" A metaclass which permits only a single instance of each derived class
sharing the same `_class_group` class attribute to exist for any given set
of positional arguments.
Attempts to instantiate a second instance of a derived class, or another
class with the same `_class_group`, with the same args will return the
existing instance.
For example:
>>> class ArgsSingletonBase(object):
... __metaclass__ = _ArgsSingleton
...
>>> class First(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
...
>>> class Second(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
>>> assert First('hi') is First('hi')
>>> assert First('hi') is First('bye')
AssertionError
>>> assert First('hi') is Second('hi')
"""
_instances = {}
def __call__(cls, *args, **kwargs):
key = cls._class_group if hasattr(cls, '_class_group') else cls
if key not in cls._instances:
cls._instances[key] = {}
if args not in cls._instances[key]:
cls._instances[key][args] = super(_ArgsSingleton, cls).__call__(
*args, **kwargs)
return cls._instances[key][args]
class _SocoSingletonBase( # pylint: disable=too-few-public-methods,no-init
_ArgsSingleton(str('ArgsSingletonMeta'), (object,), {})):
""" The base class for the SoCo class.
Uses a Python 2 and 3 compatible method of declaring a metaclass. See, eg,
here: http://www.artima.com/weblogs/viewpost.jsp?thread=236234 and
here: http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
"""
pass
# pylint: disable=R0904,too-many-instance-attributes
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
Public functions::
play -- Plays the current item.
play_uri -- Plays a track or a music stream by URI.
play_from_queue -- Plays an item in the queue.
pause -- Pause the currently playing track.
stop -- Stop the currently playing track.
seek -- Move the currently playing track a given elapsed time.
next -- Go to the next track.
previous -- Go back to the previous track.
switch_to_line_in -- Switch the speaker's input to line-in.
switch_to_tv -- Switch the speaker's input to TV.
get_current_track_info -- Get information about the currently playing
track.
get_speaker_info -- Get information about the Sonos speaker.
partymode -- Put all the speakers in the network in the same group.
join -- Join this speaker to another "master" speaker.
unjoin -- Remove this speaker from a group.
get_queue -- Get information about the queue.
get_artists -- Get artists from the music library
get_album_artists -- Get album artists from the music library
get_albums -- Get albums from the music library
get_genres -- Get genres from the music library
get_composers -- Get composers from the music library
get_tracks -- Get tracks from the music library
get_playlists -- Get playlists from the music library
get_music_library_information -- Get information from the music library
get_current_transport_info -- get speakers playing state
browse_by_idstring -- Browse (get sub-elements) a given type
add_uri_to_queue -- Adds a URI to the queue
add_to_queue -- Add a track to the end of the queue
remove_from_queue -- Remove a track from the queue
clear_queue -- Remove all tracks from queue
get_favorite_radio_shows -- Get favorite radio shows from Sonos'
Radio app.
get_favorite_radio_stations -- Get favorite radio stations.
create_sonos_playlist -- Create a new empty Sonos playlist
create_sonos_playlist_from_queue -- Create a new Sonos playlist
from the current queue.
add_item_to_sonos_playlist -- Adds a queueable item to a Sonos'
playlist
get_item_album_art_uri -- Get an item's Album Art absolute URI.
search_track -- Search for an artist, artist's albums, or track.
get_albums_for_artist -- Get albums for an artist.
get_tracks_for_album -- Get tracks for an artist's album.
start_library_update -- Trigger an update of the music library.
Properties::
uid -- The speaker's unique identifier
mute -- The speaker's mute status.
volume -- The speaker's volume.
bass -- The speaker's bass EQ.
treble -- The speaker's treble EQ.
loudness -- The status of the speaker's loudness compensation.
cross_fade -- The status of the speaker's crossfade.
status_light -- The state of the Sonos status light.
player_name -- The speaker's name.
play_mode -- The queue's repeat/shuffle settings.
queue_size -- Get size of queue.
library_updating -- Whether music library update is in progress.
album_artist_display_option -- album artist display option
.. warning::
These properties are not cached and will obtain information over the
network, so may take longer than expected to set or return a value. It
may be a good idea for you to cache the value in your own code.
"""
_class_group = 'SoCo'
# Key words used when performing searches
SEARCH_TRANSLATION = {'artists': 'A:ARTIST',
'album_artists': 'A:ALBUMARTIST',
'albums': 'A:ALBUM',
'genres': 'A:GENRE',
'composers': 'A:COMPOSER',
'tracks': 'A:TRACKS',
'playlists': 'A:PLAYLISTS',
'share': 'S:',
'sonos_playlists': 'SQ:',
'categories': 'A:'}
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
# Note: Creation of a SoCo instance should be as cheap and quick as
# possible. Do not make any network calls here
super(SoCo, self).__init__()
# Check if ip_address is a valid IPv4 representation.
# Sonos does not (yet) support IPv6
try:
socket.inet_aton(ip_address)
except socket.error:
raise ValueError("Not a valid IP address string")
#: The speaker's ip address
self.ip_address = ip_address
self.speaker_info = {} # Stores information about the current speaker
# The services which we use
# pylint: disable=invalid-name
self.avTransport = AVTransport(self)
self.contentDirectory = ContentDirectory(self)
zacherytapp/wedding | weddingapp/apps/registry/migrations/0001_initial.py | Python | bsd-3-clause | 1,051 | 0.001903
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Registry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('display_order', models.IntegerField()),
('visibility', models.CharField(default=b'Unpusblished', max_length=20, choices=[(b'Published', b'Published'), (b'Unpusblished', b'Unpusblished')])),
('title', models.CharField(max_length=50)),
('alt_text', models.CharField(max_length=200)),
('registry_image', models.ImageField(upload_to=b'')),
],
options={
'abstract': False,
},
),
]
GabrielBrascher/cloudstack | test/integration/component/test_stopped_vm.py | Python | apache-2.0 | 58,065 | 0.000224
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 for stopped Virtual Maschine life cycle
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
Volume,
Router,
DiskOffering,
Host,
Iso,
Cluster,
StoragePool,
Template)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
get_builtin_template_info,
update_resource_limit,
find_storage_pool_type)
from marvin.codes import PASS
class TestDeployVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDeployVM, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.skip = False
if cls.hypervisor.lower() == 'lxc':
if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
cls.skip = True
return
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.testdata["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
if self.skip:
self.skipTest("RBD storage type is required for data volumes for LXC")
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.testdata["virtual_machine"]["zoneid"] = self.zone.id
self.testdata["iso"]["zoneid"] = self.zone.id
self.testdata["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags=["advanced", "eip", "advancedns"], required_hardware="false")
def test_01_deploy_vm_no_startvm(self):
"""Test Deploy Virtual Machine with no startVM parameter
"""
# Validate the following:
# 1. deploy Vm without specifying the startvm parameter
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_02_deploy_vm_startvm_true(self):
"""Test Deploy Virtual Machine with startVM=true parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=true
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=True,
mode=self.zone.networktype
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_03_deploy_vm_startvm_false(self):
"""Test Deploy Virtual Machine with startVM=false parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=false
# 2. Should not be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 4. Check listRouters call for that account. List routers should
# return empty response
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug("Destroying instance: %s" % self.virtual_machine.name)
self.virtual_machine.delete(self.apiclient, expunge=True)
SOM-st/RPySOM | src/rtruffle/base_node_2.py | Python | mit | 229 | 0
from rtruffle.abstract_node import AbstractNode, NodeInitializeMetaClass
class BaseNode(AbstractNode):
__metaclass__ = NodeInitializeMetaClass
_immutable_fields_ = ['_source_section', '_parent']
_child_nodes_ = []
Ban3/Limnoria | plugins/Misc/plugin.py | Python | bsd-3-clause | 27,456 | 0.001603
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import os
import imp
import sys
import json
import time
import supybot
import supybot.conf as conf
from supybot import commands
import supybot.utils as utils
from supybot.commands import *
import supybot.ircdb as ircdb
import supybot.irclib as irclib
import supybot.utils.minisix as minisix
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.registry as registry
from supybot import commands
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Misc')
if minisix.PY2:
from itertools import ifilter as filter
def get_suffix(file):
for suffix in imp.get_suffixes():
if file[-len(suffix[0]):] == suffix[0]:
return suffix
return None
def getPluginsInDirectory(directory):
# get modules in a given directory
plugins = []
for filename in os.listdir(directory):
pluginPath = os.path.join(directory, filename)
if os.path.isdir(pluginPath):
if all(os.path.isfile(os.path.join(pluginPath, x))
for x in ['__init__.py', 'config.py', 'plugin.py']):
plugins.append(filename)
return plugins
class RegexpTimeout(Exception):
pass
class Misc(callbacks.Plugin):
"""Miscellaneous commands to access Supybot core. This is a core
Supybot plugin that should not be removed!"""
def __init__(self, irc):
self.__parent = super(Misc, self)
self.__parent.__init__(irc)
self.invalidCommands = \
ircutils.FloodQueue(conf.supybot.abuse.flood.interval())
conf.supybot.abuse.flood.interval.addCallback(self.setFloodQueueTimeout)
def setFloodQueueTimeout(self, *args, **kwargs):
self.invalidCommands.timeout = conf.supybot.abuse.flood.interval()
def callPrecedence(self, irc):
return ([cb for cb in irc.callbacks if cb is not self], [])
def invalidCommand(self, irc, msg, tokens):
assert not msg.repliedTo, 'repliedTo msg in Misc.invalidCommand.'
assert self is irc.callbacks[-1], 'Misc isn\'t last callback.'
assert msg.command in ('PRIVMSG', 'NOTICE')
self.log.debug('Misc.invalidCommand called (tokens %s)', tokens)
# First, we check for invalidCommand floods. This is rightfully done
# here since this will be the last invalidCommand called, and thus it
# will only be called if this is *truly* an invalid command.
maximum = conf.supybot.abuse.flood.command.invalid.maximum()
self.invalidCommands.enqueue(msg)
if self.invalidCommands.len(msg) > maximum and \
conf.supybot.abuse.flood.command.invalid() and \
not ircdb.checkCapability(msg.prefix, 'owner'):
punishment = conf.supybot.abuse.flood.command.invalid.punishment()
banmask = '*!%s@%s' % (msg.user, msg.host)
self.log.info('Ignoring %s for %s seconds due to an apparent '
'invalid command flood.', banmask, punishment)
if tokens and tokens[0] == 'Error:':
self.log.warning('Apparent error loop with another Supybot '
'observed. Consider ignoring this bot '
'permanently.')
ircdb.ignores.add(banmask, time.time() + punishment)
if conf.supybot.abuse.flood.command.invalid.notify():
irc.reply(_('You\'ve given me %s invalid commands within the last '
'%i seconds; I\'m now ignoring you for %s.') %
(maximum,
conf.supybot.abuse.flood.interval(),
utils.timeElapsed(punishment, seconds=False)))
return
# Now, for normal handling.
channel = msg.args[0]
# Only bother with the invalidCommand flood handling if it's actually
# enabled
if conf.supybot.abuse.flood.command.invalid():
# First, we check for invalidCommand floods. This is rightfully done
# here since this will be the last invalidCommand called, and thus it
# will only be called if this is *truly* an invalid command.
maximum = conf.supybot.abuse.flood.command.invalid.maximum()
banmasker = conf.supybot.protocols.irc.banmask.makeBanmask
if self.invalidCommands.len(msg) > maximum and \
not ircdb.checkCapability(msg.prefix, 'owner') and \
msg.prefix != irc.prefix and \
ircutils.isUserHostmask(msg.prefix):
penalty = conf.supybot.abuse.flood.command.invalid.punishment()
banmask = banmasker(msg.prefix, channel=None)
self.log.info('Ignoring %s for %s seconds due to an apparent '
'invalid command flood.', banmask, penalty)
if tokens and tokens[0] == 'Error:':
self.log.warning('Apparent error loop with another Supybot '
'observed. Consider ignoring this bot '
'permanently.')
ircdb.ignores.add(banmask, time.time() + penalty)
if conf.supybot.abuse.flood.command.invalid.notify():
irc.reply('You\'ve given me %s invalid commands within '
'the last minute; I\'m now ignoring you for %s.' %
(maximum,
utils.timeElapsed(penalty, seconds=False)))
return
# Now, for normal handling.
if conf.get(conf.supybot.reply.whenNotCommand, channel):
if len(tokens) >= 2:
cb = irc.getCallback(tokens[0])
if cb:
plugin = cb.name()
irc.error(format(_('The %q plugin is loaded, but there is '
'no command named %q in it. Try "list '
'%s" to see the commands in the %q '
'plugin.'), plugin, tokens[1],
plugin, plugin))
else:
irc.errorInvalid(_('command'), tokens[0], repr=False)
else:
command = tokens and tokens[0] or ''
irc.errorInvalid(_('command'), command, repr=False)
else:
if tokens:
# echo [] will get us an empty token set, but there's no
leosartaj/credit | credit/tests/main_tests/test_total_all_net.py | Python | mit | 1,235 | 0.00081
#!/usr/bin/env python2
import os
from credit import main, exce
from credit import jsonhelper as jh
from credit.tests import testData as td
import unittest
class Test_total_all_net(unittest.TestCase):
def setUp(self):
self.fnum = 10
self.days = 10
self.startname = 'test_display'
self.files = [(self.startname + str(i) + main.SHEETEXT) for i in \
range(self.fnum)]
self.bal = 0
for index, name in enumerate(self.files):
money = (index + 1) * 100
self.bal += money
fakeDict = td.fakeDict(self.days, money)
with open(name, 'w') as f:
f.write(jh.dict_to_json(fakeDict))
def test_total_all(self):
num_files = 0
totals = 0
for sheetname, total in main.total_all():
self.assertTrue((sheetname + main.SHEETEXT) in self.files)
num_files += 1
totals += total
self.assertEqual(num_files, self.fnum)
self.assertTrue(abs(totals - self.bal) < td.ERROR)
def test_net(self):
self.assertTrue(abs(main.net() - self.bal) < td.ERROR)
def tearDown(self):
for name in self.files:
os.remove(name)
adrienpacifico/openfisca-france | openfisca_france/model/caracteristiques_socio_demographiques/demographie.py | Python | agpl-3.0 | 9,098 | 0.024722
# -*- coding: utf-8 -*-
from ..base import * # noqa
build_column('idmen', IntCol(is_permanent = True, label = u"Identifiant du ménage"))
build_column('idfoy', IntCol(is_permanent = True, label = u"Identifiant du foyer"))
build_column('idfam', IntCol(is_permanent = True, label = u"Identifiant de la famille"))
build_column('quimen', EnumCol(QUIMEN, is_permanent = True))
build_column('quifoy', EnumCol(QUIFOY, is_permanent = True))
build_column('quifam', EnumCol(QUIFAM, is_permanent = True))
build_column('birth', DateCol(default = date(1970, 1, 1), is_permanent = True, label = u"Date de naissance"))
build_column('adoption', BoolCol(entity = "ind", label = u"Enfant adopté"))
build_column('alt', BoolCol(label = u'Enfant en garde alternée')) # TODO: cerfa_field
build_column('activite', EnumCol(label = u'Activité',
enum = Enum([u'Actif occupé',
u'Chômeur',
u'Étudiant, élève',
u'Retraité',
u'Autre inactif']), default = 4))
build_column('enceinte', BoolCol(entity = 'ind', label = u"Est enceinte"))
build_column('statmarit', EnumCol(label = u"Statut marital",
default = 2,
enum = Enum([u"Marié",
u"Célibataire",
u"Divorcé",
u"Veuf",
u"Pacsé",
u"Jeune veuf"], start = 1)))
build_column('nbN', PeriodSizeIndependentIntCol(cerfa_field = u'N', entity = 'foy',
label = u"Nombre d'enfants mariés/pacsés et d'enfants non mariés chargés de famille"))
build_column('nbR', PeriodSizeIndependentIntCol(cerfa_field = u'R', entity = 'foy',
label = u"Nombre de titulaires (autres que les enfants) de la carte invalidité d'au moins 80 %"))
build_column('caseE', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire : vous vivez seul au 1er janvier de l'année de perception des revenus et vous avez élevé un enfant pendant moins de 5 ans durant la période où vous viviez seul",
entity = 'foy',
cerfa_field = u'E', end = date(2012, 12, 31)))
build_column('caseF', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire : conjoint titulaire d'une pension ou d'une carte d'invalidité (vivant ou décédé l'année de perception des revenus)",
entity = 'foy',
cerfa_field = u'F'))
build_column('caseG', BoolCol(label = u"Titulaire d'une pension de veuve de guerre",
entity = 'foy',
cerfa_field = u'G')) # careful: do not confuse caseG and nbG, which both refer to a box "G"; one is a real box you fill in, the other a box you tick
build_column('caseH', PeriodSizeIndependentIntCol(label = u"Année de naissance des enfants à charge en garde alternée", entity = 'foy',
cerfa_field = u'H'))
# strictly speaking this is not box H: the boxes used to give the year of birth
# do relate to nbH but are not named; do we choose to keep cerfa_field = u'H' for caseH?
# Moreover there can be several caseH entries since there may be several children, so should they be named caseH1, caseH2...caseH6 (the 6 present on the tax return)?
# Boxes F, G, R and I, which also give the dependents' (PAC) years of birth, still need to be created
build_column('caseK', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire: vous avez eu un enfant décédé après l’âge de 16 ans ou par suite de faits de guerre",
entity = 'foy',
cerfa_field = u'K', end = date(2011, 12, 31)))
build_column('caseL', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire: vous vivez seul au 1er janvier de l'année de perception des revenus et vous avez élevé un enfant pendant au moins 5 ans durant la période où vous viviez seul",
entity = 'foy',
cerfa_field = u'L'))
build_column('caseN', BoolCol(label = u"Vous ne viviez pas seul au 1er janvier de l'année de perception des revenus",
entity = 'foy',
cerfa_field = u'N'))
build_column('caseP', BoolCol(label = u"Titulaire d'une pension pour une invalidité d'au moins 40 % ou d'une carte d'invalidité d'au moins 80%",
entity = 'foy',
cerfa_field = u'P'))
build_column('caseS', BoolCol(label = u"Vous êtes mariés/pacsés et l'un des deux déclarants âgé de plus de 75 ans est titulaire de la carte du combattant ou d'une pension militaire d'invalidité ou de victime de guerre",
entity = 'foy',
cerfa_field = u'S'))
build_column('caseT', BoolCol(label = u"Vous êtes parent isolé au 1er janvier de l'année de perception des revenus",
entity = 'foy',
cerfa_field = u'T'))
build_column('caseW', BoolCol(label = u"Vous ou votre conjoint (même s'il est décédé), âgés de plus de 75 ans, êtes titulaire de la carte du combattant ou d'une pension militaire d'invalidité ou de victime de guerre",
entity = 'foy',
cerfa_field = u'W'))
# for inv, check whether you agree and whether this is really the right box;
# box P already exists further down, at line 339, under the name caseP
build_column('invalide', BoolCol(label = u'Invalide')) # TODO: cerfa_field
class nb_par(Variable):
column = PeriodSizeIndependentIntCol(default = 0)
entity_class = Familles
label = u"Nombre d'adultes (parents) dans la famille"
def function(self, simulation, period):
# Note: this variable is "instantaneous": whatever period is requested, it returns the value on the first
# day, without changing the period.
quifam_holder = simulation.compute('quifam', period)
quifam = self.filter_role(quifam_holder, role = PART)
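# one adult for the head of the family, plus one more when a partner (PART) is present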
return period, 1 + 1 * (quifam == PART)
class maries(Variable):
column = BoolCol(default = False)
entity_class = Familles
label = u"maries"
def function(self, simulation, period):
"""couple = 1 if married couple, else 0. TODO: make a choice with couple?"""
# Note: this variable is "instantaneous": whatever period is requested, it returns the value on the first
# day, without changing the period.
statmarit_holder = simulation.compute('statmarit', period)
statmarit = self.filter_role(statmarit_holder, role = CHEF)
return period, statmarit == 1
class concub(Variable):
column = BoolCol(default = False)
entity_class = Familles
label = u"Indicatrice de vie en couple"
def function(self, simulation, period):
'''
concub = 1 if living as a couple. TODO: not very satisfactory
'''
# Note: this variable is "instantaneous": whatever period is requested, it returns the value on the first
# day, without changing the period.
nb_par = simulation.calculate('nb_par', period)
# TODO: concub is not equal to 1 for the partners (conjoints)
return period, nb_par == 2
class isol(Variable):
column = BoolCol(default = False)
entity_class = Familles
label = u"Parent (s'il y a lieu) isolé"
def function(self, simulation, period):
# Note: this variable is "instantaneous": whatever period is requested, it returns the value on the first
# day, without changing the period.
nb_par = simulation.calculate('nb_par', period)
return period, nb_par == 1
class est_enfant_dans_famille(Variable):
column = BoolCol
entity_class = Individus
label = u"Indique qe l'individu est un enfant dans une famille"
def function(self, simulation, period):
quifam = simulation.calculate('quifam', period)
return period, quifam > PART
class etu(Variable):
column = BoolCol(default = False)
entity_class = Individus
label = u"Indicatrice individuelle étudiant"
madhuni/AstroBox | src/ext/makerbot_driver/GcodeProcessors/AnchorProcessor.py | Python | agpl-3.0 | 5,212 | 0.002686
from __future__ import absolute_import
import re
import math
import contextlib
from .LineTransformProcessor import LineTransformProcessor
import makerbot_driver
class AnchorProcessor(LineTransformProcessor):
def __init__(self):
super(AnchorProcessor, self).__init__()
self.is_bundleable = True
self.code_map = {
re.compile('[^(;]*[gG]1 [XY]-?\d'): self._transform_anchor,
}
self.looking_for_first_move = True
self.speed = 1000
self.width_over_height = .8
def _grab_extruder(self, match):
self.extruder = match.group(2)
def _transform_anchor(self, match):
return_lines = [match.string]
if self.looking_for_first_move:
start_position = self.get_start_position()
return_lines = list(
self.create_anchor_command(start_position, return_lines[0]))
return_lines.append(match.string)
self.looking_for_first_move = False
return return_lines
def create_z_move_if_necessary(self, start_movement_codes, end_movement_codes):
"""
The platform must be moved up to the extruder to successfully anchor across the platform.
This function checks the location of the platform, and emits the correct G1 command to
move the platform
@param str start_movement_codes: Where the machine is moving from
@param str end_movement_codes: Where the machine is moving to
@return list: List of movements commands to move the platform
"""
return_codes = []
if 'Z' in start_movement_codes and 'Z' in end_movement_codes:
start_z = start_movement_codes['Z']
end_z = end_movement_codes['Z']
if start_z - end_z != 0:  # value comparison; 'is not 0' tested identity, which is unreliable for numbers
return_codes.append('G1 Z%f F%i\n' % (end_z, self.speed))
return return_codes
def create_anchor_command(self, start_position, end_position):
"""
Given two G1 commands, draws an anchor between them. Moves the platform if
necessary
@param str start_position: Where the machine is moving from
@param str end_position: Where the machine is moving to
@return list: The anchor commands
"""
assert start_position is not None and end_position is not None
start_movement_codes = makerbot_driver.Gcode.parse_line(start_position)[0]  # Where the bot is moving from
end_movement_codes = makerbot_driver.Gcode.parse_line(end_position)[0] # Where the bot is moving to
# Construct the next G1 command based on where the bot is moving to
anchor_command = "G1 "
for d in ['X', 'Y', 'Z']:
if d in end_movement_codes:
part = d + str(end_movement_codes[d]) # The next [XYZ] code
anchor_command += part
anchor_command += ' '
anchor_command += 'F%i ' % (self.speed)
extruder = "E"
extrusion_distance = self.find_extrusion_distance(
start_movement_codes, end_movement_codes)
anchor_command += extruder + str(extrusion_distance) + "\n"
reset_command = "G92 %s0" % (extruder) + "\n"
return_codes = self.create_z_move_if_necessary(start_movement_codes, end_movement_codes)
return_codes.extend([anchor_command, reset_command])
return return_codes
def get_extruder(self, codes):
extruder = 'A'
if 'B' in codes:
extruder = 'B'
elif 'E' in codes:
extruder = 'E'
return extruder
def find_extrusion_distance(self, start_position_codes, end_position_codes):
layer_height = end_position_codes.get('Z', 0)
start_position_point = []
end_position_point = []
for d in ['X', 'Y']:
start_position_point.append(start_position_codes.get(d, 0))
end_position_point.append(end_position_codes.get(d, 0))
distance = self.calc_euclidean_distance(
start_position_point, end_position_point)
cross_section = self.feed_cross_section_area(
float(layer_height), self.width_over_height)
extrusion_distance = cross_section * distance
return extrusion_distance
def feed_cross_section_area(self, height, width):
"""
Taken from MG, (hopefully not wrongfully) assumed to work
"""
radius = height / 2.0
tau = math.pi * 2
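# Obround ("stadium") cross-section: a full circle of diameter height plus a height * (width - height) rectangle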
return (tau / 2.0) * (radius * radius) + height * (width - height)
def calc_euclidean_distance(self, p1, p2):
assert len(p1) == len(p2)
distance = 0.0
for a, b in zip(p1, p2):
distance += pow(a - b, 2)
distance = math.sqrt(distance)
return distance
def get_start_position(self):
start_position = (-112, -73, 150)
if hasattr(self, 'profile') and None != self.profile:
sp = self.profile.values['print_start_sequence']['start_position']
start_position = (sp['start_x'], sp['start_y'], sp['start_z'])
start_codes = "G1 X%s Y%s Z%s F3300.0 (move to waiting position)"
start_codes = start_codes % start_position
return start_codes
yfdyh000/pontoon | pontoon/base/tests/test_utils.py | Python | bsd-3-clause | 644 | 0
from django_nose.tools import assert_false, assert_true
from pontoon.base.tests import TestCase
from pontoon.base.utils import extension_in
class UtilsTests(TestCase):
def test_extension_in(self):
assert_true(extension_in('filename.txt', ['bat', 'txt']))
assert_true(extension_in('filename.biff', ['biff']))
assert_true(extension_in('filename.tar.gz', ['gz']))
assert_false(extension_in('filename.txt', ['png', 'jpg']))
assert_false(extension_in('.dotfile', ['bat', 'txt']))
# Unintuitive, but that's how splitext works.
assert_false(extension_in('filename.tar.gz', ['tar.gz']))
ManiacalLabs/BiblioPixel | bibliopixel/animation/strip.py | Python | mit | 543 | 0.001842
from . animation import Animation
from .. layout import strip
class Strip(Animation):
LAYOUT_CLASS = strip.Strip
LAYOUT_ARGS = 'num',
def __init__(self, layout, start=0, end=-1, **kwds):
super().__init__(layout, **kwds)
self._start = max(start, 0)
self._end = end
if self._end < 0 or self._end >= self.layout.numLEDs:
self._end = self.layout.numLEDs - 1
self._size = self._end - self._start + 1
from .. import deprecated
if deprecated.allowed():
BaseStripAnim = Strip
wwitzel3/awx | awx/main/tests/functional/api/test_job_runtime_params.py | Python | apache-2.0 | 26,351 | 0.004326
import mock
import pytest
import yaml
import json
from awx.api.serializers import JobLaunchSerializer
from awx.main.models.credential import Credential
from awx.main.models.inventory import Inventory, Host
from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate
from awx.api.versioning import reverse
@pytest.fixture
def runtime_data(organization, credentialtype_ssh):
cred_obj = Credential.objects.create(
name='runtime-cred',
credential_type=credentialtype_ssh,
inputs={
'username': 'test_user2',
'password': 'pas4word2'
}
)
inv_obj = organization.inventories.create(name="runtime-inv")
return dict(
extra_vars='{"job_launch_var": 4}',
limit='test-servers',
job_type='check',
job_tags='provision',
skip_tags='restart',
inventory=inv_obj.pk,
credentials=[cred_obj.pk],
diff_mode=True,
verbosity=2
)
@pytest.fixture
def job_with_links(machine_credential, inventory):
return Job.objects.create(name='existing-job', credential=machine_credential, inventory=inventory)
@pytest.fixture
def job_template_prompts(project, inventory, machine_credential):
def rf(on_off):
jt = JobTemplate.objects.create(
job_type='run',
project=project,
inventory=inventory,
name='deploy-job-template',
# JT values must differ from prompted vals in order to register
limit='webservers',
job_tags = 'foobar',
skip_tags = 'barfoo',
ask_variables_on_launch=on_off,
ask_tags_on_launch=on_off,
ask_skip_tags_on_launch=on_off,
ask_job_type_on_launch=on_off,
ask_inventory_on_launch=on_off,
ask_limit_on_launch=on_off,
ask_credential_on_launch=on_off,
ask_diff_mode_on_launch=on_off,
ask_verbosity_on_launch=on_off,
)
jt.credentials.add(machine_credential)
return jt
return rf
@pytest.fixture
def job_template_prompts_null(project):
return JobTemplate.objects.create(
job_type='run',
project=project,
inventory=None,
name='deploy-job-template',
ask_variables_on_launch=True,
ask_tags_on_launch=True,
ask_skip_tags_on_launch=True,
ask_job_type_on_launch=True,
ask_inventory_on_launch=True,
ask_limit_on_launch=True,
ask_credential_on_launch=True,
ask_diff_mode_on_launch=True,
ask_verbosity_on_launch=True,
)
def data_to_internal(data):
'''
returns internal representation, model objects, dictionaries, etc
as opposed to integer primary keys and JSON strings
'''
internal = data.copy()
if 'extra_vars' in data:
internal['extra_vars'] = json.loads(data['extra_vars'])
if 'credentials' in data:
internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials'])
if 'inventory' in data:
internal['inventory'] = Inventory.objects.get(pk=data['inventory'])
return internal
# End of setup, tests start here
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(False)
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
runtime_data, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ()
# Check that job is serialized correctly
job_id = response.data['job']
assert job_id == 968
# If job is created with no arguments, it will inherit JT attributes
mock_job.signal_start.assert_called_once()
# Check that response tells us what things were ignored
assert 'job_launch_var' in response.data['ignored_fields']['extra_vars']
assert 'job_type' in response.data['ignored_fields']
assert 'limit' in response.data['ignored_fields']
assert 'inventory' in response.data['ignored_fields']
assert 'credentials' in response.data['ignored_fields']
assert 'job_tags' in response.data['ignored_fields']
assert 'skip_tags' in response.data['ignored_fields']
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_prompted_vars(runtime_data, job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
runtime_data, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
called_with = data_to_internal(runtime_data)
JobTemplate.create_unified_job.assert_called_with(**called_with)
job_id = response.data['job']
assert job_id == 968
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)
mock_job = mocker.MagicMock(spec=Job, id=968)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
{'job_tags': '', 'skip_tags': ''}, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'job_tags':'', 'skip_tags':''},)
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker):
job_template = job_template_prompts_null
# Give user permission to execute the job template
job_template.execute_role.members.add(rando)
# Give user permission to use inventory and credential at runtime
credential = Credential.objects.get(pk=runtime_data['credentials'][0])
credential.use_role.members.add(rando)
inventory = Inventory.objects.get(pk=runtime_data['inventory'])
inventory.use_role.members.add(rando)
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
runtime_data, rando, expect=201)
assert JobTemplate.create_unified_job.called
expected_call = data_to_internal(runtime_data)
assert JobTemplate.create_unified_job.call_args == (expected_call,)
job_id = response.data['job']
assert job_id == 968
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_reject_invalid_prompted_vars(runtime_data, job_template_prompts, post, admin_user):
job_template = job_template_prompts(True)
response = post(
reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(job_type='foobicate', # foobicate is not a valid job type
inventory=87865, credentials=[48474]), admin_user, expect=400)
assert response.data['job_type'] == [u'"foobicate" is not a valid choice.']
assert response.data['inventory'] == [u'Invalid pk "87865" - obje
Vongo/anna | src/talk/db.py | Python | gpl-2.0 | 5,871 | 0.027764
from py2neo.server import GraphServer
from py2neo import Node,Relationship
HISTO_LENGTH = 5
def insert(sentence, tokensAndType):
"""
Take a sentence and its associated tokens and type, and store all of it in the db as the last sentence of the dialogue
@type sentence: string
@param sentence: The inserted sentence
@type tokensAndType: list
@param tokensAndType: The sentence's tokens and its SentenceTypes
"""
server = GraphServer("../../../neo4j")
graph=server.graph
# Retrieve all the sentences of the dialogue
sentences = graph.cypher.execute("MATCH (n:Histo)-[r*0..5]->(st:SentenceHisto) RETURN st")
print sentences
numberOfSentences = len(sentences)
# Create a node to insert as the last sentence of the dialogue
sentence = Node("SentenceHisto", sentence=sentence)
sentenceType = graph.find_one("SentenceType",
property_key="label",
property_value = tokensAndType[1][0])
sentenceForm = graph.find_one("SentenceType",
property_key="label",
property_value = tokensAndType[1][1])
# Link the sentence with it's type and it's form
is_of_type = Relationship(sentence, "is_of_type", sentenceType)
is_of_form = Relationship(sentence, "is_of_type", sentenceForm) # pos / neg
graph.create(is_of_type)
graph.create(is_of_form)
print 'nb sentences : ' + str(numberOfSentences)
# If we have just started the dialogue we create the root node and store the sentence as its child
if numberOfSentences == 0:
histo = graph.find_one("Histo",
property_key="label",
property_value = "histo")
has = Relationship(histo, "is_followed_by", sentence)
graph.create(has)
# We only keep a history of the dialogue HISTO_LENGTH sentences long,
# so we delete the first sentence if the length is already HISTO_LENGTH
elif numberOfSentences == HISTO_LENGTH:
graph.cypher.execute("MATCH (n:Histo)-[r:is_followed_by*1]->(:SentenceHisto) FOREACH( rel IN r| DELETE rel)")
histo = graph.find_one("Histo",
property_key="label",
property_value = "histo")
has = Relationship(histo, "is_followed_by", sentences[1][0])
graph.create(has)
is_followed_by = Relationship(sentences[-1][0], "is_followed_by", sentence)
graph.create(is_followed_by)
# We insert the sentence in the histo
else:
is_followed_by = Relationship(sentences[-1][0], "is_followed_by", sentence)
graph.create(is_followed_by)
for token in tokensAndType[0]:
print token
tokenNode = graph.find_one("TokenHisto",
property_key="token",
property_value = token[0])
if tokenNode is None:
tokenNode = Node("TokenHisto", token=token[0], pos=token[1])
is_composed_of = Relationship(sentence, "is_composed_of", tokenNode)
graph.create(is_composed_of)
# Delete any existing dialogue history before starting a new one
def clean_histo():
"""
Delete any existing dialogue history before starting a new one
"""
server = GraphServer("../../../neo4j")
graph=server.graph
graph.cypher.execute("MATCH (n:SentenceHisto)-[rels]-(),(t:TokenHisto) delete rels, n,t")
# Extract all the characters from a movie given a sentence of this movie
def get_sentencesMovieCharacters(sentenceId):
"""
Extract all the characters from a movie given a sentence of this movie
@type sentenceId: integer
@param sentenceId: The id of the sentence
@return: A RecordList of Characters
"""
server = GraphServer("../../../neo4j")
graph=server.graph
query = "MATCH (n:Sentence{id:{sentenceId}})<-[r:IS_COMPOSED_OF*2]-(m:Movie), (m:Movie)-[:IS_COMPOSED_OF*2]->(:Sentence)-[IS_SPOKEN_BY]->(c:Character) RETURN COLLECT(DISTINCT c.full_name) as chars"
results = graph.cypher.execute_one(query, sentenceId=sentenceId)
return results
# Given a historic length (how far should we look into it), we compute the next sentence type (affirmative positive for instance) using pre-processed statistics
def findNextSentenceType(lenghtHisto, depthHisto):
"""
Given a historic length (how far should we look into it), we compute the next sentence type (affirmative positive for instance) using pre-processed statistics
@type lenghtHisto: integer
@param lenghtHisto: The maximal size of the historic
@type depthHisto: integer
@param depthHisto: The number of sentences we consider
@return: The next sentence's type
"""
server = GraphServer("../../../neo4j")
graph = server.graph
types =graph.cypher.execute("MATCH (n:Histo)-[r*0.."+str(lenghtHisto)+"]->(sh:SentenceHisto)-[is_of_type]->(st:SentenceType) RETURN st.label AS label")
# Build SentenceType "path"
listTypes=[]
for i in range(len(types)/2):
listTypes.append(types[2*i+1].label +' ' + types[2*i].label)
# Sublist with the good length
if len(listTypes) > depthHisto:
queryTypes = listTypes[-depthHisto:]
else:
queryTypes = listTypes
# Model query :
queryString= "MATCH (s:Stats)"
for label in queryTypes:
queryString+="-->(:TypeStat{label:\'" + label +"\'})"
queryString+="-->(ts:TypeStat) RETURN ts.label AS label ORDER BY ts.prob DESC LIMIT 1"
nextType = graph.cypher.execute(queryString)
return nextType[0].label
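# Illustrative sketch of the Cypher this builds (the labels are hypothetical):
# with queryTypes == ['affirmation positive', 'question negative'] the
# executed query would be:
#   MATCH (s:Stats)-->(:TypeStat{label:'affirmation positive'})
#                  -->(:TypeStat{label:'question negative'})
#                  -->(ts:TypeStat)
#   RETURN ts.label AS label ORDER BY ts.prob DESC LIMIT 1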
# Get the token distribution in the historic, only NN* are taken into account
def computeHistoTokenFrequency(lenghtHisto):
"""
Get the token distribution in the historic, only NN* are taken into account
@type lenghtHisto: integer
@param lenghtHisto: The maximal size of the historic
@return: A RecordList of tokens distribution
"""
server = GraphServer("../../../neo4j")
graph = server.graph
query = "MATCH (n:Histo)-[:is_followed_by*0.."+str(lenghtHisto)+"]->(sh:SentenceHisto)-[:is_composed_of]->(t:TokenHisto) WHERE t.pos =~ 'NN*' RETURN t.token as token,count(t) as total ORDER by total desc LIMIT 10"
return graph.cypher.execute(query)
|
JakeColtman/bartpy
|
bartpy/diagnostics/features.py
|
Python
|
mit
| 8,265 | 0.00363 |
from collections import Counter
from typing import List, Mapping, Union, Optional
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from bartpy.runner import run_models
from bartpy.sklearnmodel import SklearnModel
ImportanceMap = Mapping[int, float]
ImportanceDistributionMap = Mapping[int, List[float]]
def feature_split_proportions(model: SklearnModel, columns: Optional[List[int]]=None) -> Mapping[int, float]:
split_variables = []
for sample in model.model_samples:
for tree in sample.trees:
for node in tree.nodes:
splitting_var = node.split.splitting_variable
split_variables.append(splitting_var)
counter = Counter(split_variables)
if columns is None:
columns = sorted(list([x for x in counter.keys() if x is not None]))
proportions = {}
for column in columns:
if column in counter.keys():
proportions[column] = counter[column] / len(split_variables)
else:
proportions[column] = 0.0
return proportions
def plot_feature_split_proportions(model: SklearnModel, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1)
proportions = feature_split_proportions(model)
y_pos = np.arange(len(proportions))
name, count = list(proportions.keys()), list(proportions.values())
props = pd.DataFrame({"name": name, "counts": count}).sort_values("name", ascending=True)
plt.barh(y_pos, props.counts, align='center', alpha=0.5)
plt.yticks(y_pos, props.name)
plt.xlabel('Proportion of all splits')
plt.ylabel('Feature')
plt.title('Proportion of Splits Made on Each Variable')
return ax
def null_feature_split_proportions_distribution(model: SklearnModel,
X: Union[pd.DataFrame, np.ndarray],
y: np.ndarray,
n_permutations: int=10) -> Mapping[int, List[float]]:
"""
Calculate a null distribution of proportion of splits on each variable in X
Works by randomly permuting y to remove any true dependence of y on X and calculating feature importance
Parameters
----------
model: SklearnModel
Model specification to work with
X: np.ndarray
Covariate matrix
y: np.ndarray
Target data
n_permutations: int
How many permutations to run
The higher the number of permutations, the more accurate the null distribution, but the longer it will take to run
Returns
-------
Mapping[int, List[float]]
A list of inclusion proportions for each variable in X
"""
inclusion_dict = {x: [] for x in range(X.shape[1])}
y_s = [np.random.permutation(y) for _ in range(n_permutations)]
X_s = [X for _ in y_s]
fit_models = run_models(model, X_s, y_s)
for model in fit_models:
splits_run = feature_split_proportions(model, list(range(X.shape[1])))
for key, value in splits_run.items():
inclusion_dict[key].append(value)
return inclusion_dict
def plot_null_feature_importance_distributions(null_distributions: Mapping[int, List[float]], ax=None) -> None:
if ax is None:
_, ax = plt.subplots(1, 1)
df = pd.DataFrame(null_distributions)
df = pd.DataFrame(df.unstack()).reset_index().drop("level_1", axis=1)
df.columns = ["variable", "p"]
sns.boxplot(x="variable", y="p", data=df, ax=ax)
ax.set_title("Null Feature Importance Distribution")
return ax
def local_thresholds(null_distributions: ImportanceDistributionMap, percentile: float) -> Mapping[int, float]:
"""
Calculate the required proportion of splits to be selected by variable
Creates a null distribution for each variable based on the % of splits including that variable in each of the permuted models
Each variable has its own threshold that is independent of the other variables
Note - this is significantly less stringent than the global threshold
Parameters
----------
null_distributions: ImportanceDistributionMap
A mapping from variable to distribution of split inclusion proportions under the null
percentile: float
The percentile of the null distribution to use as a cutoff.
The closer to 1.0, the more stringent the threshold
Returns
-------
Mapping[int, float]
A lookup from column to % inclusion threshold
"""
return {feature: np.percentile(null_distributions[feature], percentile) for feature in null_distributions}
def global_thresholds(null_distributions: ImportanceDistributionMap, percentile: float) -> Mapping[int, float]:
"""
Calculate the required proportion of splits to be selected by variable
Creates a distribution of the _highest_ inclusion percentage of any variable in each of the permuted models
Threshold is set as a percentile of this distribution
All variables have the same threshold
Note that this is significantly more stringent than the local threshold
Parameters
----------
null_distributions: ImportanceDistributionMap
A mapping from variable to distribution of split inclusion proportions under the null
percentile: float
The percentile of the null distribution to use as a cutoff.
The closer to 1.0, the more stringent the threshold
Returns
-------
Mapping[int, float]
A lookup from column to % inclusion threshold
"""
q_s = []
    df = pd.DataFrame(null_distributions)
    for _, row in df.iterrows():
        q_s.append(np.max(row))
threshold = np.percentile(q_s, percentile)
return {feature: threshold for feature in null_distributions}
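# Minimal end-to-end selection sketch (assumes a fitted SklearnModel `model`
# and training data `X`, `y`; the percentile value is illustrative only, and
# kept_features is defined below):
#
#   proportions = feature_split_proportions(model)
#   null_dists = null_feature_split_proportions_distribution(model, X, y, n_permutations=10)
#   thresholds = global_thresholds(null_dists, 95)  # or local_thresholds(null_dists, 95)
#   selected = kept_features(proportions, thresholds)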
def kept_features(feature_proportions: Mapping[int, float], thresholds: Mapping[int, float]) -> List[int]:
"""
Extract the features to keep
Parameters
----------
feature_proportions: Mapping[int, float]
Lookup from variable to % of splits in the model that use that variable
thresholds: Mapping[int, float]
Lookup from variable to required % of splits in the model to be kept
Returns
-------
List[int]
Variable selected for inclusion in the final model
"""
return [x[0] for x in zip(sorted(feature_proportions.keys()), is_kept(feature_proportions, thresholds)) if x[1]]
def is_kept(feature_proportions: Mapping[int, float], thresholds: Mapping[int, float]) -> List[bool]:
"""
Determine whether each variable should be kept after selection
Parameters
----------
feature_proportions: Mapping[int, float]
Lookup from variable to % of splits in the model that use that variable
thresholds: Mapping[int, float]
Lookup from variable to required % of splits in the model to be kept
Returns
-------
List[bool]
An array of length equal to the width of the covariate matrix
True if the variable should be kept, False otherwise
"""
print(sorted(list(feature_proportions.keys())))
return [feature_proportions[feature] > thresholds[feature] for feature in sorted(list(feature_proportions.keys()))]
def partition_into_passed_and_failed_features(feature_proportions, thresholds):
kept = kept_features(feature_proportions, thresholds)
passed_features = {x[0]: x[1] for x in feature_proportions.items() if x[0] in kept}
failed_features = {x[0]: x[1] for x in feature_proportions.items() if x[0] not in kept}
return passed_features, failed_features
def plot_feature_proportions_against_thresholds(feature_proportions, thresholds, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1)
passed_features, failed_features = partition_into_passed_and_failed_features(feature_proportions, thresholds)
ax.bar(thresholds.keys(), [x * 100 for x in thresholds.values()], width=0.01, color="black", alpha=0.5)
ax.scatter(passed_features.keys(), [x * 100 for x in passed_features.values()], c="g")
ax.scatter(failed_features.keys(), [x * 100 for x in failed_features.values()], c="r")
ax.set_title("Feature Importance Compared to
|
congpc/DjangoExample
|
mysite/polls/tests.py
|
Python
|
bsd-2-clause
| 5,010 | 0.001198 |
import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Question
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
        pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
        old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for questions whose
pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Creates a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionViewTests(TestCase):
def test_index_view_with_no_questions(self):
"""
If no questions exist, an appropriate message should be displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_a_past_question(self):
"""
Questions with a pub_date in the past should be displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_a_future_question(self):
"""
Questions with a pub_date in the future should not be displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
should be displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionIndexDetailTests(TestCase):
def test_detail_view_with_a_future_question(self):
"""
The detail view of a question with a pub_date in the future should
return a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_with_a_past_question(self):
"""
The detail view of a question with a pub_date in the past should
display the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
|
heyf/cloaked-octo-adventure
|
leetcode/0103_binary-tree-zigzag-level-order-traversal.py
|
Python
|
mit
| 1,261 | 0.008723 |
#
# @lc app=leetcode id=103 lang=python3
#
# [103] Binary Tree Zigzag Level Order Traversal
#
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# @lc code=start
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
ret = []
child = [root]
flag = True
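        # flag tracks direction: True = emit this level left-to-right,
        # False = right-to-left; it is toggled after each non-empty level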
while child:
new_child = []
current_layer = []
for node in child:
if node:
current_layer.append(node.val)
if node.left:
                        new_child.append(node.left)
if node.right:
new_child.append(node.right)
child = new_child
if current_layer:
if flag:
ret.append(current_layer)
else:
ret.append(current_layer[::-1])
flag = not flag
return ret
# @lc code=end
b = [1,2,3,4,None,None,5] # WA1
a = TreeNode(1)
a.left = TreeNode(2)
a.right = TreeNode(3)
a.left.left = TreeNode(4)
a.right.right = TreeNode(5)
s = Solution()
print(s.zigzagLevelOrder(a))
|
LaveyD/spider
|
spider/items.py
|
Python
|
gpl-3.0
| 957 | 0.003135 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class SpiderItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
brand = Field()
name = Field()
type = Field()
category = Field()
shopname = Field()
productionName = Field()
productId = Field()
url = Field()
price = Field()
promotionInfo = Field()
monthlySalesVolume = Field()
evaluationNum = Field()
#goodEvaluationNum = Field()
date = Field()
commentCount = Field()
averageScore = Field()
goodCount = Field()
goodRate = Field()
generalCount = Field()
generalRate = Field()
poorCount = Field()
poorRate = Field()
showCount = Field()#the comment with picture
commentListPageNum = Field()
imageUrl = Field()
imagePath = Field()
|
nextgis/entels_front_demo
|
entels_demo_tornado/__init__.py
|
Python
|
gpl-2.0
| 548 | 0 |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from zope.sqlalchemy import ZopeTransactionExtension
import tornado.web
from handlers.index import IndexHandler
from handlers.sensors import SensorsHandler
import logging
logging.getLogger().setLevel(logging.DEBUG)
app = tornado.web.Application([
(r'/', IndexHandler),
(r'/sensors', SensorsHandler)
])
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
|
evangelistalab/forte
|
tests/pytest-methods/detci/test_detci-4.py
|
Python
|
lgpl-3.0
| 986 | 0.002028 |
import pytest
from forte.solvers import solver_factory, HF, ActiveSpaceSolver
def test_detci_4():
"""CASCI test of Forte DETCI using the SparseList algorithm to build the sigma vector"""
ref_hf_energy = -99.977636678461636
ref_fci_energy = -100.113732484560970
xyz = """
F
H 1 1.0
"""
input = solver_factory(molecule=xyz, basis='6-31g')
state = input.state(charge=0, multiplicity=1, sym='a1')
hf = HF(input, state=state, e_convergence=1.0e-12, d_convergence=1.0e-8)
# create a detci solver
fci = ActiveSpaceSolver(
hf,
type='detci',
states=state,
mo_spaces=input.mo_spaces(frozen_docc=[1, 0, 0, 0]),
options={'active_ref_type': 'cas'}
)
fci.run()
# check results
assert hf.value('hf energy') == pytest.approx(ref_hf_energy, 1.0e-10)
assert fci.value('active space energy')[state] == pytest.approx([ref_fci_energy], 1.0e-10)
if __name__ == "__main__":
test_detci_4()
|
udoprog/mimeprovider
|
mimeprovider/__init__.py
|
Python
|
gpl-3.0
| 5,322 | 0 |
import logging
from mimeprovider.documenttype import get_default_document_types
from mimeprovider.client import get_default_client
from mimeprovider.exceptions import MimeException
from mimeprovider.exceptions import MimeBadRequest
from mimeprovider.mimerenderer import MimeRenderer
from mimeprovider.validators import get_default_validator
__all__ = ["MimeProvider"]
__version__ = "0.1.5"
log = logging.getLogger(__name__)
def build_json_ref(request):
def json_ref(route, document=None, **kw):
ref = dict()
ref["$ref"] = request.route_path(route, **kw)
rel_default = None
if document:
rel_default = getattr(document, "object_type",
document.__class__.__name__)
else:
rel_default = route
ref["rel"] = kw.pop("rel_", rel_default)
return ref
return json_ref
class MimeProvider(object):
def __init__(self, documents=[], **kw):
self.renderer_name = kw.get("renderer_name", "mime")
self.attribute_name = kw.get("attribute_name", "mime_body")
self.error_handler = kw.get("error_handler", None)
self.set_default_renderer = kw.get("set_default_renderer", False)
self.validator = kw.get("validator")
if self.validator is None:
self.validator = get_default_validator()
types = kw.get("types")
if types is None:
types = get_default_document_types()
if not types:
raise ValueError("No document types specified")
self.client = kw.get("client")
if self.client is None:
self.client = get_default_client()
self.type_instances = [t() for t in types]
self.mimeobjects = dict()
self.mimetypes = dict(self._generate_base_mimetypes())
self.error_document_type = kw.get(
"error_document_type",
self.type_instances[0])
self.register(*documents)
def _validate(self, document):
if not hasattr(document, "object_type"):
raise ValueError(
("Object does not have required 'object_type' "
"attribute {0!r}").format(document))
def _generate_base_mimetypes(self):
"""
Generate the base mimetypes as described by non customized document
types.
"""
for t in self.type_instances:
if t.custom_mime:
continue
yield t.mime, (t, None, None)
def _generate_document_mimetypes(self, documents):
for t in self.type_instances:
if not t.custom_mime:
continue
for o in documents:
mimetype = t.mime.format(o=o)
validator = None
if hasattr(o, "schema"):
validator = self.validator(o.schema)
m_value = (mimetype, (t, o, validator))
o_value = (o, (t, mimetype, validator))
yield m_value, o_value
def register(self, *documents):
documents = list(documents)
for document in documents:
self._validate(document)
generator = self._generate_document_mimetypes(documents)
for (m, m_value), (o, o_value) in generator:
self.mimeobjects.setdefault(o, []).append(o_value)
if m not in self.mimetypes:
self.mimetypes[m] = m_value
continue
_, cls, validator = self.mimetypes[m]
_, new_cls, validator = m_value
raise ValueError(
"Conflicting handler for {0}, {1} and {2}".format(
m, cls, new_cls))
def get_client(self, *args, **kw):
return self.client(self.mimetypes, self.mimeobjects, *args, **kw)
def get_mime_body(self, request):
if not request.body or not request.content_type:
return None
result = self.mimetypes.get(request.content_type)
if result is None:
raise MimeBadRequest(
"Unsupported Content-Type: " + request.content_type)
document_type, cls, validator = result
# the specific document does not support deserialization.
if not hasattr(cls, "from_data"):
raise MimeBadRequest(
"Unsupported Content-Type: " +
request.content_type)
return document_type.parse(validator, cls, request.body)
@property
def renderer(self):
if self.error_handler is None:
raise ValueError("No 'error_handler' available")
def setup_renderer(helper):
return MimeRenderer(self.mimetypes, self.error_document_type,
self.error_handler, validator=self.validator)
return setup_renderer
def add_config(self, config):
config.add_renderer(self.renderer_name, self.renderer)
if self.set_default_renderer:
config.add_renderer(None, self.renderer)
config.set_request_property(self.get_mime_body, self.attribute_name,
reify=True)
config.set_request_property(build_json_ref, "json_ref", reify=True)
config.add_view(self.error_handler, context=MimeException,
renderer=self.renderer_name)
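# Illustrative Pyramid wiring sketch (hypothetical `MyDocument` class and
# `my_error_view` callable; `config` is a pyramid.config.Configurator):
#
#   provider = MimeProvider(documents=[MyDocument], error_handler=my_error_view)
#   provider.add_config(config)
#   client = provider.get_client()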
|
basnijholt/holoviews
|
holoviews/tests/plotting/matplotlib/testpointplot.py
|
Python
|
bsd-3-clause
| 15,100 | 0.002318 |
import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.core.spaces import HoloMap
from holoviews.element import Points
from .testplot import TestMPLPlot, mpl_renderer
from ..utils import ParamLogStream
try:
from matplotlib import pyplot
except:
pass
class TestPointPlot(TestMPLPlot):
def test_points_non_numeric_size_warning(self):
data = (np.arange(10), np.arange(10), list(map(chr, range(94,104))))
points = Points(data, vdims=['z']).opts(plot=dict(size_index=2))
with ParamLogStream() as log:
plot = mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ('%s: z dimension is not numeric, '
'cannot use to scale Points size.\n' % plot.name)
self.assertEqual(log_msg, warning)
def test_points_cbar_extend_both(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1,2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'both')
def test_points_cbar_extend_min(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'min')
def test_points_cbar_extend_max(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(None, 2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'max')
def test_points_cbar_extend_clime(self):
img = Points(([0, 1], [0, 3])).opts(style=dict(clim=(None, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'neither')
def test_points_rcparams_do_not_persist(self):
        opts = dict(fig_rcparams={'text.usetex': True})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
        mpl_renderer.get_plot(points)
self.assertFalse(pyplot.rcParams['text.usetex'])
def test_points_rcparams_used(self):
opts = dict(fig_rcparams={'grid.color': 'red'})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
plot = mpl_renderer.get_plot(points)
ax = plot.state.axes[0]
lines = ax.get_xgridlines()
self.assertEqual(lines[0].get_color(), 'red')
def test_points_padding_square(self):
points = Points([1, 2, 3]).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_square_per_axis(self):
curve = Points([1, 2, 3]).options(padding=((0, 0.1), (0.1, 0.2)))
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.4)
def test_points_padding_hard_xrange(self):
points = Points([1, 2, 3]).redim.range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_soft_xrange(self):
points = Points([1, 2, 3]).redim.soft_range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_unequal(self):
points = Points([1, 2, 3]).options(padding=(0.05, 0.1))
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_nonsquare(self):
points = Points([1, 2, 3]).options(padding=0.1, aspect=2)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_logx(self):
points = Points([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.89595845984076228)
self.assertEqual(x_range[1], 3.3483695221017129)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_logy(self):
points = Points([1, 2, 3]).options(padding=0.1, logy=True)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.89595845984076228)
self.assertEqual(y_range[1], 3.3483695221017129)
def test_points_padding_datetime_square(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1
)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 736054.80000000005)
self.assertEqual(x_range[1], 736057.19999999995)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_datetime_nonsquare(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1, aspect=2
)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 736054.90000000002)
self.assertEqual(x_range[1], 736057.09999999998)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_sizes_scalar_update(self):
hmap = HoloMap({i: Points([1, 2, 3]).opts(s=i*10) for i in range(1, 3)})
plot = mpl_renderer.get_plot(hmap)
artist = plot.handles['artist']
plot.update((1,))
self.assertEqual(artist.get_sizes(), np.array([10]))
plot.update((2,))
self.assertEqual(artist.get_sizes(), np.array([20]))
###########################
# Styling mapping #
###########################
def test_point_color_op(self):
points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color').options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_point_color_op_update(self):
points = HoloMap({0: Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color'),
1: Points([(0, 0, '#0000FF'), (0, 1, '#00FF00'), (0, 2, '#FF0000')],
vdims='color')}).options(color='color')
plot = mpl_renderer.g
|
tgroh/incubator-beam
|
sdks/python/apache_beam/io/range_trackers.py
|
Python
|
apache-2.0
| 14,379 | 0.00918 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""iobase.RangeTracker implementations provided with Apache Beam.
"""
import logging
import math
import threading
from six import integer_types
from apache_beam.io import iobase
__all__ = ['OffsetRangeTracker', 'LexicographicKeyRangeTracker',
'OrderedPositionRangeTracker', 'UnsplittableRangeTracker']
class OffsetRangeTracker(iobase.RangeTracker):
"""A 'RangeTracker' for non-negative positions of type 'long'."""
# Offset corresponding to infinity. This can only be used as the upper-bound
# of a range, and indicates reading all of the records until the end without
# specifying exactly what the end is.
# Infinite ranges cannot be split because it is impossible to estimate
# progress within them.
OFFSET_INFINITY = float('inf')
def __init__(self, start, end):
super(OffsetRangeTracker, self).__init__()
if start is None:
raise ValueError('Start offset must not be \'None\'')
if end is None:
raise ValueError('End offset must not be \'None\'')
assert isinstance(start, integer_types)
if end != self.OFFSET_INFINITY:
assert isinstance(end, integer_types)
assert start <= end
self._start_offset = start
self._stop_offset = end
self._last_record_start = -1
self._offset_of_last_split_point = -1
self._lock = threading.Lock()
self._split_points_seen = 0
self._split_points_unclaimed_callback = None
def start_position(self):
return self._start_offset
def stop_position(self):
return self._stop_offset
@property
def last_record_start(self):
return self._last_record_start
def _validate_record_start(self, record_start, split_point):
# This function must only be called under the lock self.lock.
if not self._lock.locked():
raise ValueError(
'This function must only be called under the lock self.lock.')
if record_start < self._last_record_start:
raise ValueError(
'Trying to return a record [starting at %d] which is before the '
'last-returned record [starting at %d]' %
(record_start, self._last_record_start))
if split_point:
if (self._offset_of_last_split_point != -1 and
record_start == self._offset_of_last_split_point):
raise ValueError(
'Record at a split point has same offset as the previous split '
'point: %d' % record_start)
    elif self._last_record_start == -1:
raise ValueError(
'The first record [starting at %d] must be at a split point' %
record_start)
def try_claim(self, record_start):
with self._lock:
self._validate_record_start(record_start, True)
if record_start >= self.stop_position():
return False
self._offset_of_last_split_point = record_start
self._last_record_start = record_start
self._split_points_seen += 1
return True
def set_current_position(self, record_start):
with self._lock:
self._validate_record_start(record_start, False)
self._last_record_start = record_start
def try_split(self, split_offset):
assert isinstance(split_offset, integer_types)
with self._lock:
if self._stop_offset == OffsetRangeTracker.OFFSET_INFINITY:
logging.debug('refusing to split %r at %d: stop position unspecified',
self, split_offset)
return
if self._last_record_start == -1:
logging.debug('Refusing to split %r at %d: unstarted', self,
split_offset)
return
if split_offset <= self._last_record_start:
logging.debug(
'Refusing to split %r at %d: already past proposed stop offset',
self, split_offset)
return
if (split_offset < self.start_position()
or split_offset >= self.stop_position()):
logging.debug(
'Refusing to split %r at %d: proposed split position out of range',
self, split_offset)
return
logging.debug('Agreeing to split %r at %d', self, split_offset)
split_fraction = (float(split_offset - self._start_offset) / (
self._stop_offset - self._start_offset))
self._stop_offset = split_offset
return self._stop_offset, split_fraction
def fraction_consumed(self):
with self._lock:
fraction = ((1.0 * (self._last_record_start - self.start_position()) /
(self.stop_position() - self.start_position())) if
self.stop_position() != self.start_position() else 0.0)
# self.last_record_start may become larger than self.end_offset when
# reading the records since any record that starts before the first 'split
# point' at or after the defined 'stop offset' is considered to be within
# the range of the OffsetRangeTracker. Hence fraction could be > 1.
# self.last_record_start is initialized to -1, hence fraction may be < 0.
# Bounding the to range [0, 1].
return max(0.0, min(1.0, fraction))
def position_at_fraction(self, fraction):
if self.stop_position() == OffsetRangeTracker.OFFSET_INFINITY:
raise Exception(
'get_position_for_fraction_consumed is not applicable for an '
'unbounded range')
return int(math.ceil(self.start_position() + fraction * (
self.stop_position() - self.start_position())))
def split_points(self):
with self._lock:
split_points_consumed = (
0 if self._split_points_seen == 0 else self._split_points_seen - 1)
split_points_unclaimed = (
self._split_points_unclaimed_callback(self.stop_position())
if self._split_points_unclaimed_callback
else iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
split_points_remaining = (
iobase.RangeTracker.SPLIT_POINTS_UNKNOWN if
split_points_unclaimed == iobase.RangeTracker.SPLIT_POINTS_UNKNOWN
else (split_points_unclaimed + 1))
return (split_points_consumed, split_points_remaining)
def set_split_points_unclaimed_callback(self, callback):
self._split_points_unclaimed_callback = callback
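# Minimal usage sketch (illustrative offsets): a reader claims record start
# positions in order while a runner may propose a split further ahead.
#
#   tracker = OffsetRangeTracker(0, 100)
#   tracker.try_claim(0)         # True: the first record must be a split point
#   tracker.try_claim(10)        # True: 10 < stop position
#   tracker.try_split(50)        # (50, 0.5): the range shrinks to [0, 50)
#   tracker.fraction_consumed()  # 0.2 once the stop position is 50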
class OrderedPositionRangeTracker(iobase.RangeTracker):
"""
An abstract base class for range trackers whose positions are comparable.
Subclasses only need to implement the mapping from position ranges
to and from the closed interval [0, 1].
"""
UNSTARTED = object()
def __init__(self, start_position=None, stop_position=None):
self._start_position = start_position
self._stop_position = stop_position
self._lock = threading.Lock()
self._last_claim = self.UNSTARTED
def start_position(self):
return self._start_position
def stop_position(self):
with self._lock:
return self._stop_position
def try_claim(self, position):
with self._lock:
if self._last_claim is not self.UNSTARTED and position < self._last_claim:
raise ValueError(
"Positions must be claimed
|
wagtail/wagtail
|
wagtail/admin/views/pages/utils.py
|
Python
|
bsd-3-clause
| 339 | 0 |
from django.utils.http import url_has_allowed_host_and_scheme
def get_valid_next_url_from_request(request):
    next_url = request.POST.get("next") or request.GET.get("next")
if not next_url or not url_has_allowed_host_and_scheme(
        url=next_url, allowed_hosts={request.get_host()}
):
return ""
return next_url
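# Illustrative use in an admin view (hypothetical handler body):
#   next_url = get_valid_next_url_from_request(request)
#   if next_url:
#       return redirect(next_url)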
|
xiaonanln/myleetcode-python
|
src/683. K Empty Slots.py
|
Python
|
apache-2.0
| 479 | 0.043841 |
from bisect import bisect_left
class Solution(object):
def kEmptySlots(self, flowers, k):
"""
:type flowers: List[int]
:type k: int
:rtype: int
"""
S = []
for ithday, n in enumerate(flowers):
idx = bisect_left(S, n)
if idx > 0 and n - S[idx-1] == k+1:
return ithday + 1
elif idx < len(S) and S[idx] - n == k+1:
return ithday + 1
S.insert(idx, n)
return -1
print Solution().kEmptySlots([1,3,2], 1)
print Solution().kEmptySlots([1,2,3], 1)
|
badbytes/pymeg
|
meg/grid.py
|
Python
|
gpl-3.0
| 2,412 | 0.01534 |
# grid.py
#
# Copyright 2009 danc <quaninux@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import numpy as np
from meg import euclid
def cube(location, gridsize, spacing):
'''make 3d grid with given location of center, gridsize, and spacing
    g = grid.cube(array([1,1,1]),2,1)
    makes a grid (g) centered around location 1,1,1 of size 2, with a spacing of 1'''
gridtmp = np.ones([gridsize,gridsize,gridsize])
grid = spacing * np.squeeze(np.array([np.where(gridtmp)]))
z = np.tile(location,[np.size(grid,1),1])
gridind = (grid + z.T)
newgrid = gridind - np.array([np.mean(gridind,axis=1)]).T + gridind
gridfinal = newgrid.T - (gridind.T-location)
return gridfinal.T#,gridind#, gridtmp,np.array([np.mean(gridtmp,axis=1)]).T,test#,z
def sphere(location, gridsize, spacing):#, radius):
    '''make 3d sphere grid with given location of center, gridsize, and spacing
    (the radius is derived as gridsize*spacing/2)
    g = grid.sphere(array([1,1,1]),12,.5)
    makes a grid (g) centered around location 1,1,1 of size 12, with a spacing of .5'''
    radius = (gridsize*spacing)/2.
cgrid = cube(location, gridsize, spacing)
print cgrid.shape, location
e = np.zeros(np.size(cgrid,1))
g = np.copy(e)
for i in range(0,np.size(cgrid,1)):
#e[:,i] = euclid.dist(location[0],cgrid[0][i],location[1],cgrid[1][i],location[2],cgrid[2][i])
e[i] = euclid.dist(location,cgrid[:,i])
#e = e*10
    print 'diameter', e.max(), 'mm'
sgrid = cgrid[:,e < radius].reshape([3,np.size(cgrid[:,e < radius])/3])
#cgrid[e > radius].reshape([3,np.size(cgrid[e > radius])/3]) == 0
return sgrid#,cgrid
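# Illustrative usage (mirrors the docstring examples above):
#   from numpy import array
#   g = cube(array([1, 1, 1]), 2, 1)       # cubic grid centred on (1, 1, 1)
#   s = sphere(array([0, 0, 0]), 12, 0.5)  # keeps cube points within radius 3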
|
rob-nn/python
|
first_book/maitre_d.py
|
Python
|
gpl-2.0
| 377 | 0.007958 |
# Maitre D'
# Demonstrates treating a value as a condition
print("Welcome to the Chateau D' Food")
print("It seems we are quite full this evening.\n")
money = int(input("How many dollars do you slip the Maitre D'? "))
if money:
print("Ah, I am reminded of a table. R
|
ight this way.")
else:
print("Please, sit. It may be a while.")
input("\n\nPr
|
ess the enter key to exit.")
|
caskdata/cdap
|
cdap-docs/_common/tabbed-parsed-literal.py
|
Python
|
apache-2.0
| 30,649 | 0.00757 |
# -*- coding: utf-8 -*-
# Copyright © 2016 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Simple, inelegant Sphinx extension which adds a directive for a
tabbed parsed-literals that may be switched between in HTML.
version: 0.4
The directive adds these parameters, both optional:
:languages: comma-separated list of pygments languages; default "console"
:tabs: comma-separated list of tabs; default "Linux,Windows"
:mapping: comma-separated list of linked-tabs; default "Linux,Windows"
:copyable: flag to indicate that all text can be "copied"
:single: flag to indicate that only one tab should be used, with no label (not yet implemented)
:independent: flag to indicate that this tab set does not link to another tabs
:dependent: name of tab set this tab belongs to; default "linux-windows"
Separate the code blocks with matching comment lines. Tabs must follow in order of :tabs:
option. Comment labels are for convenience, and don't need to match. Note example uses a
tab label with a space in it, and is enclosed in quotes. Note that the comma-separated
lists must not have spaces in them (outside of quotes); ie, use "java,scala", not
"java, scala".
The mapping maps a tab that is displayed to the trigger that will display it.
For example, you could have a set of tabs:
:tabs: "Mac OS X",Windows
:mapping: linux,windows
:dependent: linux-windows
Clicking on a "Linux" tab in another tab-set would activate the "Mac OS X" tab in this tab set.
The mappings can not use special characters. If a tab uses a special character, a mapping is required.
An error is raised, as it cannot be resolved using the defaults.
Note that slightly different rule operate for replacements: a replacement such as
"\|replace|" will work, and the backslash will be interpreted as a single backslash rather
than as escaping the "|".
If there is only one tab, the node is set to "independent" automatically, as there is
nothing to switch. If :languages: is not supplied for the single tab, "shell-session" is
used.
Lines that begin with "$", "#", ">", ">", "cdap >", "cdap >" are treated as command
lines and the text following is auto-selected for copying on mouse-over. (On Safari,
command-V is still required for copying; other browser support click-copying to the
clipboard.)
FIXME: Implement the ":single:" flag.
Examples:
.. tabbed-parsed-literal::
      :languages: console,shell-session
:tabs: "Linux or OS/X",Windows
.. Linux
$ cdap-cli.sh start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
$ curl -o /etc/yum.repos.d/cask.repo http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo
.. Windows
> cdap-cli.bat start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
> <CDAP-SDK-HOME>\libexec\bin\curl.exe -d c:\|release| -X POST 'http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo'
If you pass a single set of commands, without comments, the directive will create a
two-tabbed "Linux" and "Windows" with a generated Windows-equivalent command set. Check
the results carefully, and file an issue if it is unable to create the correct command.
Worst-case: you have to use the full format and enter the two commands. Note that any JSON
strings in the commands must be on a single line to convert successfully.
.. tabbed-parsed-literal::
$ cdap-cli.sh start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
$ curl -o /etc/yum.repos.d/cask.repo http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo
.. tabbed-parsed-literal::
:copyable:
:single:
SELECT * FROM dataset_uniquevisitcount ORDER BY value DESC LIMIT 5
Tab sets are either independent or dependent. Independent tabs do not participate in page or site tab setting.
In other words, clicking on a tab does not change any other tabs. Dependent tabs do. Clicking on the "Linux"
tab will change all other tabs to "Linux". You may need to include a mapping listing the relationship, such as this:
.. tabbed-parsed-literal::
:tabs: Linux,Windows,"Distributed CDAP"
:mapping: Linux,Windows,Linux
:languages: console,shell-session,console
...
This maps the tab "Distributed CDAP" to the other "Linux" tabs on the site. Clicking that
tab would change other tabs to the "linux" tab. (Changing to "linux" from another tab will
cause the first "linux" tab to be selected.)
JavaScript and design of tabs was taken from the Apache Spark Project:
http://spark.apache.org/examples.html
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.body import ParsedLiteral
from docutils.parsers.rst.roles import set_classes
DEFAULT_LANGUAGES = ['console', 'shell-session']
DEFAULT_TABS = ['linux', 'windows']
DEFAULT_TAB_LABELS = ['Linux', 'Windows']
DEFAULT_TAB_SET = 'linux-windows'
TPL_COUNTER = 0
# Sets the handlers for the tabs used by a particular instance of tabbed parsed literal
# Note doubled {{ to pass them through formatting
DEPENDENT_JS_TPL = """\
<script type="text/javascript">
$(function {div_name}() {{
var tabs = {tab_links};
var mapping = {mapping};
var tabSetID = {tabSetID};
for (var i = 0; i < tabs.length; i++) {{
var tab = tabs[i];
$("#{div_name} .example-tab-" + tab).click(changeExampleTab(tab, mapping, "{div_name}", tabSetID));
}}
}});
</script>
"""
# Note doubled {{ to pass them through formatting
INDEPENDENT_JS_TPL = """\
<script type="text/javascript">
function change_{div_name}_ExampleTab(tab) {{
return function(e) {{
e.preventDefault();
var scrollOffset = $(this).offset().top - $(document).scrollTop();
$("#{div_name} .tab-pane").removeClass("active");
$("#{div_name} .tab-pane-" + tab).addClass("active");
$("#{div_name} .example-tab").removeClass("active");
$("#{div_name} .example-tab-" + tab).addClass("active");
$(document).scrollTop($(this).offset().top - scrollOffset);
}}
}}
$(function() {{
var tabs = {tab_links};
for (var i = 0; i < tabs.length; i++) {{
var tab = tabs[i];
$("#{div_name} .example-tab-" + tab).click(change_{div_name}_ExampleTab(tab));
}}
}});
</script>
"""
DIV_START = """
<div id="{div_name}" class="{class}">
"""
NAV_TABS = """
<ul class="nav nav-tabs">
%s</ul>
"""
NAV_TABS_ENTRY = """\
<li class="example-tab example-tab-{tab_link} {active}"><a href="#">{tab_name}</a></li>
"""
TAB_CONTENT_START = """\
<div class="tab-contents">
"""
DIV_END = """
</div>
"""
TAB_CONTENT_ENTRY_START = """\
<div class="tab-pane tab-pane-{tab_link} {active}">
<div class="code code-tab">
"""
DIV_DIV_END = """
</div>
</div>
"""
def dequote(text):
"""
If text has single or double quotes around it, remove them.
Make sure the pair of quotes match.
If a matching pair of quotes is not found, return the text unchanged.
"""
if (text[0] == text[-1]) and text.startswith(("'", '"')):
return text[1:-1]
return text
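# e.g. dequote('"Mac OS X"') -> 'Mac OS X'; dequote("Linux") is returned unchanged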
def clean_alphanumeric(text):
"""
If text has any non-alphanumeric characters, replace them with a hyphen.
"""
text_clean = ''
for charc in text:
text_clean += charc if charc.isalnum() else '-'
return text_clean
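# e.g. clean_alphanumeric('Distributed CDAP') -> 'Distributed-CDAP'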
def convert(c, state={}):
"""
Converts a Linux command to a Windows-equivalent following a few simple rules:
- Converts a starting '$' to '>'
-
|
TobiasLundby/UAST
|
Module6/build/simulation/catkin_generated/pkg.develspace.context.pc.py
|
Python
|
bsd-3-clause
| 377 | 0 |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "simulation"
PROJECT_SPACE_DIR = "/home/stagsted/UAST/Module6/devel"
PROJECT_VERSION = "0.0.0"
|
sdake/pacemaker-cloud
|
src/pcloudsh/openstack_deployable.py
|
Python
|
gpl-2.0
| 4,878 | 0.001845 |
#
# Copyright (C) 2011 Red Hat, Inc.
#
# Author: Angus Salkeld <asalkeld@redhat.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
import os
import time
import libxml2
import exceptions
import uuid
import subprocess
import shutil
from pwd import getpwnam
from nova import flags
from nova import log
from nova import exception
from nova import utils
from nova import db  # needed for the db.api.NoMoreNetworks handler below
from nova.auth import manager
from pcloudsh import pcmkconfig
from pcloudsh import deployable
from pcloudsh import assembly
from pcloudsh import assembly_factory
FLAGS = flags.FLAGS
FLAGS.logging_context_format_string = ' %(levelname)s %(message)s'
FLAGS.logging_default_format_string = ' %(levelname)s %(message)s'
FLAGS.logging_debug_format_suffix = ' [%(filename)s:%(lineno)d]'
log.setup()
class OpenstackDeployable(deployable.Deployable):
def __init__(self, factory, name, username):
self.infrastructure = 'openstack'
self.username = username
deployable.Deployable.__init__(self, factory, name)
# TODO flagfile
FLAGS.state_path = '/var/lib/nova'
FLAGS.lock_path = '/var/lib/nova/tmp'
FLAGS.credentials_template = '/usr/share/nova/novarc.template'
FLAGS.sql_connection = 'mysql://nova:nova@localhost/nova'
self.conf.load_novarc(name)
def create(self):
nova_manager = manager.AuthManager()
uid = 0
gid = 0
try:
user_info = getpwnam(self.username)
uid = user_info[2]
gid = user_info[3]
except KeyError as ex:
print ex
return False
proj_exists = True
try:
projs = nova_manager.get_projects(self.username)
if not self.name in projs:
proj_exists = False
except:
proj_exists = False
try:
if not proj_exists:
nova_manager.create_project(self.name, self.username,
'Project %s created by pcloudsh' % (self.name))
except (exception.UserNotFound, exception.ProjectExists) as ex:
print ex
return False
os.mkdir(os.path.join(self.conf.dbdir, self.name))
zipfilename = os.path.join(self.conf.dbdir, self.name, 'nova.zip')
try:
zip_data = nova_manager.get_credentials(self.username, self.name)
with open(zipfilename, 'w') as f:
f.write(zip_data)
except (exception.UserNotFound, exception.ProjectNotFound) as ex:
print ex
return False
except db.api.NoMoreNetworks:
print ('*** No more networks available. If this is a new '
'installation, you need\nto call something like this:\n\n'
' nova-manage network create pvt 10.0.0.0/8 10 64\n\n')
return False
except exception.ProcessExecutionError, e:
print e
print ("*** The above error may show that the certificate db has "
"not been created.\nPlease create a database by running "
"a nova-api server on this host.")
return False
os.chmod(zipfilename, 0600)
os.chown(zipfilename, uid, gid)
novacreds = os.path.join(self.conf.dbdir, self.name, 'novacreds')
os.mkdir(novacreds)
os.system('unzip %s -d %s' % (zipfilename, novacreds))
os.system('ssh-keygen -f %s' % os.path.join(novacreds, 'nova_key'))
self.conf.load_novarc(self.name)
cwd = os.getcwd()
os.chdir(novacreds)
os.system('euca-add-keypair nova_key > nova_key.priv')
os.chdir(cwd)
for fn in os.listdir(novacreds):
if 'nova' in fn:
os.chown(os.path.join(novacreds, fn), uid, gid)
os.chmod(os.path.join(novacreds, fn), 0600)
return True
def delete(self):
nova_manager = manager.AuthManager()
if os.access(os.path.join(self.conf.dbdir, self.name), os.R_OK):
shutil.rmtree(os.path.join(self.conf.dbdir, self.name))
print ' deleted nova project key and environment'
try:
nova_manager.delete_project(self.name)
print ' deleted nova project'
except exception.ProjectNotFound as ex:
print ex
|
FireBladeNooT/Medusa_1_6
|
lib/fake_useragent/__init__.py
|
Python
|
gpl-3.0
| 36 | 0 |
from .fake import UserAgent  # noqa
| |
alihanniba/tornado-awesome
|
scrapy/taobaomm.py
|
Python
|
apache-2.0
| 3,677 | 0.004402 |
#_*_coding: utf-8 _*_
#__author__ = 'Alihanniba'
import urllib.request
# from urllib.request import urlopen
import urllib.error
import re
import os
import taobaotool
import time
class Spider:
def __init__(self):
self.siteUrl = 'http://mm.taobao.com/json/request_top_list.htm'
self.tool = taobaotool.Tool()
def getPage(self, pageIndex):
url = self.siteUrl + '?page=' + str(pageIndex)
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
content = response.read().decode('gbk')
return content
def getContents(self, pageIndex):
page = self.getPage(pageIndex)
pattern = re.compile('<div class="list-item".*?pic-word.*?<a href="(.*?)".*?<img src="(.*?)".*?<a class="lady-name.*?>(.*?)</a>.*?<strong>(.*?)</strong>.*?<span>(.*?)</span>',re.S)
items = re.findall(pattern, str(page))
contents = []
for item in items:
contents.append([item[0], item[1], item[2], item[3], item[4]])
print(item[0], item[1], item[2], item[3], item[4])
return contents
def getDetailPage(self, infoURL):
response = urllib.request.urlopen(infoURL)
return response.read().decode('gbk')
def getBrief(self, page):
pattern = re.compile('<div class="mm-aixiu-content".*?>(.*?)<!--',re.S)
result = re.search(pattern, str(page))
return self.tool.replace(result.group(1))
def getAllImg(self, page):
pattern = re.compile('<div class="mm-aixiu-content".*?>(.*?)<!--',re.S)
content = re.search(pattern, str(page))
patternImg = re.compile('<img.*?src="(.*?)"',re.S)
images = re.findall(patternImg, str(content.group(1)))
return images
def saveImgs(self, images, name):
number = 1
        print(u'Discovered', name, u'has a total of', len(images), u'images')
for imageURL in images:
splitPage = imageURL.split('.')
fTail = splitPage.pop()
if len(fTail) > 3:
fTail = 'jpg'
fileName = name + '/' + str(number) + '.' + fTail
self.saveImg(imageURL, fileName)
number += 1
def saveImg(self, imgUrl, fileName):
u = urllib.request.urlopen(imgUrl)
data = u.read()
f = open(fileName, 'wb')
f.write(data)
        print('Saving image as', fileName)
f.close()
def saveIcon(self, iconURL, name):
splitPath = iconURL.split('.')
fTail = splitPath.pop()
fileName = name + '/icon.' + fTail
self.saveImg(iconURL, fileName)
def saveBrief(self, content, name):
fileName = name + '/' + name + '.txt'
        f = open(fileName, 'wb')
        print(u"Saving info as", fileName)
        f.write(content.encode('UTF-8'))
def mkdir(self, path):
path = path.strip()
        isExists = os.path.exists(path)
        if not isExists:
            os.makedirs(path)
            return True
        else:
            return False
def savePageInfo(self, pageIndex):
contents = self.getContents(pageIndex)
for item in contents:
detailURL = item[0]
detailPage = self.getDetailPage(detailURL)
            brief = self.getBrief(detailPage)
images = self.getAllImg(detailPage)
self.mkdir(item[2])
self.saveBrief(brief, item[2])
self.saveIcon(item[1], item[2])
self.saveImgs(images, item[2])
def savePagesInfo(self, start, end):
for i in range(start, end + 1):
self.savePageInfo(i)
spider = Spider()
spider.savePagesInfo(2, 10)
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_algorithms/itertools_repeat_zip.py
|
Python
|
apache-2.0
| 96 | 0.010417 |
from itertools import *
for i, s in zip(count(), repeat('over-and-over', 5)):
print(i, s)
|
sbremer/hybrid_rs
|
evaluation/evaluation.py
|
Python
|
apache-2.0
| 5,761 | 0.002083 |
from typing import Dict, List
import numpy as np
import hybrid_model
from evaluation import evaluation_metrics
from evaluation import evaluation_parting
metrics_rmse = {'rmse': evaluation_metrics.Rmse()}
metrics_rmse_prec = {'rmse': evaluation_metrics.Rmse(),
'prec@5': evaluation_metrics.Precision(5)}
metrics_all = {'rmse': evaluation_metrics.Rmse(),
'mae': evaluation_metrics.Mae(),
'prec@5': evaluation_metrics.Precision(5),
'ndcg@5': evaluation_metrics.Ndcg(5)}
parting_full = {'full': evaluation_parting.Full()}
def get_parting_all(n_bins):
parting = {'full': evaluation_parting.Full()}
parting.update({'user_{}'.format(i+1):
evaluation_parting.BinningUser(n_bins, i) for i in range(n_bins)})
parting.update({'item_{}'.format(i+1):
                    evaluation_parting.BinningItem(n_bins, i) for i in range(n_bins)})
return parting
class Evaluation:
def __init__(self,
metrics: Dict[str, evaluation_metrics.Metric] = metrics_rmse_prec,
parts: Dict[str, evaluation_parting.Parting] = parting_full):
self.metrics = metrics
self.parts = parts
def evaluate_hybrid(self, model: 'hybrid_model.hybrid.HybridModel', x_test: List[np.ndarray], y_test: np.ndarray) \
            -> 'EvaluationResultHybrid':
result = EvaluationResultHybrid()
result.cf = self.evaluate(model.model_cf, x_test, y_test)
result.md = self.evaluate(model.model_md, x_test, y_test)
return result
def evaluate(self, model: 'hybrid_model.models.AbstractModel', x_test: List[np.ndarray], y_test: np.ndarray) \
-> 'EvaluationResult':
result = EvaluationResult()
for part, parting in self.parts.items():
x_test_part, y_test_part = parting.part(x_test, y_test)
result_part = self.evaluate_part(model, x_test_part, y_test_part)
result.parts[part] = result_part
return result
def evaluate_part(self, model: 'hybrid_model.models.AbstractModel', x_test: List[np.ndarray], y_test: np.ndarray) \
-> 'EvaluationResultPart':
result = EvaluationResultPart()
y_pred = model.predict(x_test)
for measure, metric in self.metrics.items():
result.results[measure] = metric.calculate(y_test, y_pred, x_test)
return result
def get_results_class(self):
return EvaluationResults(self.metrics, self.parts)
def get_results_hybrid_class(self):
return EvaluationResultsHybrid(self.metrics, self.parts)
def update_parts(self, user_dist, item_dist):
for part in self.parts.keys():
self.parts[part].update(user_dist, item_dist)
# === Single Evaluation Results
class EvaluationResultHybrid:
def __init__(self):
self.cf = EvaluationResult()
self.md = EvaluationResult()
def __str__(self):
s = 'CF:\n'
s += str(self.cf)
s += 'MD:\n'
s += str(self.md)
return s
class EvaluationResult:
def __init__(self):
self.parts: Dict[str, EvaluationResultPart] = {}
def __str__(self):
s = ''
for part, result in self.parts.items():
s += '=== Part {}\n'.format(part)
s += str(result)
s += '\n'
return s
def rmse(self):
return self.parts['full'].results['rmse']
class EvaluationResultPart:
def __init__(self):
self.results: Dict[str, float] = {}
def __str__(self):
s = ''
for metric, result in self.results.items():
s += '{}: {:.4f} '.format(metric, result)
return s
# === Multiple Evaluation Results (from Folds)
class EvaluationResultsHybrid:
def __init__(self, metrics: List[str] = metrics_rmse.keys(), parts: List[str] = parting_full.keys()):
self.cf = EvaluationResults(metrics, parts)
self.md = EvaluationResults(metrics, parts)
def add(self, result: EvaluationResultHybrid):
self.cf.add(result.cf)
self.md.add(result.md)
def __str__(self):
s = 'CF:\n'
s += str(self.cf)
s += 'MD:\n'
s += str(self.md)
return s
def mean_rmse_cf(self):
rmse = self.cf.rmse()
return rmse
def mean_rmse_md(self):
"""
Custom hacky function for Gridsearch
"""
rmse = self.md.rmse()
return rmse
class EvaluationResults:
def __init__(self, metrics: List[str] = metrics_rmse.keys(), parts: List[str] = parting_full.keys()):
self.parts: Dict[str, EvaluationResultsPart] = dict((key, EvaluationResultsPart(metrics)) for key in parts)
def add(self, result: EvaluationResult):
for part in self.parts.keys():
self.parts[part].add(result.parts[part])
def __str__(self):
s = ''
for part, result in self.parts.items():
s += '=== Part {}\n'.format(part)
s += str(result)
s += '\n'
return s
def rmse(self):
return self.parts['full'].mean('rmse')
class EvaluationResultsPart:
def __init__(self, metrics):
self.results: Dict[str, List[float]] = dict((key, []) for key in metrics)
def __str__(self):
s = ''
for metric, result in self.results.items():
mean = np.mean(result)
std = np.std(result)
s += '{}: {:.4f} ± {:.4f} '.format(metric, mean, std)
return s
def add(self, result: EvaluationResultPart):
for metric in self.results.keys():
self.results[metric].append(result.results[metric])
def mean(self, metric):
return np.mean(self.results[metric])
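# A minimal usage sketch (hypothetical names; assumes `model` exposes the
# .predict() interface used by evaluate_part above):
#
#     ev = Evaluation(metrics=metrics_rmse, parts=parting_full)
#     result = ev.evaluate(model, x_test, y_test)
#     print(result.rmse())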
|
andrei14vl/cubrid
|
contrib/python/samples/sample_CUBRIDdb.py
|
Python
|
gpl-3.0
| 872 | 0.006912 |
# -*- encoding:utf-8 -*-
# sample_CUBRIDdb.py
import CUBRIDdb
con = CUBRIDdb.connect('CUBRID:localhost:33000:demodb:::', 'public')
cur = con.cursor()
cur.execute('DROP TABLE IF EXISTS test_cubrid')
cur.execute('CREATE TABLE test_cubrid (id NUMERIC AUTO_INCREMENT(2009122350, 1), name VARCHAR(50))')
cur.execute("insert into test_cubrid (name) values ('Zhang San'), ('Li Si'), ('Wang Wu'), ('Ma Liu'), ('Niu Qi')")
cur.execute("insert
|
into test_cubrid (name) values (?), (?)", ['中文zh-cn', 'John'])
cur.execute("insert into test_cubrid
|
(name) values (?)", ['Tom',])
cur.execute('select * from test_cubrid')
# fetch result use fetchone()
row = cur.fetchone()
print(row)
print('')
# fetch result use fetchmany()
rows = cur.fetchmany(2)
for row in rows:
print(row)
print("")
rows = cur.fetchall()
for row in rows:
print(row)
cur.close()
con.close()
|
ccherrett/oom
|
share/pybridge/examples/addtrack.py
|
Python
|
gpl-2.0
| 930 | 0.025806 |
"""
//=========================================================
// OOMidi
// OpenOctave Midi and
|
Audio Editor
// (C) Copyright 2009 Mathias Gyllengahm (lunar_shuttle@users.sf.net)
//=========================================================
"""
import Pyro.core
import time
oom=Pyro.core.getProxyForURI('PYRONAME://:Default.oom')
for j in range(0,5):
for i in range(0,30):
oom.addMidiTrack("amiditrack" + str(i))
for i in range(0,30):
oom.deleteTrack("amiditrack" + str(i))
for i in range(0, 10
|
):
print i
oom.addMidiTrack("amiditrack")
oom.addWaveTrack("awavetrack")
oom.addOutput("anoutput")
oom.addInput("aninput")
oom.setMute("aninput", False)
oom.setAudioTrackVolume("aninput",1.0)
oom.deleteTrack("amiditrack")
oom.deleteTrack("awavetrack")
oom.deleteTrack("anoutput")
oom.deleteTrack("aninput")
time.sleep(1)
|
jj1bdx/bdxlog
|
scripts/logsc.py
|
Python
|
mit
| 1,491 | 0.022133 |
#!/usr/local/bin/python
#$Id: logsc.py,v 1.7 2013/11/15 15:07:06 kenji Exp $
from sqlite3 import dbapi2 as sqlite
import sys
# change integer to string if found
def int2str(p):
if type(p) == int:
return str(p)
else:
return p
if __name__ == '__main__':
con = sqlite.connect("/home/kenji/txt/hamradio/LOGS/SQLite-log/hamradio_log.sqlite")
# enable extension loading
con.enable_load_extension(True)
# load regexp extension
con.load_extension("/home/kenji/txt/hamradio/LOGS/scripts/sqlite3-pcre/pcre.so")
# disable extension loading after loading necessary extensions
con.enable_load_extension(False)
cur = con.cursor()
for arg in sys.argv[1:]:
t = (arg,)
# use "(?i)" (case insensitive) internal option prefix for PCRE
cur.execute("""
select `qso_date`, `time_on`, `my_call`, `call`, `band`, `mode`,
`rst_sent`, `qsl_sent`, `qsl_via`, `comment`, `my_qso_id` from qso
where `call` regexp \'(?i)\' || ? and `qsl_rcvd` <> \'I\'
order by `qso_date` || `time_on`
""", t)
for row in cur.fetchall():
print "-----------"
print "qso_date: ", row[0]
print "time_on: ", row[
|
1]
print "my_call: ", row[2]
print "call: ", row[3]
print "band: ", row[4
|
]
print "mode: ", row[5]
print "rst_sent: ", row[6]
print "qsl_sent: ", row[7]
print "qsl_via: ", row[8]
print "comment: ", row[9]
print "my_qso_id: ", row[10]
cur.close()
|
Metaswitch/calico-nova
|
nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
|
Python
|
apache-2.0
| 4,623 | 0.000649 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo.serialization import jsonutils
import webob
from nova.compute import vm_states
from nova import context
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
class ExtendedEvacuateFindHostTest(test.NoDBTestCase):
def setUp(self):
super(ExtendedEvacuateFindHostTest, self).setUp()
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Extended_evacuate_find_host',
'Evacuate'])
self.UUID = uuid.uuid4()
def _get_admin_context(self, user_id='fake', project_id='fake'):
ctxt = context.get_admin_context()
ctxt.user_id = user_id
ctxt.project_id = project_id
return ctxt
def _fake_compute_api(*args, **kwargs):
return True
def _fake_compute_api_get(self, context, instance_id, **kwargs):
instance = fake_instance.fake_db_instance(id=1, uuid=uuid,
task_state=None,
host='host1',
vm_state=vm_states.ACTIVE)
instance = instance_obj.Instance._from_db_object(context,
instance_obj.Instance(),
instance)
return instance
def _fake_service_get_by_compute_host(self, context, host):
return {'host_name': host,
'service': 'compute',
'zone': 'nova'
}
@mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.compute.api.API.evacuate')
def test_evacuate_instance_with_no_target(self, evacuate_mock,
api_get_mock,
service_get_mock):
service_get_mock.side_effects = self._fake_service_get_by_compute_host
api_get_mock.side_effects = self._fake_compute_api_get
evacuate_mock.side_effects = self._fake_compute_api
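        # NB: mock's real attribute is spelled `side_effect`; assigning
        # `side_effects` just sets an inert attribute, so the fakes above are
        # never wired in and the patched methods return plain MagicMocks
        # (which is still enough for the assertions below).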
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'onSharedStorage': 'False',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(200, res.status_int)
evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
mock.ANY, mock.ANY)
@mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
@mock.patch('nova.compute.api.API.get')
def test_no_target_fails_if_extension_not_loaded(self, api_get_mock,
service_get_mock):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Evacuate'])
service_get_mock.side_effects = self._fake_service_get_by_compute_host
api_get_mock.side_effects = self._fake_compute_api_get
ctxt = self._get_admin_context()
app = fake
|
s.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/s
|
ervers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'onSharedStorage': 'False',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(400, res.status_int)
|
idle-code/ContextShell
|
tests/functional/ShellTestsBase.py
|
Python
|
mit
| 1,673 | 0 |
import unittest
from abc import ABC, abstractmethod
from contextshell.action impo
|
rt ActionExecutor, Executor
from contextshell.backends.node import NodeTreeRoot
from contextshell.backends.virtual import VirtualTree
from contextshell.command import CommandInterpreter
from contextshell.path import NodePath
from contextshell.shell import Shell
class ShellScriptTestsBase(unit
|
test.TestCase, ABC):
@abstractmethod
def create_shell(self) -> Shell:
raise NotImplementedError()
class TreeRootTestsBase(ShellScriptTestsBase):
@abstractmethod
def create_tree_root(self):
raise NotImplementedError()
def create_shell(self):
self.tree_root = self.create_tree_root()
self.configure_tree_root(self.tree_root)
interpreter = CommandInterpreter(self.tree_root)
shell = Shell(interpreter)
return shell
def configure_tree_root(self, tree_root):
pass
# TODO: is this class needed when testing single TreeRoot-based class?
class VirtualTreeTestsBase(TreeRootTestsBase):
def create_tree_root(self):
return VirtualTree()
def configure_tree_root(self, tree_root):
self.configure_virtual_tree(tree_root)
@abstractmethod
def configure_virtual_tree(self, virtual_tree: VirtualTree):
raise NotImplementedError()
class NodeTreeTestsBase(VirtualTreeTestsBase): # TODO: move to NodeTree tests
def configure_virtual_tree(self, virtual_tree: VirtualTree):
tree_root = NodeTreeRoot()
self.configure_node_tree(tree_root)
virtual_tree.mount(NodePath("."), tree_root)
def configure_node_tree(self, tree: NodeTreeRoot):
pass
|
rohitranjan1991/home-assistant
|
tests/components/mqtt/test_climate.py
|
Python
|
mit
| 64,945 | 0.000939 |
"""The tests for the mqtt climate component."""
import copy
import json
from unittest.mock import call, patch
import pytest
import voluptuous as vol
from homeassistant.components import climate
from homeassistant.components.climate import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP
from homeassistant.components.climate.const import (
ATTR_AUX_HEAT,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_HVAC_ACTION,
ATTR_PRESET_MODE,
ATTR_SWING_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_ACTIONS,
DOMAIN as CLIMATE_DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_ECO,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.components.mqtt.climate import MQTT_CLIMATE_ATTRIBUTES_BLOCKED
from homeassistant.const import ATTR_TEMPERATURE, STATE_OFF
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_encoding_subscribable_topics,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_publishing_with_custom_encoding,
help_test_reloadable,
help_test_reloadable_late,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
from tests.components.climate import common
ENTITY_CLIMATE = "climate.test"
DEFAULT_CONFIG = {
CLIMATE_DOMAIN: {
"platform": "mqtt",
"name": "test",
"mode_command_topic": "mode-topic",
"temperature_command_topic": "temperature-topic",
"temperature_low_command_topic": "temperature-low-topic",
"temperature_high_command_topic": "temperature-high-topic",
"fan_mode_command_topic": "fan-mode-topic",
"swing_mode_command_topic": "swing-mode-topic",
"aux_command_topic": "aux-topic",
"preset_mode_command_topic": "preset-mode-topic",
"preset_modes": [
"eco",
"away",
"boost",
"comfort",
"home",
"sleep",
"activity",
],
}
}
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
DEFAULT_LEGACY_CONFIG = {
CLIMATE_DOMAIN: {
"platform": "mqtt",
"name": "test",
"mode_command_topic": "mode-topic",
"temperature_command_topic": "temperature-topic",
"temperature_low_command_topic": "temperature-low-topic",
"temperature_high_command_topic": "temperature-high-topic",
"fan_mode_command_topic": "fan-mode-topic",
"swing_mode_command_topic": "swing-mode-topic",
"aux_command_topic": "aux-topic",
"away_mode_command_topic": "away-mode-topic",
"hold_command_topic": "hold-topic",
}
}
async def test_setup_params(hass, mqtt_mock):
"""Test the initial parameters."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 21
assert state.attributes.get("fan_mode") == "low"
assert state.attributes.get("swing_mode") == "off"
assert state.state == "off"
assert state.attributes.get("min_temp") == DEFAULT_MIN_TEMP
assert state.attributes.get("max_temp") == DEFAULT_MAX_TEMP
async def test_preset_none_in_preset_modes(hass, mqtt_mock, caplog):
"""Test the preset mode payload reset configuration."""
config = copy.deepcopy(DEFAULT_CONFIG[CLIMATE_DOMAIN])
config["preset_modes"].append("none")
assert await async_setup_component(hass, CLIMATE_DOMAIN, {CLIMATE_DOMAIN: config})
await hass.async_block_till_done()
assert "Invalid config for [climate.mqtt]: not a valid value" in caplog.text
state = hass.states.get(ENTITY_CLIMATE)
assert state is None
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
@pytest.mark.p
|
arametrize(
"parameter,config_value",
[
|
("away_mode_command_topic", "away-mode-command-topic"),
("away_mode_state_topic", "away-mode-state-topic"),
("away_mode_state_template", "{{ value_json }}"),
("hold_mode_command_topic", "hold-mode-command-topic"),
("hold_mode_command_template", "hold-mode-command-template"),
("hold_mode_state_topic", "hold-mode-state-topic"),
("hold_mode_state_template", "{{ value_json }}"),
],
)
async def test_preset_modes_deprecation_guard(
hass, mqtt_mock, caplog, parameter, config_value
):
"""Test the configuration for invalid legacy parameters."""
config = copy.deepcopy(DEFAULT_CONFIG[CLIMATE_DOMAIN])
config[parameter] = config_value
assert await async_setup_component(hass, CLIMATE_DOMAIN, {CLIMATE_DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state is None
async def test_supported_features(hass, mqtt_mock):
"""Test the supported_features."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
support = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_SWING_MODE
| SUPPORT_FAN_MODE
| SUPPORT_PRESET_MODE
| SUPPORT_AUX_HEAT
| SUPPORT_TARGET_TEMPERATURE_RANGE
)
assert state.attributes.get("supported_features") == support
async def test_get_hvac_modes(hass, mqtt_mock):
"""Test that the operation list returns the correct modes."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
modes = state.attributes.get("hvac_modes")
assert [
HVAC_MODE_AUTO,
STATE_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
] == modes
async def test_set_operation_bad_attr_and_state(hass, mqtt_mock, caplog):
"""Test setting operation mode without required attribute.
Also check the state.
"""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
with pytest.raises(vol.Invalid) as excinfo:
await common.async_set_hvac_mode(hass, None, ENTITY_CLIMATE)
assert (
"value must be one of ['auto', 'cool', 'dry', 'fan_only', 'heat', 'heat_cool', 'off'] for dictionary value @ data['hvac_mode']"
) in str(excinfo.value)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
async def test_set_operation(hass, mqtt_mock):
"""Test setting of new operation mode."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
await common.async_set_hvac_mode(hass, "cool", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
    assert state.state == "cool"
mqtt_mock.async_publish.assert_
|
undeath/joinmarket-clientserver
|
test/test_full_coinjoin.py
|
Python
|
gpl-3.0
| 6,442 | 0.001242 |
#! /usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import * # noqa: F401
'''Runs a full joinmarket pit (using `nirc` miniircd servers,
with `nirc` options specified as an option to pytest),in
bitcoin regtest mode with 3 maker bots and 1 taker bot,
and does 1 coinjoin. This is intended as an E2E sanity check
but certainly could be extended further.
'''
from common import make_wallets
import pytest
import sys
from jmclient import YieldGeneratorBasic, load_test_config, jm_single,\
sync_wallet, JMClientProtocolFactory, start_reactor, Taker, \
random_under_max_order_choose
from jmbase.support import get_log
from twisted.internet import reactor
from twisted.python.log import startLogging
log = get_log()
# Note that this parametrization is inherited (i.e. copied) from
# the previous 'ygrunner.py' script which is intended to be run
# manually to test out complex scenarios. Here, we only run one
# simple test with honest makers (and for simplicity malicious
# makers are not included in the code). Vars are left in in case
# we want to do more complex stuff in the automated tests later.
@pytest.mark.parametrize(
"num_ygs, wallet_structures, mean_amt, malicious, deterministic",
[
# 1sp 3yg, honest makers
(3, [[1, 3, 0, 0, 0]] * 4, 2, 0, False),
])
def test_cj(setup_full_coinjoin, num_ygs, wallet_structures, mean_amt,
malicious, deterministic):
"""Starts by setting up wallets for maker and taker bots; then,
instantiates a single taker with the final wallet.
The remaining wallets are used to set up YieldGenerators (basic form).
All the wallets are given coins according to the rules of make_wallets,
using the parameters for the values.
The final start_reactor call is the only one that actually starts the
reactor; the others only set up protocol instances.
Inline are custom callbacks for the Taker, and these are basically
copies of those in the `sendpayment.py` script for now, but they could
be customized later for testing.
The Taker's schedule is a single coinjoin, using basically random values,
again this could be easily edited or parametrized if we feel like it.
"""
# Set up some wallets, for the ygs and 1 sp.
wallets = make_wallets(num_ygs + 1,
wallet_structures=wallet_structures,
mean_amt=mean_amt)
#the sendpayment bot uses the last wallet in the list
wallet = wallets[num_ygs]['wallet']
sync_wallet(wallet, fast=True)
# grab a dest addr from the wallet
destaddr = wallet.get_external_addr(4)
coinjoin_amt = 20000000
schedule = [[1, coinjoin_amt, 2, destaddr,
0.0, False]]
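    # (Hedged reading of the entry, inferred from this test rather than from
    #  upstream docs: [mixdepth, amount, n_counterparties, destaddr,
    #  wait_time, rounding_flag].)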
""" The following two callback functions are as simple as possible
modifications of the same in scripts/sendpayment.py
"""
def filter_orders_callback(orders_fees, cjamount):
return True
def taker_finished(res, fromtx=False, waittime=0.0, txdetails=None):
def final_checks():
sync_wallet(wallet, fast=True)
newbal = wallet.get_balance_by_mixdepth()[4]
oldbal = wallet.get_balance_by_mixdepth()[1]
# These are our check that the coinjoin succeeded
assert newbal == coinjoin_amt
# TODO: parametrize these; cj fees = 38K (.001 x 20M x 2 makers)
# minus 1K tx fee contribution each; 600M is original balance
# in mixdep
|
th 1
assert oldbal +
|
newbal + (40000 - 2000) + taker.total_txfee == 600000000
if fromtx == "unconfirmed":
#If final entry, stop *here*, don't wait for confirmation
if taker.schedule_index + 1 == len(taker.schedule):
reactor.stop()
final_checks()
return
if fromtx:
# currently this test uses a schedule with only one entry
assert False, "taker_finished was called with fromtx=True"
reactor.stop()
return
else:
if not res:
assert False, "Did not complete successfully, shutting down"
# Note that this is required in both conditional branches,
# especially in testing, because it's possible to receive the
# confirmed callback before the unconfirmed.
reactor.stop()
final_checks()
# twisted logging is required for debugging:
startLogging(sys.stdout)
taker = Taker(wallet,
schedule,
order_chooser=random_under_max_order_choose,
max_cj_fee=(0.1, 200),
callbacks=(filter_orders_callback, None, taker_finished))
clientfactory = JMClientProtocolFactory(taker)
nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
daemon = True if nodaemon == 1 else False
start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
jm_single().config.getint("DAEMON", "daemon_port"),
clientfactory, daemon=daemon, rs=False)
txfee = 1000
cjfee_a = 4200
cjfee_r = '0.001'
ordertype = 'swreloffer'
minsize = 100000
ygclass = YieldGeneratorBasic
# As noted above, this is not currently used but can be in future:
if malicious or deterministic:
raise NotImplementedError
for i in range(num_ygs):
cfg = [txfee, cjfee_a, cjfee_r, ordertype, minsize]
sync_wallet(wallets[i]["wallet"], fast=True)
yg = ygclass(wallets[i]["wallet"], cfg)
if malicious:
yg.set_maliciousness(malicious, mtype="tx")
clientfactory = JMClientProtocolFactory(yg, proto_type="MAKER")
nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
daemon = True if nodaemon == 1 else False
# As noted above, only the final start_reactor() call will
# actually start it!
rs = True if i == num_ygs - 1 else False
start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
jm_single().config.getint("DAEMON", "daemon_port"),
clientfactory, daemon=daemon, rs=rs)
@pytest.fixture(scope="module")
def setup_full_coinjoin():
load_test_config()
jm_single().bc_interface.tick_forward_chain_interval = 10
jm_single().bc_interface.simulate_blocks()
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_Seasonal_MonthOfYear_LSTM.py
|
Python
|
bsd-3-clause
| 171 | 0.046784 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
|
testmod.build_model( ['Quantization'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['LSTM'] );
|
|
UAVCAN/pyuavcan
|
pyuavcan/transport/udp/_session/__init__.py
|
Python
|
mit
| 729 | 0.002743 |
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
from ._input import UDPInputSession as UDPInputSession
from ._input import PromiscuousUDPInputSession as PromiscuousUDPInputSession
from ._input import SelectiveUDPInputSession as SelectiveUDPInputSession
from ._input import UDPInputSessionStatistics as UDPInputSessionStatistics
from ._input import PromiscuousUDPInputSessionStatistics as PromiscuousUDPInputSe
|
ssionStatistics
from ._input import SelectiveUDPInputSessionStatistics as SelectiveUDPInputSessionStatistics
from ._output im
|
port UDPOutputSession as UDPOutputSession
from ._output import UDPFeedback as UDPFeedback
|
boltnev/iktomi
|
iktomi/cli/lazy.py
|
Python
|
mit
| 1,023 | 0.001955 |
from iktomi.utils import cached_property
from .base import Cli
class LazyCli(Cli):
'''
Wrapper for creating lazy command digests.
    Sometimes it is not necessary to i
|
mport all parts of the application to start
a particular command. LazyCli allows you to define all imports in a
function called only on the command::
@LazyCli
def db_command():
import admin
from admin.environment import db_maker
from models import initial
from iktomi.cli import sqla
return sqla.Sqla(db_m
|
aker, initial=initial.install)
# ...
def run(args=sys.argv):
manage(dict(db=db_command, ), args)
'''
def __init__(self, func):
self.get_digest = func
@cached_property
def digest(self):
return self.get_digest()
def description(self, *args, **kwargs):
return self.digest.description(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self.digest(*args, **kwargs)
|
AltSchool/django-allauth
|
allauth/socialaccount/providers/disqus/tests.py
|
Python
|
mit
| 2,347 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.test.utils import override_settings
from allauth.account import app_settings as account_settings
from allauth.account.models import EmailAddress
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import DisqusProvider
@override_settings(
SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
ACCOUNT_EMAIL_VERIFICATION=account_settings
.EmailVerificationMethod.MANDATORY)
class DisqusTests(OAuth2TestsMixin, TestCase):
provider_id = DisqusProvider.id
def get_mocked_response(self,
name='Raymond Penners',
email="raymond.penners@example.com"):
return MockedResponse(200, """
{"response": {"name": "%s",
"avatar": {
"permalink": "https://lh5.googleusercontent.com/photo.jpg"
},
"email": "%s",
"profileUrl": "https://plus.google.com/108204268033311374519",
"id": "108204268033311374519" }}
""" % (name, email))
def test_account_connect(self):
email = "user@example.com"
user = User.objects.create(username='user',
is_active=True,
email=email)
user.set_password('test')
user.save()
EmailAddress.objects.create(user=user,
email=email,
primary=True,
verified=True)
self.client.login(username=user.usern
|
ame,
password='test')
self.login(self.get_mocked_response(), process='connect')
# Check if we connected...
self.assertTrue(SocialAccount.objects.filter(
user=user,
provider=DisqusProvider.id).exists())
# For now, we do not pick up any new e-mail addresses on connect
self.assertEqual(EmailAddress.objects.filter(user=user).count(), 1)
self.assertEqual(EmailAddress.objects.filter(
user=user,
|
email=email).count(), 1)
|
hsoft/pdfmasher
|
ebooks/epub/output.py
|
Python
|
gpl-3.0
| 3,361 | 0.005356 |
# Copyright 2009, Kovid Goyal <kovid@kovidgoyal.net>
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPL v3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/gplv3_license
import os, logging
from uuid import uuid4
from ..ptempfile import TemporaryDirectory
from ..utils.zipfile import zip_add_dir
from ..oeb.transforms.filenames import FlatFilenames, UniqueFilenames
from ..oeb.transforms.split import Split
from ..oeb.transforms.cover import CoverManager
from ..oeb.base import XPath, OPF
from ..oeb.output import OEBOutput
from . import initialize_container
from lxml import etree
def upshift_markup(oeb):
'Upgrade markup to comply with XHTML 1.1 where possible'
for x in oeb.spine:
root = x.data
body = XPath('//h:body')(root)
if body:
body = body[0]
if not hasattr(body, 'xpath'):
continue
for u in XPath('//h:u')(root):
u.tag = 'span'
u.set('style', 'text-decoration:underline')
def convert(oeb, output_path, epub_flatten=False, dont_split_on_page_breaks=False,
flow_size=260, no_default_epub_cover=False, no_svg_cover=False,
preserve_cover_aspect_ratio=False, pretty_print=False):
if epub_flatten:
FlatFilenames()(oeb)
else:
UniqueFilenames()(oeb)
upshift_markup(oeb)
split = Split(not dont_split_on_page_breaks, max_flow_
|
size=flow_size*1024)
split(oeb)
cm = CoverManager(no_default_cover=no_default_epub_cover, no_svg_cover=no_svg_cover,
preserve_aspect_ratio=preserve_cover_aspect_ratio)
cm(oeb)
if oeb.toc.count() == 0:
logging.warn('This EPUB file has no Table of Contents. Creating a default TOC')
|
first = next(iter(oeb.spine))
oeb.toc.add('Start', first.href)
identifiers = oeb.metadata['identifier']
uuid = None
for x in identifiers:
        if (x.get(OPF('scheme'), '') or '').lower() == 'uuid' or str(x).startswith('urn:uuid:'):
uuid = str(x).split(':')[-1]
break
if uuid is None:
logging.warn('No UUID identifier found')
uuid = str(uuid4())
oeb.metadata.add('identifier', uuid, scheme='uuid', id=uuid)
with TemporaryDirectory('_epub_output') as tdir:
metadata_xml = None
extra_entries = []
oeb_output = OEBOutput()
oeb_output.convert(oeb, tdir, None)
opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
if pretty_print:
condense_ncx([os.path.join(tdir, x) for x in os.listdir(tdir) if x.endswith('.ncx')][0])
with initialize_container(output_path, os.path.basename(opf),
extra_entries=extra_entries) as epub:
zip_add_dir(epub, tdir)
if metadata_xml is not None:
epub.writestr('META-INF/metadata.xml', metadata_xml.encode('utf-8'))
def condense_ncx(ncx_path):
tree = etree.parse(ncx_path)
for tag in tree.getroot().iter(tag=etree.Element):
if tag.text:
tag.text = tag.text.strip()
if tag.tail:
tag.tail = tag.tail.strip()
compressed = etree.tostring(tree.getroot(), encoding='utf-8')
open(ncx_path, 'wb').write(compressed)
|
karelklic/flashfit
|
task.py
|
Python
|
gpl-3.0
| 695 | 0.004317 |
from PyQt4 import QtCore, QtGui
class Task(QtCore.QThread):
messageAdded = QtCore.pyqtSignal(QtCore.QString)
def __ini
|
t__(self, mainWindow, parent = None):
super(Task, self).__init__(parent)
self.mainWindow = mainWindow
self.finished.connect(self.postRun)
self.terminated.connect(self.postTerminated)
def run(self):
"""
The code in this method is run in another thread.
"""
pass
def postRun(self):
"""
The code in this method is run in GUI thread.
"""
pass
|
def postTerminated(self):
"""
The code in this method is run in GUI thread.
"""
pass
|
genialis/django-rest-framework-reactive
|
src/rest_framework_reactive/api_urls.py
|
Python
|
apache-2.0
| 133 | 0.007519 |
from djang
|
o.urls import path
from .
|
import views
urlpatterns = [path('unsubscribe', views.QueryObserverUnsubscribeView.as_view())]
|
NoiSek/whisper
|
whisper/models/whisperkey.py
|
Python
|
gpl-2.0
| 5,369 | 0.017322 |
import nacl.encoding
import nacl.public
import nacl.utils
class WhisperKey():
def __init__(self, key=None):
if key is None:
self.generate_keypair()
else:
if isinstance(key, bytes) or isinstance(key, str):
try:
self._private_key = nacl.public.PrivateKey(key, encoder=nacl.encoding.Base64Encoder)
except Exception as e:
raise Exception("Error generating key from given str or bytes object: ", e)
elif isinstance(key, nacl.public.PrivateKey):
self._private_key = key
else:
raise Exception("Not a valid key.")
def generate_keypair(self):
self._private_key = nacl.public.PrivateKey.generate()
def get_private_key(self, stringify=False, as_image=False, image=None):
if stringify:
return (
self._private_key
.encode(encoder=nacl.encoding.Base64Encoder)
.decode("utf-8")
)
elif as_image:
# Accessible afterwards by parsing all characters after 2321.
# If the image changes, the ability to parse it changes as well.
            # It would be wise to embed an identifier in the future so that custom images can be supported.
file_contents = None
if image:
file_contents = image.read()
else:
with open("whisper/static/img/key_small.png", "br") as f:
file_contents = f.read()
private_ke
|
y = self._private_key.encode(encoder=nacl.encoding.Base64Encoder)
file_contents += private_key
return file_contents
else:
return self._private_key
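    # A minimal recovery sketch (assumption: the stock key_small.png is
    # exactly 2321 bytes, per the "after 2321" comment above, so the appended
    # Base64 key is everything past that offset):
    #
    #     with open("exported_key.png", "rb") as f:
    #         key = WhisperKey(f.read()[2321:])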
def get_public_key(self, stringify=False):
public_key = self._private_key.public_key
if stringify:
return (
public_key
.encode(encoder=nacl.encoding.Base64Encoder)
.decode("utf-8")
)
else:
return public_key
|
def encrypt_message(self, message, public_key, nonce=None):
# Verify that we can convert the public_key to an nacl.public.PublicKey instance
if isinstance(public_key, nacl.public.PublicKey):
pass
elif isinstance(public_key, str) or isinstance(public_key, bytes): # pragma: no cover
public_key = nacl.public.PublicKey(public_key, encoder=nacl.encoding.Base64Encoder)
elif isinstance(public_key, WhisperKey): # pragma: no cover
public_key = public_key.get_public_key()
else:
raise Exception("Invalid public key provided.")
# Make sure our message is a bytes object, or convert it to one.
if isinstance(message, bytes): # pragma: no cover
pass
elif isinstance(message, str):
message = bytes(message, "utf-8")
else: # pragma: no cover
raise Exception("Message is not bytes or str.")
box = nacl.public.Box(self._private_key, public_key)
nonce = nonce or nacl.utils.random(24)
# Message will be prepended with a 32 character nonce, which can be parsed out elsewhere.
encrypted_message = box.encrypt(message, nonce, encoder=nacl.encoding.Base64Encoder)
return encrypted_message.decode("utf-8")
def decrypt_message(self, message, public_key):
# Verify that we can convert the public_key to an nacl.public.PublicKey instance
if isinstance(public_key, nacl.public.PublicKey):
pass
elif isinstance(public_key, str) or isinstance(public_key, bytes): # pragma: no cover
public_key = nacl.public.PublicKey(public_key, encoder=nacl.encoding.Base64Encoder)
elif isinstance(public_key, WhisperKey): # pragma: no cover
public_key = public_key.get_public_key()
else:
raise Exception("Invalid public key provided.")
# Make sure our message is a bytes object, or convert it to one.
if isinstance(message, bytes): # pragma: no cover
pass
elif isinstance(message, str):
message = bytes(message, "utf-8")
else: # pragma: no cover
raise Exception("Message is not bytes or str.")
box = nacl.public.Box(self._private_key, public_key)
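        # The first 32 Base64 characters encode the 24-byte nonce; box.decrypt
        # recovers the nonce from the message itself, so the two slices below
        # are informational only.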
nonce = message[:32]
_message = message[32:]
encrypted_message = nacl.utils.EncryptedMessage(message)
decrypted = box.decrypt(encrypted_message, encoder=nacl.encoding.Base64Encoder)
return decrypted.decode("utf-8")
if __name__ == "__main__": # pragma: no cover
sender = WhisperKey()
receiver = WhisperKey()
nonce = bytes([x for x in range(24)])
out_message = sender.encrypt_message(
message="This is our test message, we'll see how it turns out in the end.",
public_key=receiver,
nonce=nonce
)
print("Our private key")
print("================================================")
print(sender.get_private_key(stringify=True))
print("\n")
print("Their public key")
print("================================================")
print(receiver.get_public_key(stringify=True))
print("\n")
print("Their private key")
print("================================================")
print(receiver.get_private_key(stringify=True))
print("\n")
print("Our public key")
print("================================================")
print(sender.get_public_key(stringify=True))
print("\n")
print("Final output message")
print("================================================")
print(out_message)
print("\n")
print("Decrypted")
print("================================================")
print(receiver.decrypt_message(message=out_message, public_key=sender.get_public_key()))
|
factorlibre/l10n-spain
|
l10n_es_pos/models/ir_sequence.py
|
Python
|
agpl-3.0
| 890 | 0 |
# Copyright
|
2018 David Vidal <david.vidal@tecnativa.com>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, api, models
from odoo.exceptions import UserError
class IrSequence(models.Model):
_inherit = 'ir.sequence'
@api.constrains('prefix', 'code')
def check_simplified_invoice_unique_prefix(self):
if self._context.get('copy_pos_config'):
return
for sequence in self.filtered(
|
lambda x: x.code == 'pos.config.simplified_invoice'):
if self.search_count([
('code', '=', 'pos.config.simplified_invoice'),
('prefix', '=', sequence.prefix)]) > 1:
raise UserError(_('There is already a simplified invoice '
'sequence with that prefix and it should be '
'unique.'))
|
mettadatalabs1/oncoscape-datapipeline
|
setup.py
|
Python
|
apache-2.0
| 191 | 0 |
# just listing the list of requires
|
. will create a set
|
up using these
"""
airflow>=1.7.1,
numpy>=1.1,
requests>=2.1,
pymongo==3.4.0,
pytest>=3.0,
simplejson==3.10.0,
tox==2.6
PyYAML==3.12
"""
|
KaranToor/MA450
|
google-cloud-sdk/lib/surface/compute/regions/list.py
|
Python
|
apache-2.0
| 965 | 0.005181 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with t
|
he License.
# You may obtain
|
a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing regions."""
from googlecloudsdk.api_lib.compute import base_classes
class List(base_classes.GlobalLister):
"""List Google Compute Engine regions."""
@property
def service(self):
return self.compute.regions
@property
def resource_type(self):
return 'regions'
List.detailed_help = base_classes.GetGlobalListerHelp('regions')
|
cwmartin/rez
|
src/rez/cli/selftest.py
|
Python
|
lgpl-3.0
| 3,095 | 0.000323 |
'''
Run unit tests.
'''
import inspect
import os
import rez.vendor.argparse as argparse
from pkgutil import iter_modules
cli_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
src_rez_dir = os.path.dirname(cli_dir)
tests_dir = os.path.join(src_rez_dir, 'tests')
all_module_tests = []
def setup_parser(parser, completions=False):
parser.add_argument(
"tests", metavar="NAMED_TEST", default=[], nargs="*",
help="a specific test module/class/method to run; may be repeated "
"multiple times; if no tests are given, through this or other flags, "
"all tests are run")
parser.add_argument(
"-s", "--only-shell", metavar="SHELL",
help="limit shell-dependent tests to the specified shell")
# make an Action that will append the appropriate test to the "--test" arg
class AddTestModuleAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
name = option_string.lstrip('-')
if getattr(namespace, "module_tests", None) is None:
namespace.module_tests = []
namespace.module_tests.append(name)
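    # (Effect, with hypothetical flags: `rez selftest --shells --rex` appends
    #  "shells" and "rex" to namespace.module_tests; each generated --<name>
    #  flag reuses its own name as the test module to run.)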
# find unit tests
tests = []
prefix = "test_"
for importer, name, ispkg in iter_modules([tests_dir]):
if not ispkg and name.startswith(prefix)
|
:
module = importer.find_module(name).load_module(name)
name_ = name[len(prefix):]
all_module_tests.append(name_)
tests.append((name_, module))
# create argparse entry for each module's unit test
for name, module in sorted(tests):
parser.add_argument(
"--%s" % name,
|
action=AddTestModuleAction, nargs=0,
dest="module_tests", default=[],
help=module.__doc__.strip().rstrip('.'))
def command(opts, parser, extra_arg_groups=None):
import sys
from rez.vendor.unittest2.main import main
os.environ["__REZ_SELFTEST_RUNNING"] = "1"
if opts.only_shell:
os.environ["__REZ_SELFTEST_SHELL"] = opts.only_shell
if not opts.module_tests and not opts.tests:
module_tests = all_module_tests
else:
module_tests = opts.module_tests
module_tests = [("rez.tests.test_%s" % x) for x in sorted(module_tests)]
tests = module_tests + opts.tests
argv = [sys.argv[0]] + tests
main(module=None, argv=argv, verbosity=opts.verbose)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
matyro/Cor-RC
|
Website/site_rawPacket.py
|
Python
|
mit
| 1,137 | 0.024626 |
from Website.site_base import BaseHandler
import tornado.web
import tornado
import SQL.table_simulation as SQLsim
class RawPacketHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
if self.current_user is None:
self.redirect('login.html?next=edit')
|
return
if self.current_user.permission < 9000:
self.redirect('/')
return
sim = self.database.query(SQLsim.Simulation).filter( SQLsim.Simulation.status==1 ).all()
print('Sim: ' + str(sim))
self.render('raw_packet.html', simulation=sim)
        self.database.commit()  # Important: otherwise the transaction is reused and cached data is returned instead of (possibly) new data
|
def post(self):
print(str(self.request))
print('Message: ' + str( self.get_argument('message', '')))
print('Client: ' + str( self.get_argument('client', '')))
print('header: ' + str( self.get_argument('header', '')))
self.redirect('/raw')
|
NicovincX2/Python-3.5
|
Théorie des nombres/Nombre/Nombre aléatoire/random_points_on_a_circle.py
|
Python
|
gpl-3.0
| 691 | 0 |
# -*- coding: utf-8 -*-
import os
from collections import defaultdict
from random import choice
world = defaultdict(int)
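# abs(x + y*1j) is the complex modulus, i.e. the Euclidean distance of (x, y)
# from the origin, so the filter keeps exactly the lattice points in the
# annulus 10 <= r <= 15.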
possiblepoints = [(x, y) for x in range(-15, 16)
for y in range(-15, 16)
if 10 <= abs(x + y * 1j) <= 15]
for i in range(100):
world[choice(possiblepoints)] += 1
for x in range(-15, 16):
print(''.join(str(min([9, world[(x, y)]])) if world[(x, y)] else ' '
for y in range(-15, 16)))
for i in range(1000):
world[choice(possiblepoints)] += 1
for x in range(-15, 16):
|
print(''.join(str(min
|
([9, world[(x, y)]])) if world[(x, y)] else ' '
for y in range(-15, 16)))
os.system("pause")
|
achadwick/mypaint
|
gui/layerswindow.py
|
Python
|
gpl-2.0
| 14,745 | 0.001695 |
# This file is part of MyPaint.
# Copyright (C) 2014 by Andrew Chadwick <a.t.chadwick@gmail.com>
# Copyright (C) 2009 by Ilya Portnov <portnov@bk.ru>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Layers panel"""
## Imports
from __future__ import division, print_function
from gettext import gettext as _
import os.path
from logging import getLogger
logger = getLogger(__name__)
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import Pango
import lib.layer
import lib.xml
import widgets
from widgets import inline_toolbar
from workspace import SizedVBoxToolWidget
import layers
## Module constants
#: UI XML for the current layer's class (framework: ``layerswindow.xml``)
LAYER_CLASS_UI = [
(lib.layer.SurfaceBackedLayer, """
<popup name='LayersWindowPopup'>
<placeholder name="BasicLayerActions">
<menuitem action='CopyLayer'/>
</placeholder>
</popup>
"""),
(lib.layer.PaintingLayer, """
<popup name='LayersWindowPopup'>
<placeholder name="BasicLayerActions">
<menuitem action='PasteLayer'/>
<menuitem action='ClearLayer'/>
</placeholder>
<placeholder name='AdvancedLayerActions'>
<menuitem action='TrimLayer'/>
</placeholder>
</popup>
"""),
(lib.layer.ExternallyEditable, """
<popup name='LayersWindowPopup'>
<placeholder name='BasicLayerActions'>
<separator/>
<menuitem action='BeginExternalLayerEdit'/>
<menuitem action='CommitExternalLayerEdit'/>
<separator/>
</placeholder>
</popup>
"""),
]
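# (Each entry pairs a layer class with a UI XML fragment; judging by the
#  merge-id bookkeeping in LayersTool below, the fragment matching the
#  selected layer's class is merged into the context menu at runtime.)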
## Class definitions
class LayersTool (SizedVBoxToolWidget):
"""Panel for arranging layers within a tree structure"""
## Class properties
tool_widget_icon_name = "mypaint-layers-symbolic"
tool_widget_title = _("Layers")
tool_widget_description = _("Arrange layers and assign effects")
LAYER_MODE_TOOLTIP_MARKUP_TEMPLATE = "<b>{name}</b>\n{description}"
#TRANSLATORS: tooltip for the opacity slider (text)
OPACITY_SCALE_TOOLTIP_TEXT_TEMPLATE = _("Layer opacity: %d%%")
__gtype_name__ = 'MyPaintLayersTool'
STATUSBAR_CONTEXT = 'layerstool-dnd'
#TRANSLATORS: status bar messages for drag, without/with modifiers
STATUSBAR_DRAG_MSG = _("Move layer in stack...")
STATUSBAR_DRAG_INTO_MSG = _("Move layer in stack (dropping into a "
"regular layer will create a new group)")
## Construction
def __init__(self):
GObject.GObject.__init__(self)
from application import get_app
app = get_app()
self.app = app
self.set_spacing(widgets.SPACING_CRAMPED)
self.set_border_width(widgets.SPACING_TIGHT)
# GtkTreeView init
docmodel = app.doc.model
view = layers.RootStackTreeView(docmodel)
self._treemodel = view.get_model()
self._treeview = view
# Motion and modifier keys during drag
view.current_layer_rename_requested += self._rename_current_layer_cb
view.current_layer_changed += self._blink_current_layer_cb
view.current_layer_menu_requested += self._popup_menu_cb
view.drag_began += self._view_drag_began_cb
view.drag_ended += self._view_drag_ended_cb
statusbar_cid = app.statusbar.get_context_id(self.STATUSBAR_CONTEXT)
self._drag_statusbar_context_id = statusbar_cid
# View scrolls
view_scroll = Gtk.ScrolledWindow()
view_scroll.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
scroll_pol = Gtk.PolicyType.AUTOMATIC
view_scroll.set_policy(scroll_pol, scroll_pol)
view_scroll.add(view)
view_scroll.set_size_request(-1, 100)
view_scroll.set_hexpand(True)
view_scroll.set_vexpand(True)
# Context menu
ui_dir = os.path.dirname(os.path.abspath(__file__))
ui_path = os.path.join(ui_dir, "layerswindow.xml")
self.app.ui_manager.add_ui_from_file(ui_path)
menu = self.app.ui_manager.get_widget("/LayersWindowPopup")
menu.set_title(_("Layer"))
self.connect("popup-menu", self._popup_menu_cb)
menu.attach_to_widget(self, None)
self._menu = menu
self._layer_specific_ui_mergeids = []
self._layer_specific_ui_class = None
# Main layout grid
grid = Gtk.Grid()
grid.set_row_spacing(widgets.SPACING_TIGHT)
grid.set_column_spacing(widgets.SPACING)
# Mode dropdown
row = 0
label = Gtk.Label
|
(label=_('Mode:'))
label.set_tooltip_text(
_("Blending mode: how the current layer combines with the "
"layers underneath it."))
label.set_alignment(0, 0.5)
label.set_hexpand(False)
grid.attach(label, 0, row, 1, 1)
store = Gtk.ListStore(int, str, bool)
|
modes = lib.layer.STACK_MODES + lib.layer.STANDARD_MODES
for mode in modes:
label, desc = lib.layer.MODE_STRINGS.get(mode)
store.append([mode, label, True])
combo = Gtk.ComboBox()
combo.set_model(store)
combo.set_hexpand(True)
cell = Gtk.CellRendererText()
combo.pack_start(cell, True)
combo.add_attribute(cell, "text", 1)
combo.add_attribute(cell, "sensitive", 2)
self._layer_mode_combo = combo
grid.attach(combo, 1, row, 5, 1)
# Opacity slider
row += 1
opacity_lbl = Gtk.Label(label=_('Opacity:'))
opacity_lbl.set_tooltip_text(
_("Layer opacity: how much of the current layer to use. "
"Smaller values make it more transparent."))
opacity_lbl.set_alignment(0, 0.5)
opacity_lbl.set_hexpand(False)
adj = Gtk.Adjustment(lower=0, upper=100,
step_incr=1, page_incr=10)
self._opacity_scale = Gtk.HScale.new(adj)
self._opacity_scale.set_draw_value(False)
self._opacity_scale.set_hexpand(True)
grid.attach(opacity_lbl, 0, row, 1, 1)
grid.attach(self._opacity_scale, 1, row, 5, 1)
# Layer list and controls
row += 1
layersbox = Gtk.VBox()
style = layersbox.get_style_context()
style.add_class(Gtk.STYLE_CLASS_LINKED)
style = view_scroll.get_style_context()
style.set_junction_sides(Gtk.JunctionSides.BOTTOM)
list_tools = inline_toolbar(
self.app,
[
("NewLayerGroupAbove", "mypaint-layer-group-new-symbolic"),
("NewPaintingLayerAbove", "mypaint-add-symbolic"),
("RemoveLayer", "mypaint-remove-symbolic"),
("RaiseLayerInStack", "mypaint-up-symbolic"),
("LowerLayerInStack", "mypaint-down-symbolic"),
("DuplicateLayer", None),
("MergeLayerDown", None),
]
)
style = list_tools.get_style_context()
style.set_junction_sides(Gtk.JunctionSides.TOP)
layersbox.pack_start(view_scroll, True, True, 0)
layersbox.pack_start(list_tools, False, False, 0)
layersbox.set_hexpand(True)
layersbox.set_vexpand(True)
grid.attach(layersbox, 0, row, 6, 1)
# Background layer controls
row += 1
show_bg_btn = Gtk.CheckButton()
change_bg_act = self.app.find_action("BackgroundWindow")
change_bg_btn = widgets.borderless_button(action=change_bg_act)
show_bg_act = self.app.find_action("ShowBackgroundToggle")
show_bg_btn.set_related_action(show_bg_act)
grid.attach(show_bg_btn, 0, row, 5, 1)
grid.attach(change_bg_btn, 5, row, 1, 1)
# Pack
self.pack_start(grid, False, True, 0)
# Updates from the real layers tree (TODO: move to lib/layer
|
daniel-bell/google-codejam-2014
|
qualification/a_magic_trick.py
|
Python
|
mit
| 802 | 0.032419 |
import sys
file_name = sys.argv[1]
with open(file_name, "r") as f:
num = int(f.readline())
for i in range(num):
first_row = int(f.readline()) - 1
first_board = list()
for x in range(4):
raw_line = f.readline()
line = [int(x) for x in raw_line.split(" ")]
first_board.append(line)
|
second_row = int(f.readline()) - 1
second_board = list()
for x in range(4):
raw_line = f.readline()
line = [int(x) for x in raw_line.split(" ")]
second_board.append(line)
common_values = [x for x in first_board[first_row] if x in second_board[second_row]];
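        # The trick works only if exactly one value is common to both chosen
        # rows: zero matches means the volunteer cheated, several mean the
        # magician cannot determine the card.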
if not common_
|
values:
case_string = "Volunteer cheated!"
elif len(common_values) > 1:
case_string = "Bad magician!"
else:
case_string = str(common_values[0])
print("Case #" + str(i + 1) + ": " + case_string)
|
daisychainme/daisychain
|
daisychain/channel_facebook/tests/test_base.py
|
Python
|
mit
| 2,078 | 0.001444 |
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from channel_facebook.
|
channel import FacebookChannel
from channel_facebook.models import FacebookAccount
from core import models
class FacebookBaseTestCase(TestCase):
fixtures = ['core/fixtures/initial_data.json','channel_facebook/fixtures/initial_data.json']
class MockResponse:
def __init__(self, json_data
|
, status_code, ok):
self.json_data = json_data
self.status_code = status_code
self.ok = ok
def json(self):
return self.json_data
def setUp(self):
self.time = timezone.now()
self.user = self.create_user()
self.facebook_account = self.create_facebook_account(self.user)
self.channel = FacebookChannel()
self.channel_name = models.Channel.objects.get(name="Facebook").name
self.channel_id = models.Channel.objects.get(name="Facebook").id
self.client = Client()
self.conditions = {'hashtag': '#me'}
self.fields = 'message,actions,full_picture,picture,from,created_time,link,permalink_url,type,description,source,object_id'
self.webhook_data = {
"time": self.time,
"id": "101915710270588",
"changed_fields": ["statuses"],
"uid": "101915710270588"
}
def create_user(self):
user = User.objects.create_user('Superuser',
'superuser@super.com',
'Password')
user.save()
return user
def create_facebook_account(self, user):
facebook_account = FacebookAccount(user=user,
username='101915710270588',
access_token='test token',
last_post_time=timezone.now()
)
facebook_account.save()
return facebook_account
|
naparuba/kunai
|
opsbro/module.py
|
Python
|
mit
| 3,141 | 0.013372 |
from .parameters import ParameterBasedType
from .log import LoggerFactory
from .packer import packer
from .misc.six import add_metaclass
TYPES_DESCRIPTIONS = {'generic': 'Generic module',
                      'functions_export': 'Such modules provide functions that can be used by evaluation rules',
                      'connector': 'Such modules will export data to external tools',
                      'listener': 'Such modules will listen to external queries',
                      'handler': 'Such modules will add new handlers'}
MODULE_STATE_COLORS = {'STARTED': 'green', 'DISABLED': 'grey', 'ERROR': 'red'}
MODULE_STATES = ['STARTED', 'DISABLED', 'ERROR']
class ModulesMetaClass(type):
__inheritors__ = set()
def __new__(meta, name, bases, dct):
klass = type.__new__(meta, name, bases, dct)
        # This class needs to implement a real role to be loaded
if klass.implement:
            # When creating the class, we need to look at the module where it is. It will be created like this (in modulemanager):
# module___global___windows___collector_iis ==> level=global pack_name=windows, collector_name=collector_iis
from_module = dct['__module__']
elts = from_module.split('___')
# Let the klass know it
klass.pack_level = elts[1]
klass.pack_name = elts[2]
meta.__inheritors__.add(klass)
return klass
@add_metaclass(ModulesMetaClass)
class Module(ParameterBasedType):
implement =
|
''
module_type = 'generic'
@classmethod
def get_sub_class(cls):
return cls.__inheritors__
def __init__(self):
ParameterBasedType.__init__(self)
self.daemon = None
# Global logger for this part
self.logger = LoggerFactory.create_logger('module.%s' % self.__class__.pack_name)
if hasattr(self, 'pack_level') and hasattr(self, 'pack_
|
name'):
self.pack_directory = packer.get_pack_directory(self.pack_level, self.pack_name)
else:
self.pack_directory = ''
def get_info(self):
return {'configuration': self.get_config(), 'state': 'DISABLED', 'log': ''}
def prepare(self):
return
def launch(self):
return
def export_http(self):
return
    # Called when the daemon goes down.
    # WARNING: the daemon thread may still be alive, so beware
    # of parallel data access
def stopping_agent(self):
pass
class FunctionsExportModule(Module):
module_type = 'functions_export'
class ConnectorModule(Module):
module_type = 'connector'
class ListenerModule(Module):
module_type = 'listener'
class HandlerModule(Module):
module_type = 'handler'
def __init__(self):
super(HandlerModule, self).__init__()
from .handlermgr import handlermgr
implement = self.implement
if not implement:
self.logger.error('Unknown implement type for module, cannot load it.')
return
handlermgr.register_handler_module(implement, self)
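# A minimal subclassing sketch (hypothetical pack module that the module
# manager would load under a name like "module___global___demo___handler_mail"):
#
#     class MailHandler(HandlerModule):
#         implement = 'mail'  # non-empty, so ModulesMetaClass registers it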
|
inmagik/contento
|
contento/tests/__init__.py
|
Python
|
mit
| 220 | 0.004545 |
from django.test import TestCase
class Anima
|
lTestCase(TestCase):
def setUp(self):
print 2
def test_animals_can_speak(sel
|
f):
"""Animals that can speak are correctly identified"""
print 3
|
YzPaul3/h2o-3
|
h2o-py/h2o/connection.py
|
Python
|
apache-2.0
| 29,700 | 0.015354 |
"""
An H2OConnection represents the latest active handle to a cloud. No more than a single
H2OConnection object will be active at any one time.
"""
from __future__ import print_function
from __future__ import absolute_import
import requests
import math
import tempfile
import os
import re
import sys
import time
import subprocess
import atexit
import warnings
import site
from .display import H2ODisplay
from .h2o_logging import _is_logging, _log_rest
from .two_dim_table import H2OTwoDimTable
from .utils.shared_utils import quote
from six import iteritems, PY3
from string import ascii_lowercase, digits
from random import choice
warnings.simplefilter('always', UserWarning)
try:
warnings.simplefilter('ignore', requests.packages.urllib3.exceptions.InsecureRequestWarning)
except:
pass
__H2OCONN__ = None # the single active connection to H2O cloud
__H2O_REST_API_VERSION__ = 3 # const for the version of the rest api
class H2OConnection(object):
"""
H2OConnection is a class that represents a connection to the H2O cluster.
It is specified by an IP address and a port number.
Objects of type H2OConnection are not instantiated directly!
This class contains static methods for performing the common REST methods
GET, POST, and DELETE.
"""
__ENCODING__ = "utf-8"
__ENCODING_ERROR__ = "replace"
def __init__(self, ip, port, start_h2o, enable_assertions, license, nthreads, max_mem_size, min_mem_size, ice_root,
strict_version_check, proxy, https, insecure, username, password, max_mem_size_GB, min_mem_size_GB, proxies, size):
"""
Instantiate the package handle to the H2O cluster.
:param ip: An IP address, default is "localhost"
:param port: A port, default is 54321
:param start_h2o: A boolean dictating whether this module should start the H2O jvm. An attempt is made anyways if _connect fails.
:param enable_assertions: If start_h2o, pass `-ea` as a VM option.
:param license: If not None, is a path to a license file.
:param nthreads: Number of threads in the thread pool. This relates very closely to the number of CPUs used.
-1 means use all CPUs on the host. A positive integer specifies the number of CPUs directly. This value is only used when Python starts H2O.
:param max_mem_size: Maximum heap size (jvm option Xmx) in gigabytes.
:param min_mem_size: Minimum heap size (jvm option Xms) in gigabytes.
:param ice_root: A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
:param strict_version_check: Setting this to False is unsupported and should only be done when advised by technical support.
:param proxy: A dictionary with keys 'ftp', 'http', 'https' and values that correspond to a proxy path.
:param https: Set this to True to use https instead of http.
:param insecure: Set this to True to disable SSL certificate checking.
:param username: Username to login with.
:param password: Password to login with.
:param max_mem_size_GB: DEPRECATED. Use max_mem_size.
:param min_mem_size_GB: DEPRECATED. Use min_mem_size.
:param proxies: DEPRECATED. Use proxy.
:param size: DEPRECATED.
:return: None
"""
port = as_int(port)
    if not (isinstance(port, int) and 0 <= port <= sys.maxsize): raise ValueError("Port out of range, " + str(port))
if https != insecure: raise ValueError("`https` and `insecure` must both be True to enable HTTPS")
#Deprecated params
if max_mem_size_GB is not None:
warnings.warn("`max_mem_size_GB` is deprecated. Use `max_mem_size` instead.", category=DeprecationWarning)
max_mem_size = max_mem_size_GB
if min_mem_size_GB is not None:
warnings.warn("`min_mem_size_GB` is deprecated. Use `min_mem_size` instead.", category=DeprecationWarning)
min_mem_size = min_mem_size_GB
if proxies is not None:
warnings.warn("`proxies` is deprecated. Use `proxy` instead.", category=DeprecationWarning)
proxy = proxies
if size is not None:
warnings.warn("`size` is deprecated.", category=DeprecationWarning)
global __H2OCONN__
self._cld = None
self._ip = ip
self._port = port
self._proxy = proxy
self._https = https
self._insecure = insecure
self._username = username
self._password = password
self._session_id = None
self._rest_version = __H2O_REST_API_VERSION__
self._child = getattr(__H2OCONN__, "_child") if hasattr(__H2OCONN__, "_child") else None
__H2OCONN__ = self
#Give user warning if proxy environment variable is found. PUBDEV-2504
for name, value in os.environ.items():
if name.lower()[-6:] == '_proxy' and value:
warnings.warn("Proxy environment variable `" + name + "` with value `" + value + "` found. This may interfere with your H2O Connection.")
jarpaths = H2OConnection.jar_paths()
if os.path.exists(jarpaths[0]): jar_path = jarpaths[0]
elif os.path.exists(jarpaths[1]): jar_path = jarpaths[1]
elif os.path.exists(jarpaths[2]): jar_path = jarpaths[2]
elif os.path.exists(jarpaths[3]): jar_path = jarpaths[3]
elif os.path.exists(jarpaths[4]): jar_path = jarpaths[4]
else: jar_path = jarpaths[5]
try:
cld = self._connect()
except:
# try to start local jar or re-raise previous exception
if not start_h2o: raise ValueError("Cannot connect to H2O server. Please check that H2O is running at {}".format(H2OConnection.make_url("")))
print()
print()
print("No instance found at ip and port: " + ip + ":" + str(port) + ". Trying to start local jar...")
print()
print()
path_to_jar = os.path.exists(jar_path)
if path_to_jar:
if not ice_root:
ice_root = tempfile.mkdtemp()
cld = self._start_local_h2o_jar(max_mem_size, min_mem_size, enable_assertions, license, ice_root, jar_path, nthreads)
else:
print("No
|
jar file found. Could not start local instance.")
print("Jar Paths searched: ")
for jp in jarpaths:
print("\t" + jp)
print()
raise
__H2OCONN__._cld = cld
if strict_version_check and os.environ.get('H2O_DISABLE_STRICT_VERSION_CHECK') is None:
ver_h2o = cld['version']
from .__init__ import __version__
ver_pkg = "UNKNOWN" if __version__ == "SUBST_PROJECT_VERSION" else __version__
if ver_h2o != ver_pkg:
try:
branch_name_h2o = cld['branch_name']
except KeyError:
branch_name_h2o = None
else:
branch_name_h2o = cld['branch_name']
try:
build_number_h2o = cld['build_number']
except KeyError:
build_number_h2o = None
else:
build_number_h2o = cld['build_number']
if build_number_h2o is None:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == 'unknown':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == '99999':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, str(ver_pkg)))
else:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
|
mcdeoliveira/ctrl
|
examples/rc_motor_control.py
|
Python
|
apache-2.0
| 4,210 | 0.024703 |
#!/usr/bin/env python3
def main():
# import python's standard math module and numpy
import math, numpy, sys
# import Controller and other blocks from modules
from pyctrl.rc import Controller
from pyctrl.block import Interp, Logger, Constant
from pyctrl.block.system import System, Differentiator, Feedback
from pyctrl.system.tf import PID
# initialize controller
Ts = 0.01
bbb = Controller(period = Ts)
# add encoder as source
bbb.add_source('encoder1',
('pyctrl.rc.encoder', 'Encoder'),
['encoder'],
kwargs = {'encoder': 3,
'ratio': 60 * 35.557})
# add motor as sink
bbb.add_sink('motor1',
('pyctrl.rc.motor', 'Motor'),
['pwm'],
kwargs = {'motor': 3},
enable = True)
# add motor speed signal
bbb.add_signal('speed')
# add motor speed filter
bbb.add_filter('speed',
Differentiator(),
['clock','encoder'],
['speed'])
# calculate PI controller gains
tau = 1/55 # time constant (s)
g = 0.092 # gain (cycles/sec duty)
Kp = 1/g
Ki = Kp/tau
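    # Worked numbers for the gains above, as a sanity check (units assumed
    # from the plots at the end: pwm in %, speed in Hz):
    # Kp = 1/0.092 ~ 10.87 and Ki = Kp/tau = Kp*55 ~ 597.8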
print('Controller gains: Kp = {}, Ki = {}'.format(Kp, Ki))
# build controller block
pid = System(model = PID(Kp = Kp, Ki = Ki, period = Ts))
# add motor speed signal
bbb.add_signal('speed_reference')
bbb.add_filter('PIcontrol',
Feedback(block = pid),
['speed','speed_reference'],
['pwm'])
# build interpolated input signal
ts = [0, 1, 2, 3, 4, 5, 5, 6]
us = [0, 0, 8, 8, -4, -4, 0, 0]
# add filter to interpolate data
bbb.add_filter('input',
Interp(xp = us, fp = ts),
['clock'],
['speed_reference'])
# add logger
bbb.add_sink('logger',
Logger(),
['clock','pwm','encoder','speed','speed_reference'])
    # Add a timer to stop the controller
bbb.add_timer('stop',
Constant(value = 0),
None, ['is_running'],
period = 6, repeat = False)
# print controller info
print(bbb.info('all'))
try:
# run the controller
print('> Run the controller.')
# set speed_reference
#bbb.set_signal('speed_reference', 5)
# reset clock
bbb.set_source('clock', reset = True)
with bbb:
# wait for the controller to finish on its own
bbb.join()
print('> Done with the controller.')
except KeyboardInterrupt:
pass
finally:
pass
# read logger
data = bbb.get_sink('logger', 'log')
try:
# import matplotlib
import matplotlib.pyplot as plt
except:
print('! Could not load matplotlib, skipping plots')
sys.exit(0)
print('> Will plot')
try:
# start plot
plt.figure()
except:
print('! Could not plot graphics')
print('> Make sure you have a connection to a windows manager')
sys.exit(0)
# plot pwm
plt.subplot(2,1,1)
plt.plot(data['clock'], data['pwm'], 'b')
plt.ylabel('pwm (%)')
plt.ylim((-120,120))
plt.xlim(0,6)
plt.grid()
# plot encoder
plt.subplot(2,1,2)
plt.plot(data['clock'], data['encoder'],'b')
plt.ylabel('position (cycles)')
plt.ylim((0,25))
plt.xlim(0,6)
plt.grid()
# start plot
plt.figure()
# plot pwm
ax1 = plt.gca()
ax1.plot(data['clock'], data['pwm'],'g', label='pwm')
ax1.set_ylabel('pwm (%)')
ax1.set_ylim((-60,120))
ax1.grid()
plt.legend(loc = 2)
# plot velocity
ax2 = plt.twinx()
ax2.plot(data['clock'], data['speed'],'b', label='speed')
ax2.plot(data['clock'], data['speed_reference'], 'r', label='reference')
ax2.set_ylabel('speed (Hz)')
ax2.set_ylim((-6,12))
ax2.set_xlim(0,6)
ax2.grid()
plt.legend(loc = 1)
# show plots
plt.show()
if __name__ == "__main__":
main()
|
egeland/agodaparser
|
agodaparser.py
|
Python
|
gpl-3.0
| 2,962 | 0.005402 |
#!/usr/bin/env python3
# pylint: disable=C0103, C0325, C0301
"""
Zipped Agoda Hotel Data File Parser
-----------------------------------
This utility unzips and parses the Agoda hotel data file, in-memory,
and makes the data available
"""
import csv
import zipfile
import io
import sys
class AgodaParser(object):
"""Class to manage parsing and searching of parsed data"""
def __init__(self, zipdatafile):
"""Read and parse Agoda hotel data from a zip file"""
if not zipfile.is_zipfile(zipdatafile):
print("ERROR: '{0}' is not a valid zip file".format(zipdatafile))
sys.exit(1)
zipfh = zipfile.ZipFile(zipdatafile, mode='r')
datafile = zipfh.infolist()[0]
        with zipfh.open(datafile, mode='rU') as datafh:
datafh.read(3) # strips the BOM
            csvReader = csv.DictReader(io.TextIOWrapper(datafh), delimiter=',', quotechar='"')
self.result = []
for row in csvReader:
if not float == type(row['rates_from']):
try:
rates_from = float(row['rates_from'])
except ValueError:
#print("ERROR: Unable to convert '{0}' to float for '{1}'".format(row['rates_from'], row['hotel_name']))
#print("DEBUG: '{0}'".format(row))
rates_from = 'Rates Not Available'
else:
rates_from = row['rates_from']
row['rates_from'] = rates_from
self.result.append(row)
zipfh.close()
def get_all(self):
"""Return the full list of hotels as a list of dictionaries"""
return self.result
def find(self, hotel_id=None):
"""Locate a specific hotel by id"""
if None == hotel_id:
raise ValueError("Missing a hotel id")
hotel_id = str(hotel_id)
return next((item for item in self.result if item["hotel_id"] == hotel_id), None)
def find_url(self, url=None):
"""Locate a specific hotel by url snippet"""
if None == url:
raise ValueError("Missing a hotel url")
return next((item for item in self.result if item["url"] in url), None)
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser(description='Parse zipped Agoda hotel data file')
argparser.add_argument("zipped_datafile", help="Agoda hotel datafile, in .zip format")
args = argparser.parse_args()
zipdatafile = args.zipped_datafile
parsed = AgodaParser(zipdatafile)
for entryrow in parsed.get_all():
if 'Rates Not Available' == entryrow['rates_from']:
print("{0} - '{1}': No rates available".format(entryrow['hotel_id'], entryrow['hotel_name']))
else:
print("{0} - '{1}' from '{2}' '{3}'".format(entryrow['hotel_id'], entryrow['hotel_name'], entryrow['rates_currency'], entryrow['rates_from']))
|
OsirisSPS/osiris-sps
|
client/data/extensions/148B613D055759C619D5F4EFD9FDB978387E97CB/scripts/oml/rotator.py
|
Python
|
gpl-3.0
| 610 | 0.04918 |
import os
import osiris
import globalvars
class OmlRotator(osiris.OMLHtmlWrapper):
def __init__(self, tag):
osiris.OMLHtmlWrapper.__init__(self, tag, "div", False, "", "", "")
def processHtml(self, item, context):
extensionID = globalvars.extension.getID().getString()
		context.page.addJavascript("/htdocs/js/oml/rotator.js")
		item.setParam("id","rotator_" + osiris.UniqueID.generate().getString())
script = "<script type=\"text/javascript\">Rotator.init('" + item.getParam("id") + "');</script>";
return osiris.OMLHtmlWrapper.processHtml(self, item, context) + script;
|
jasonrollins/shareplum
|
shareplum/__init__.py
|
Python
|
mit
| 360 | 0 |
# SharePlum
# This library simplifies the code necessary
# to automate interactions with a SharePoint
# server using python
from .office365 import Office365 # noqa: F401
from .site import Site  # noqa: F401
from .version import __version__  # noqa: F401
__all__ = ["site", "office365"]
__title__ = "SharePlum SharePoint Library"
__author__ = "Jason Rollins"
|
nicko96/Chrome-Infra
|
appengine/crbuild/gerrit/test/client_test.py
|
Python
|
bsd-3-clause
| 3,159 | 0.001266 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mock import Mock
from gerrit import GerritClient
from test import CrBuildTestCase
SHORT_CHANGE_ID = 'I7c1811882cf59c1dc55018926edb6d35295c53b8'
CHANGE_ID = 'project~master~%s' % SHORT_CHANGE_ID
REVISION = '404d1697dca23824bc1130061a5bd2be4e073922'
class GerritClientTestCase(CrBuildTestCase):
def test_get_change(self):
req_path = 'changes/%s?o=ALL_REVISIONS' % CHANGE_ID
    change_response = {
'id': CHANGE_ID,
'project': 'project',
'branch': 'master',
'hashtags': [],
'change_id': SHORT_CHANGE_ID,
'subject': 'My change',
'status': 'NEW',
'created': '2014-10-17 18:24:39.193000000',
'updated': '2014-10-17 20:44:48.338000000',
'mergeable': True,
'insertions': 10,
'deletions': 11,
'_sortkey': '0030833c0002bff9',
'_number': 180217,
'owner': {
'name': 'John Doe'
},
'current_revision': REVISION,
'revisions': {
REVISION: {
'_number': 1,
'fetch': {
'http': {
'url': 'https://chromium.googlesource.com/html-office',
'ref': 'refs/changes/80/123/1'
}
}
}
}
}
client = GerritClient('chromium-review.googlesource.com')
    client._fetch = Mock(return_value=change_response)
change = client.get_change(CHANGE_ID)
client._fetch.assert_called_once_with(req_path)
self.assertIsNotNone(change)
self.assertEqual(change.change_id, SHORT_CHANGE_ID)
self.assertEqual(change.branch, 'master')
self.assertEqual(change.project, 'project')
self.assertEqual(change.owner.name, 'John Doe')
self.assertEqual(change.current_revision, REVISION)
# smoke test for branch coverage
change = client.get_change(CHANGE_ID, include_all_revisions=False,
include_owner_details=True)
def test_get_nonexistent_change(self):
client = GerritClient('chromium-review.googlesource.com')
change = client.get_change(CHANGE_ID)
self.assertIsNone(change)
def test_set_review(self):
req_path = 'changes/%s/revisions/%s/review' % (CHANGE_ID, REVISION)
labels = {'Verified': 1 }
client = GerritClient('chromium-review.googlesource.com')
client._fetch = Mock(return_value={'labels': labels})
    client.set_review(CHANGE_ID, REVISION, message='Hi!', labels=labels)
client._fetch.assert_called_with(req_path, method='POST', body={
'message': 'Hi!',
'labels': labels,
})
# Test with "notify" parameter.
client.set_review(CHANGE_ID, REVISION, message='Hi!', labels=labels,
notify='all')
    client._fetch.assert_called_with(req_path, method='POST', body={
'message': 'Hi!',
'labels': labels,
'notify': 'ALL',
})
with self.assertRaises(AssertionError):
client.set_review(CHANGE_ID, REVISION, notify='Argh!')
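# For reference, a hedged sketch of the call pattern these tests exercise
# (host and labels as above; per test_set_review, a notify value such as
# 'all' is upper-cased before sending, and unknown values are rejected):
#
#     client = GerritClient('chromium-review.googlesource.com')
#     client.set_review(CHANGE_ID, REVISION, message='LGTM',
#                       labels={'Verified': 1}, notify='all')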
|
chapmanb/cloudbiolinux
|
cloudbio/custom/versioncheck.py
|
Python
|
mit
| 2,864 | 0.003142 |
"""Tool specific version checking to identify out of date dependencies.
This provides infrastructure to check version strings against installed
tools, enabling re-installation if a version doesn't match. This is a
lightweight way to avoid out of date dependencies.
"""
from __future__ import print_function
from distutils.version import LooseVersion
from cloudbio.custom import shared
from cloudbio.fabutils import quiet
def _parse_from_stdoutflag(out, flag, stdout_index=-1):
"""Extract version information from a flag in verbose stdout.
flag -- text information to identify the line we should split for a version
stdout_index -- Position of the version information in the split line. Defaults
to the last item.
"""
for line in out.split("\n") + out.stderr.split("\n"):
if line.find(flag) >= 0:
parts = line.split()
return parts[stdout_index].strip()
print("Did not find version information with flag %s from: \n %s" % (flag, out))
return ""
def _clean_version(x):
if x.startswith("upstream/"):
x = x.replace("upstream/", "")
if x.startswith("("):
x = x[1:].strip()
if x.endswith(")"):
x = x[:-1].strip()
if x.startswith("v"):
x = x[1:].strip()
return x
def up_to_date(env, cmd, version, args=None, stdout_flag=None,
stdout_index=-1):
iversion = get_installed_version(env, cmd, version, args, stdout_flag,
stdout_index)
if not iversion:
return False
else:
return LooseVersion(iversion) >= LooseVersion(version)
def is_version(env, cmd, version, args=None, stdout_flag=None,
stdout_index=-1):
    iversion = get_installed_version(env, cmd, version, args, stdout_flag,
stdout_index)
if not iversion:
return False
else:
return LooseVersion(iversion) == LooseVersion(version)
def get_installed_version(env, cmd, version, args=None, stdout_flag=None,
stdout_index=-1):
"""Check if the given command is up to date with the pr
|
ovided version.
"""
if shared._executable_not_on_path(cmd):
return False
if args:
cmd = cmd + " " + " ".join(args)
with quiet():
path_safe = ("export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:{s}/lib/pkgconfig && "
"export PATH=$PATH:{s}/bin && "
"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{s}/lib && ".format(s=env.system_install))
out = env.safe_run_output(path_safe + cmd)
if stdout_flag:
iversion = _parse_from_stdoutflag(out, stdout_flag, stdout_index)
else:
iversion = out.strip()
iversion = _clean_version(iversion)
if " not found in the pkg-config search path" in iversion:
return False
return iversion
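# A small self-test of the parsing helpers above, runnable without a remote
# host (the banner text is made up; the str subclass only fakes the .stderr
# attribute that _parse_from_stdoutflag expects on fabric output objects):
if __name__ == "__main__":
    class _FakeOut(str):
        stderr = ""
    out = _FakeOut("mytool version 1.2.3\nCopyright (c) nobody")
    assert _parse_from_stdoutflag(out, "version") == "1.2.3"
    assert _clean_version("v1.2.3") == "1.2.3"
    print("versioncheck helpers OK")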
|
sendgrid/sendgrid-python
|
sendgrid/helpers/stats/__init__.py
|
Python
|
mit
| 29 | 0 |
from .stats import * # noqa
|
schnapptack/gskompetenzen
|
features/gsaudit/migrations/0011_auto__add_field_pupil_birthday.py
|
Python
|
agpl-3.0
| 12,951 | 0.006486 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Pupil.birthday'
db.add_column('gsaudit_pupil', 'birthday',
self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2012, 6, 17, 0, 0)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Pupil.birthday'
db.delete_column('gsaudit_pupil', 'birthday')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gsaudit.audit': {
'Meta': {'object_name': 'Audit'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.TeachingAssignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'written_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gsaudit.auditskill': {
'Meta': {'object_name': 'AuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'gsaudit.grade': {
'Meta': {'object_name': 'Grade'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.School']"})
},
'gsaudit.gradeparticipant': {
'Meta': {'object_name': 'GradeParticipant'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"})
},
'gsaudit.pupil': {
'Meta': {'ordering': "('first_name', 'last_name')", 'object_name': 'Pupil'},
'birthday': ('django.db.models.fields.DateField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {})
},
'gsaudit.pupilauditskill': {
'Meta': {'object_name': 'PupilAuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'diagnosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.
|
liqiangnlp/LiNMT
|
scripts/hint.aware/chunk/eng/LiNMT-postprocess-text-chunking-rmNP.py
|
Python
|
mit
| 2,875 | 0.017043 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Qiang Li
# Email: liqiangneu@gmail.com
# Time: 10:27, 03/30/2017
import sys
import codecs
import argparse
import random
from io import open
argparse.open = open
reload(sys)
sys.setdefaultencoding('utf8')
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
else:
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Text Chunking')
parser.add_argument(
        '--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
metavar='PATH', help='Input text (default: standard input).')
parser.add_argument(
'--outword', '-w', type=argparse.FileType('w'), required=True,
metavar='PATH', help='Output word file')
parser.add_argument(
'--outlabel', '-l', type=argparse.FileType('w'), required=True,
metavar='PATH', help='Output label file')
return parser
def pos_postprocess(ifobj, owfobj, olfobj, ologfobj):
line_word = ''
line_label = ''
total_words = 0
reserved_words = 0
remove_words = 0
for line in ifobj:
line = line.strip()
if line == '':
line_word = line_word.strip()
line_label = line_label.strip()
owfobj.write('{0}\n'.format(line_word))
olfobj.write('{0}\n'.format(line_label))
line_word = ''
line_label = ''
else:
words = line.split('\t')
total_words += 1
if words[0] == '':
words[0] = 'NA'
if words[3] == '':
words[3] = 'O'
if "NP" in words[3]:
words[0] = '#'
words[3] = '#'
remove_words += 1
line_word += ' '+words[0]
line_label += ' '+words[3]
ologfobj.write('total word:{0}\n'.format(total_words))
ologfobj.write('remove word:{0}\n'.format(remove_words))
reserve_words = total_words - remove_words
ologfobj.write('reserve word:{0}\n'.format(reserve_words))
reserve_rate = float(reserve_words) / float(total_words)
print reserve_rate
ologfobj.write('reserve rate:{0}\n'.format(reserve_rate))
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args()
# read/write files as UTF-8
if args.input.name != '<stdin>':
args.input = codecs.open(args.input.name, encoding='utf-8')
args.outword = codecs.open(args.outword.name, 'w', encoding='utf-8')
args.outlabel = codecs.open(args.outlabel.name, 'w', encoding='utf-8')
args.outlog = codecs.open(args.outword.name+".log", 'w', encoding='utf-8')
pos_postprocess(args.input, args.outword, args.outlabel, args.outlog)
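# For reference, a made-up fragment of the tab-separated input that
# pos_postprocess() expects (token in column 1, chunk label in column 4;
# a blank line ends a sentence). With this fragment, the two NP tokens
# 'The' and 'cat' would be rewritten to '#':
#
#     The<TAB>DT<TAB>B-NP<TAB>B-NP
#     cat<TAB>NN<TAB>I-NP<TAB>I-NP
#     sat<TAB>VBD<TAB>B-VP<TAB>B-VP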
|
ankitshah009/High-Radix-Adaptive-CORDIC
|
Testbench Generation code/Testbench Files/test_sin,sinh.py
|
Python
|
apache-2.0
| 3,601 | 0.064149 |
import math,sys
from math import pi
def ieee754 (a):
rep = 0
#sign bit
if (a<0):
rep = 1<<31
a = math.fabs(a)
if (a >= 1):
#exponent
exp = int(math.log(a,2))
rep = rep|((exp+127)<<23)
#mantissa
temp = a / pow(2,exp) - 1
i = 22
while i>=0:
temp = temp * 2
            if temp >= 1:
rep = rep | (1<<i)
temp = temp - 1
i-=1
return rep
elif ((a<1) and (a!=0)):
#exponent
exp = 0
temp = a
while temp<1 :
temp = temp*2
exp +=1
rep = rep |((127 - exp)<<23)
#mantissa
temp = temp - 1
i = 22
while i>=0:
temp = temp * 2
            if temp >= 1:
rep = rep | (1<<i)
temp = temp - 1
i-=1
return rep
else :
return 0
def ieee754tofloat (a):
ex = (a & 0x7F800000)>>23
ex = ex - 127
i = 1
p = 22
num = 1.0
#print('%d \n' % (ex))
while (p != -1) :
i = 1<<p
dig = (a & i)>>p
#print dig
num += (dig * pow(2,p-23))
p -= 1
num = num * pow(2,ex)
i = 1<<31
sign = a & i
if (sign) :
num = num * -1
print num
return num
#def generate_testbench(value):
def convert2hex_of_xilinx(hex_number,num_of_bits):
hex_number = hex_number.split('x')
hex_number = hex_number[1]
hex_number = str(hex_number)
hex_number = str(num_of_bits)+"'h"+ hex_number +';'
return hex_number
if __name__ == "__main__":
time =0;
i = 0;
j = 0;
for time in range(0,100):
i = i+1
j = j+1
if j == 255:
j = 0;
else:
j = j;
if i ==2:
i = 0
else:
i=i
InsTagIn = j
InsTagIn = hex(InsTagIn)
InsTagIn = InsTagIn.split('x')
InsTagIn = InsTagIn[1]
instagin = str("\tInsTagIn = ")
InsTagIn = instagin + "8'h"+str(InsTagIn) + ";"
Opcode = i
Opcode = hex(Opcode)
Opcode = Opcode.split('x')
Opcode = Opcode[1]
opcode = str("\tOpcode = ")
Opcode = opcode +"4'h"+ str(Opcode) +";"
delay = 20
delay = str(delay)
delay = '#' + delay
x = str(" x_processor= ")
x = delay +x
y = str("\ty_processor= ")
z = str("\tz_processor= ")
#z = delay+z
'''x_processor = 0.01*time
x_processor = float(x_processor)
x_processor = ieee754(x_processor)
x_processor = hex(x_processor)
x_processor = x_processor.split('x')
x_processor = x_process
|
or[1]
x_processor = str(x_processor)
y_processor = 0.5 + 0.01*time
y_processor = float(y_processor)
y_processor = ieee754(y_processor)
y_processor = hex(y_proce
|
ssor)
y_processor = y_processor.split('x')
y_processor = y_processor[1]
y_processor = str(y_processor)'''
x_processor = str(00000000);
y_processor = str(00000000);
z_processor = time*pi/180
z_float1 = float(z_processor)
z_processor = ieee754(z_float1)
z_processor = hex(z_processor)
z_processor = z_processor.split('x')
z_processor = z_processor[1]
z_processor = str(z_processor)
x = x+"32'h"+x_processor +";"
y = y+"32'h"+y_processor +";"
z = z+"32'h"+z_processor +";"
print x
print y
print z
print Opcode
print InsTagIn
'''if i ==0:
sine = math.sin(z_float1)
sine = ieee754(sine)
sine = hex(sine)
sine = convert2hex_of_xilinx(sine,32)
cosine = math.cos(z_float1)
cosine = ieee754(cosine)
cosine = hex(cosine)
cosine = convert2hex_of_xilinx(cosine,32)
print "\t" +"x_out ="+ str(cosine)
print "\t" +"y_out ="+ str(sine)
elif i==1:
sineh = math.sinh(z_float1)
sineh = ieee754(sineh)
sineh = hex(sineh)
sineh = convert2hex_of_xilinx(sineh,32)
cosineh = math.cosh(z_float1)
cosineh = ieee754(cosineh)
cosineh = hex(cosineh)
cosineh = convert2hex_of_xilinx(cosineh,32)
print "\t" +"x_out = "+ str(cosineh)
print "\t" +"y_out = "+ str(sineh)'''
|
agry/NGECore2
|
scripts/expertise/expertise_sm_path_inside_information_1.py
|
Python
|
lgpl-3.0
| 199 | 0.030151 |
import sys
def addAbilities(core, actor, player):
actor.addAbility("sm_inside_information")
return
def removeAbilities(core, actor, player):
actor.removeAbility("sm_inside_information")
return
|
chartbeat-labs/mongomock
|
mongomock/collection.py
|
Python
|
bsd-3-clause
| 39,269 | 0.006239 |
import collections
import copy
import functools
import itertools
import json
import time
import warnings
from sentinels import NOTHING
from .filtering import filter_applies, iter_key_candidates
from . import ObjectId, OperationFailure, DuplicateKeyError
from .helpers import basestring, xrange, print_deprecation_warning
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
# Optional requirements for providing Map-Reduce functionality
import execjs
except ImportError:
execjs = None
try:
from bson import (json_util, SON)
except ImportError:
    json_util = SON = None
from six import (
string_types,
text_type,
iteritems,
itervalues,
iterkeys)
from mongomock import helpers
class Collection(object):
def __init__(self, db, name):
super(Collection, self).__init__()
self.name = name
self.full_name = "{0}.{1}".format(db.name, name)
self._Collection__database = db
self._documents = OrderedDict()
self._uniques = []
def __repr__(self):
return "Collection({0}, '{1}')".format(self._Collection__database, self.name)
def __getitem__(self, name):
return self._Collection__database[self.name + '.' + name]
def __getattr__(self, name):
return self.__getitem__(name)
def insert(self, data, manipulate=True,
safe=None, check_keys=True, continue_on_error=False, **kwargs):
if isinstance(data, list):
return [self._insert(element) for element in data]
return self._insert(data)
def _insert(self, data):
if not all(isinstance(k, string_types) for k in data):
raise ValueError("Document keys must be strings")
if '_id' not in data:
data['_id'] = ObjectId()
object_id = data['_id']
if object_id in self._documents:
raise DuplicateKeyError("Duplicate Key Error", 11000)
for unique in self._uniques:
find_kwargs = {}
for key, direction in unique:
if key in data:
find_kwargs[key] = data[key]
answer = self.find(spec=find_kwargs)
if answer.count() > 0:
raise DuplicateKeyError("Duplicate Key Error", 11000)
self._documents[object_id] = self._internalize_dict(data)
return object_id
def _internalize_dict(self, d):
return dict((k, copy.deepcopy(v)) for k, v in iteritems(d))
def _has_key(self, doc, key):
return key in doc
def update(self, spec, document, upsert = False, manipulate = False,
safe = False, multi = False, _check_keys = False, **kwargs):
"""Updates document(s) in the collection."""
found = False
updated_existing = False
num_updated = 0
for existing_document in itertools.chain(self._iter_documents(spec), [None]):
# we need was_insert for the setOnInsert update operation
was_insert = False
# the sentinel document means we should do an upsert
if existing_document is None:
if not upsert:
continue
existing_document = self._documents[self._insert(self._discard_operators(spec))]
was_insert = True
else:
updated_existing = True
num_updated += 1
first = True
found = True
subdocument = None
for k, v in iteritems(document):
if k == '$set':
positional = False
for key in iterkeys(v):
if '$' in key:
positional = True
break
if positional:
subdocument = self._update_document_fields_positional(existing_document,v, spec, _set_updater, subdocument)
                    continue
self._update_document_fields(existing_document, v, _set_updater)
elif k == '$setOnInsert':
if not was_insert:
continue
positional = any('$' in key for key in iterkeys(v))
if positional:
# we use _set_updater
subdocument = self._update_document_fields_positional(existing_document,v, spec, _set_updater, subdocument)
else:
self._update_document_fields(existing_document, v, _set_updater)
elif k == '$unset':
for field, value in iteritems(v):
if self._has_key(existing_document, field):
del existing_document[field]
elif k == '$inc':
positional = False
for key in iterkeys(v):
if '$' in key:
positional = True
break
if positional:
subdocument = self._update_document_fields_positional(existing_document, v, spec, _inc_updater, subdocument)
continue
self._update_document_fields(existing_document, v, _inc_updater)
elif k == '$addToSet':
for field, value in iteritems(v):
container = existing_document.setdefault(field, [])
if value not in container:
container.append(value)
elif k == '$pull':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field in existing_document:
arr = existing_document[field]
if isinstance(value, dict):
existing_document[field] = [obj for obj in arr if not filter_applies(value, obj)]
else:
existing_document[field] = [obj for obj in arr if not value == obj]
continue
# nested fields includes a positional element
# need to find that element
if '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(existing_document, spec, nested_field_list)
# value should be a dictionary since we're pulling
pull_results = []
# and the last subdoc should be an array
for obj in subdocument[nested_field_list[-1]]:
if isinstance(obj, dict):
for pull_key, pull_value in iteritems(value):
if obj[pull_key] != pull_value:
pull_results.append(obj)
continue
if obj != value:
pull_results.append(obj)
# cannot write to doc directly as it doesn't save to existing_document
subdocument[nested_field_list[-1]] = pull_results
elif k == '$push':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field not in existing_document:
existing_document[field] = []
# document should be a list
# append to it
if isinstance(value, dict):
if '$each' in value:
# append the list to the field
existing_document
|
jdhp-sap/data-pipeline-standalone-scripts
|
datapipe/optimization/objectivefunc/tailcut_delta_psi.py
|
Python
|
mit
| 6,676 | 0.004046 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__all__ = ['ObjectiveFunction']
import numpy as np
from datapipe.denoising.tailcut import Tailcut
from datapipe.benchmark import assess
def norm_angle_diff(angle_in_degrees):
"""Normalize the difference of 2 angles in degree.
This function is used to normalize the "delta psi" angle.
"""
return np.abs(np.mod(angle_in_degrees + 90, 180) - 90.)
# OPTIMIZER ##################################################################
class ObjectiveFunction:
def __init__(self, input_files, max_num_img=None, aggregation_method="mean"):
self.call_number = 0
# Init the wavelet class
self.cleaning_algorithm = Tailcut()
# Make the image list
self.input_files = input_files
self.max_num_img = max_num_img
self.aggregation_method = aggregation_method # "mean" or "median"
print("aggregation method:", self.aggregation_method)
# PRE PROCESSING FILTERING ############################################
# TODO...
def __call__(self, threshold_list):
self.call_number += 1
aggregated_score = float('inf')
try:
high_threshold = float(threshold_list[0])
low_threshold = float(threshold_list[1])
if low_threshold > high_threshold:
# To avoid useless computation, reject solutions where low threshold is greater than high threshold
# (these solutions have the same result than the solution `low_threshold == high_threshold`)
return float('nan')
#low_threshold = min(low_threshold, high_threshold) # low threshold should not be greater than high threshold
algo_params_var = {
"high_threshold": high_threshold,
"low_threshold": low_threshold
}
benchmark_method = "delta_psi" # TODO
label = "TC_{}".format(self.call_number)
self.cleaning_algorithm.label = label
output_file_path = "score_tailcut_optim_{}.json".format(self.call_number)
algo_params = {
"kill_isolated_pixels": True,
"verbose": False,
}
algo_params.update(algo_params_var)
# TODO: randomly make a subset fo self.input_files
input_files = self.input_files
output_dict = self.cleaning_algorithm.run(algo_params,
input_file_or_dir_path_list=input_files,
benchmark_method=benchmark_method,
output_file_path=output_file_path,
max_num_img=self.max_num_img)
score_list = []
# Read and compute results from output_dict
for image_dict in output_dict["io"]:
# POST PROCESSING FILTERING #######################################
# >>>TODO<<<: Filter images: decide wether the image should be used or not ? (contained vs not contained)
# TODO: filter these images *before* cleaning them to avoid waste of computation...
# >>>TODO<<<: Filter images by energy range: decide wether the image should be used or not ?
# TODO: filter these images *before* cleaning them to avoid waste of computation...
###################################################################
# GET THE CLEANED IMAGE SCORE
if ("img_ref_hillas_2_psi" in image_dict) and ("img_cleaned_hillas_2_psi" in image_dict):
output_image_parameter_psi_rad = image_dict["img_ref_hillas_2_psi"]
reference_image_parameter_psi_rad = image_dict["img_cleaned_hillas_2_psi"]
delta_psi_rad = reference_image_parameter_psi_rad - output_image_parameter_psi_rad
normalized_delta_psi_deg = norm_angle_diff(np.degrees(delta_psi_rad))
#if image_dict["score_name"][0] != "delta_psi":
# raise Exception("Cannot get the score")
#normalized_delta_psi_deg = image_dict["score"][0]
score_list.append(normalized_delta_psi_deg)
else:
# The cleaning algorithm failed to clean this image
# TODO: add a penalty
score_list.append(90.) # the worst score
# Compute the mean
if self.aggregation_method == "mean":
aggregated_score = np.array([score_list]).mean()
elif self.aggregation_method == "median":
aggregated_score = np.array([score_list]).median()
else:
raise ValueError("Unknown value for aggregation_method: {}".format(self.aggregation_method))
# TODO: save results in a JSON file (?)
print(algo_params_var, aggregated_score, self.aggregation_method)
except Exception as e:
print(e)
return float(aggregated_score)
if __name__ == "__main__":
# Test...
#func = ObjectiveFunction(input_files=["./MISC/testset/gamma/digicam/"])
func = ObjectiveFunction(input_files=["/dev/shm/.jd/digicam/gamma/"])
threshold_list = [10, 5]
score = func(threshold_list)
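    # Worked examples of norm_angle_diff(), which folds an angle difference
    # into [0, 90] degrees (psi and psi+180 describe the same image axis):
    # norm_angle_diff(10), norm_angle_diff(170) and norm_angle_diff(-170)
    # all give 10.0, while norm_angle_diff(90) gives 90.0.
    print(norm_angle_diff(170))  # -> 10.0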
|
JackDanger/sentry
|
src/sentry/south_migrations/0077_auto__add_trackeduser__add_unique_trackeduser_project_ident.py
|
Python
|
bsd-3-clause
| 22,623 | 0.008222 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TrackedUser'
db.create_table('sentry_trackeduser', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'])),
('ident', self.gf('django.db.models.fields.CharField')(max_length=200)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True)),
('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
))
db.send_create_signal('sentry', ['TrackedUser'])
# Adding unique constraint on 'TrackedUser', fields ['project', 'ident']
db.create_unique('sentry_trackeduser', ['project_id', 'ident'])
def backwards(self, orm):
# Removing unique constraint on 'TrackedUser', fields ['project', 'ident']
db.delete_unique('sentry_trackeduser', ['project_id', 'ident'])
# Deleting model 'TrackedUser'
db.delete_table('sentry_trackeduser')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'ident', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True
|
cawka/pybindgen
|
pybindgen/typehandlers/pyobjecttype.py
|
Python
|
lgpl-2.1
| 2,516 | 0.001192 |
# docstrings not needed here (the type handler interfaces are fully
# documented in base.py) pylint: disable-msg=C0111
from .base import ReturnValue, Parameter, \
ReverseWrapperBase, ForwardWrapperBase
class PyObjectParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['PyObject*']
def __init__(self, ctype, name, transfer_ownership, is_const=False):
"""
:param ctype: C type, normally 'PyObject*'
:param name: parameter name
:param transfer_ownership: this parameter transfer the ownership of
the pointed-to object to the called
function
"""
super(PyObjectParam, self).__init__(
ctype, name, direction=Parameter.DIRECTION_IN, is_const=is_const)
self.transfer_ownership = transfer_ownership
    def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
if self.transfer_ownership:
wrapper.build_params.add_parameter('N', [self.value])
else:
wrapper.build_params.add_parameter('O', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable(self.ctype_no_const, self.name)
        wrapper.parse_params.add_parameter('O', ['&'+name], self.name)
wrapper.call_params.append(name)
if self.transfer_ownership:
wrapper.before_call.write_code("Py_INCREF((PyObject*) %s);" % name)
class PyObjectReturnValue(ReturnValue):
CTYPES = ['PyObject*']
def __init__(self, ctype, caller_owns_return, is_const=False):
"""
        :param ctype: C type, normally 'PyObject*'
:param caller_owns_return: if true, ownership of the object pointer
is transferred to the caller
"""
super(PyObjectReturnValue, self).__init__(ctype, is_const)
self.caller_owns_return = caller_owns_return
def get_c_error_return(self):
return "return NULL;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("O", ["&"+self.value], prepend=True)
if self.caller_owns_return:
wrapper.after_call.write_code("Py_INCREF((PyObject*) %s);" % self.value)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter(
(self.caller_owns_return and "N" or "O"),
[self.value], prepend=True)
|
oscaro/django
|
tests/middleware/tests.py
|
Python
|
bsd-3-clause
| 32,964 | 0.001335 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
from io import BytesIO
import random
import re
from unittest import skipIf
import warnings
from django.conf import settings
from django.core import mail
from django.db import (transaction, connections, DEFAULT_DB_ALIAS,
IntegrityError)
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import CommonMiddleware, BrokenLinkEmailsMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.middleware.gzip import GZipMiddleware
from django.middleware.transaction import TransactionMiddleware
from django.test import TransactionTestCase, TestCase, RequestFactory, override_settings
from django.test.utils import IgnoreDeprecationWarningsMixin
from django.utils import six
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.encoding import force_str
from django.utils.six.moves import xrange
from .models import Band
class CommonMiddlewareTest(TestCase):
urls = 'middleware.urls'
def _get_request(self, path):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.path = request.path_info = "/%s" % path
return request
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
Tests that URLs with slashes go unmolested.
"""
request = self._get_request('slash/')
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Tests that matches to explicit slashless URLs go unmolested.
"""
request = self._get_request('noslash')
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
Tests that APPEND_SLASH doesn't redirect to unknown resources.
"""
request = self._get_request('unknown')
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
"""
request = self._get_request('slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://testserver/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
request = self._get_request('slash')
request.method = 'POST'
with six.assertRaisesRegex(self, RuntimeError, 'end in a slash'):
CommonMiddleware().process_request(request)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Tests disabling append slash functionality.
"""
request = self._get_request('slash')
        self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
Tests that URLs which require quoting are redirected to their slash
version ok.
"""
request = self._get_request('needsquoting#')
        r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://testserver/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self._get_request('path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://www.testserver/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self._get_request('slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self._get_request('slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
# The following tests examine expected behavior given a custom urlconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
Tests that URLs with slashes go unmolested.
"""
request = self._get_request('customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Tests that matches to explicit slashless URLs go unmolested.
"""
request = self._get_request('customurlconf/noslash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
Tests that APPEND_SLASH doesn't redirect to unknown resources.
"""
request = self._get_request('customurlconf/unknown')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
"""
request = self._get_request('customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertFalse(r is None,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://testserver/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
request = self._get_request('customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
request.method = 'POST'
with six.assertRaisesRegex(self, RuntimeError, 'end in a slash'):
CommonMiddleware().process_request(request)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled_custom_urlconf(self):
"""
Tests disabling append slash functionality.
"""
request = self._get_request('customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted_custom_urlconf(self):
"""
Tests that URLs which require quoting are redirected to their slash
version ok.
"""
request = self._get_request('customurlconf/needsquoting#')
        request.urlconf = 'middleware.extra_urls'
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(
            r.url,
            'http://testserver/customurlconf/needsquoting%23/')
|
Sezzh/tifis_platform
|
tifis_platform/groupmodule/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 711 | 0.001406 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usermodule', '0002_auto_20151108_2019'),
]
operations = [
migrations.CreateModel(
            name='Period',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=10)),
('start_date', models.DateField()),
('end_date', models.DateField()),
('professor', models.ForeignKey(to='usermodule.Professor')),
],
),
]
|
rokj/django_newsletter
|
newsletter/admin.py
|
Python
|
mit
| 1,249 | 0.008006 |
# -*- coding: utf-8 -*-
import datetime
from django.contrib import admin
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from newsletter.models import Newsletter, NewsletterSubscription
from newsletter.views import send_mass_email
def send_emails(newsletter, emails):
if settings.DEBUG == True:
emails = [d[1] for d in settings.ADMINS]
send_mass_email(settings.EMAIL_FROM, None, emails, newsletter.title, newsletter.txt, newsletter.html)
if settings.DEBUG != True:
newsletter.datetime_sent = datetime.datetime.now()
        newsletter.sent_to = ';'.join(emails)
newsletter.save()
def send_newsletter(modeladmin, request, queryset):
for q in queryset:
        newsletter_subscriptions = NewsletterSubscription.objects.filter(subscribed=True)
emails = [ns.email for ns in newsletter_subscriptions]
send_emails(q, emails)
send_newsletter.short_description = _(u"Send newsletter")
class NewsletterAdmin(admin.ModelAdmin):
list_display = ['title', 'txt', 'html', 'datetime_sent']
ordering = ['-datetime_sent']
actions = [send_newsletter]
admin.site.register([Newsletter], NewsletterAdmin)
admin.site.register([NewsletterSubscription])
|
AEDA-Solutions/matweb
|
backend/Database/Models/Periodo.py
|
Python
|
mit
| 742 | 0.074124 |
from Database.Controllers.Curso import Curso
class Periodo(object):
def __init__(self,dados=None):
if dados is not None:
self.id = dados ['id']
self.id_curso = dados ['id_curso']
self.periodo = dados ['periodo']
self.creditos = dados ['creditos']
def getId(self):
return self.id
def setId_curso(self,id_curso):
self.id_curso = id_curso
def getId_curso(self):
		return self.id_curso
def getCurso(self):
return (Curso().pegarCurso('where id = %s', (self.id_curso,))).getNome()
def setPeriodo(self,periodo):
self.periodo = periodo
def getPeriodo(self):
return self.periodo
def setCreditos(self,creditos):
self.creditos = creditos
def getCreditos(self):
return self.creditos
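# Hypothetical usage sketch (not part of the original module), assuming the
# constructor receives a dict-shaped row as above:
#
#     p = Periodo({'id': 1, 'id_curso': 2, 'periodo': '2016/1', 'creditos': 24})
#     p.getPeriodo()   # -> '2016/1'
#     p.getCreditos()  # -> 24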
|
kamyarg/enfame
|
metuly/urls.py
|
Python
|
gpl-2.0
| 489 | 0.010225 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'metuly.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'', include('meddit.urls')),
)
|
mwhooker/jones
|
jones/web.py
|
Python
|
apache-2.0
| 5,362 | 0.000559 |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Flask, jsonify, redirect, render_template, request, url_for
from itertools import repeat, izip, imap
from jinja2 import Markup
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeException
from kazoo.security import make_acl, make_digest_acl_credential
from raven.contrib.flask import Sentry
from werkzeug.contrib.fixers import ProxyFix
import json
from jones import Jones, Env
import zkutil
import jonesconfig
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(jonesconfig)
app.config.from_envvar('JONES_SETTINGS', silent=True)
if 'SENTRY_DSN' in app.config:
sentry = Sentry(app)
jones_credential = make_digest_acl_credential(
'Jones', app.config['ZK_DIGEST_PASSWORD']
)
_zk = None
def get_zk():
global _zk
if _zk is None:
_zk = KazooClient(
app.config['ZK_CONNECTION_STRING'],
default_acl=(
# grants read permissions to anyone.
make_acl('world', 'anyone', read=True),
# grants all permissions to the creator of the node.
make_acl('auth', '', all=True)
)
)
_zk.start()
_zk.add_auth('digest', jones_credential)
_zk.DataWatch('/services', func=ensure_root)
return _zk
def ensure_root(data, stat):
if not data:
get_zk().ensure_path('/services')
def request_wants(t):
types = ['text/plain', 'text/html', 'application/json']
assert t in types
best = request.accept_mimetypes \
.best_match(types)
return best == t
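# A sketch of how the negotiation above behaves (added for illustration, not
# in the original file): inside a request whose Accept header is
# "application/json", request_wants('application/json') is True and
# request_wants('text/html') is False, because best_match() picks the closest
# of the three supported MIME types.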
@app.template_filter()
def as_json(d, indent=None):
return Markup(json.dumps(d, indent=indent))
@app.context_processor
def inject_services():
return dict(services=[child for child in get_zk().get_children('/services') if
Jones(child, get_zk()).exists()])
@app.route('/')
def index():
return render_template('index.j2')
def service_create(env, jones):
jones.create_config(env, {})
if request_wants('application/json') or request_wants('text/plain'):
r = jsonify(service=jones.service)
r.status_code = 201
return r
else:
if env.is_root:
env = None
return redirect(url_for(
'services', service=jones.service, env=env))
def service_update(env, jones):
jones.set_config(
env,
json.loads(request.form['data']),
int(request.form['version'])
)
return env
def service_delete(env, jones):
if env.is_root:
# deleting whole service
jones.delete_all()
#return redirect(url_for('index'))
else:
jones.delete_config(env, -1)
return env, 200
def service_get(env, jones):
if not jones.exists():
return redirect(url_for('index'))
children = jones.get_child_envs(Env.Root)
is_leaf = lambda child: len(child) and not any(
c.find(child + '/') >= 0 for c in children)
try:
version, config = jones.get_config_by_env(env)
except NoNodeException:
return redirect(url_for('services', service=jones.service))
childs = imap(dict, izip(
izip(repeat('env'), imap(Env, children)),
izip(repeat('is_leaf'), imap(is_leaf, children))))
vals = {
"env": env,
"version": version,
"children": list(childs),
"config": config,
"view": jones.get_view_by_env(env),
"service": jones.service,
"associations": jones.get_associations(env)
}
if request_wants('application/json'):
return jsonify(vals)
else:
return render_template('service.j2', **vals)
SERVICE = {
'get': service_get,
'put': service_update,
'post': service_create,
'delete': service_delete
}
ALL_METHODS = ['GET', 'PUT', 'POST', 'DELETE']
@app.route('/service/<string:service>/', defaults={'env': None},
methods=ALL_METHODS)
@app.route('/service/<string:service>/<path:env>/', methods=ALL_METHODS)
def services(service, env):
jones = Jones(service, get_zk())
environment = Env(env)
return SERVICE[request.method.lower()](environment, jones)
@app.route('/service/<string:service>/association/<string:assoc>',
methods=['GET', 'PUT', 'DELETE'])
def association(service, assoc):
jones = Jones(service, get_zk())
    if request.method == 'GET':
if request_wants('application/json'):
return jsonify(jones.get_config(assoc))
if request.method == 'PUT':
jones.assoc_host(assoc, Env(request.form['env']))
return service, 201
elif request.method == 'DELETE':
jones.delete_association(assoc)
return service, 200
@app.route('/export')
def export():
return zkutil.export_tree(get_zk(), '/')
if __name__ == '__main__':
app.run()
|
archivsozialebewegungen/AlexandriaBase
|
tests/servicestests/test_creator_service.py
|
Python
|
gpl-3.0
| 732 | 0.008197 |
'''
Created on 12.03.2016
@author: michael
'''
import unittest
from unittest.mock import MagicMock
from alexandriabase.daos import CreatorDao
from alexandriabase.domain import Creator
from alexandriabase.services import CreatorService
class CreatorServiceTest(unittest.TestCase):
def testFindVisible(self):
        dao = MagicMock(spec=CreatorDao)
dao.find_all_visible.return_value = [Creator(34), Creator(35)]
service = CreatorService(dao)
result = service.find_all_active_creators()
self.assertEqual(35, result[1].id)
dao.find_all_visible.assert_called_once_with()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
kou/arrow
|
python/pyarrow/tests/parquet/test_encryption.py
|
Python
|
apache-2.0
| 20,329 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from datetime import timedelta
import pyarrow as pa
try:
import pyarrow.parquet as pq
import pyarrow.parquet_encryption as pe
except ImportError:
pq = None
pe = None
else:
from pyarrow.tests.parquet.encryption import (
InMemoryKmsClient, verify_file_encrypted)
PARQUET_NAME = 'encrypted_table.in_mem.parquet'
FOOTER_KEY = b"0123456789112345"
FOOTER_KEY_NAME = "footer_key"
COL_KEY = b"1234567890123450"
COL_KEY_NAME = "col_key"
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not parquet_encryption'
pytestmark = pytest.mark.parquet_encryption
@pytest.fixture(scope='module')
def data_table():
data_table = pa.Table.from_pydict({
'a': pa.array([1, 2, 3]),
'b': pa.array(['a', 'b', 'c']),
'c': pa.array(['x', 'y', 'z'])
})
return data_table
@pytest.fixture(scope='module')
def basic_encryption_config():
basic_encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={
COL_KEY_NAME: ["a", "b"],
})
return basic_encryption_config
def test_encrypted_parquet_write_read(tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted, and then read it."""
path = tempdir / PARQUET_NAME
# Encrypt the footer with the footer key,
# encrypt column `a` and column `b` with another key,
# keep `c` plaintext
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={
COL_KEY_NAME: ["a", "b"],
},
encryption_algorithm="AES_GCM_V1",
cache_lifetime=timedelta(minutes=5.0),
data_key_length_bits=256)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
verify_file_encrypted(path)
# Read with decryption properties
decryption_config = pe.DecryptionConfiguration(
cache_lifetime=timedelta(minutes=5.0))
result_table = read_encrypted_parquet(
path, decryption_config, kms_connection_config, crypto_factory)
assert data_table.equals(result_table)
def write_encrypted_parquet(path, table, encryption_config,
kms_connection_config, crypto_factory):
file_encryption_properties = crypto_factory.file_encryption_properties(
kms_connection_config, encryption_config)
assert(file_encryption_properties is not None)
with pq.ParquetWriter(
path, table.schema,
encryption_properties=file_encryption_properties) as writer:
writer.write_table(table)
def read_encrypted_parquet(path, decryption_config,
kms_connection_config, crypto_factory):
file_decryption_properties = crypto_factory.file_decryption_properties(
kms_connection_config, decryption_config)
assert(file_decryption_properties is not None)
meta = pq.read_metadata(
path, decryption_properties=file_decryption_properties)
assert(meta.num_columns == 3)
schema = pq.read_schema(
path, decryption_properties=file_decryption_properties)
assert(len(schema.names) == 3)
result = pq.ParquetFile(
path, decryption_properties=file_decryption_properties)
return result.read(use_threads=False)
def test_encrypted_parquet_write_read_wrong_key(tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
and then read it using wrong keys."""
path = tempdir / PARQUET_NAME
# Encrypt the footer with the footer key,
# encrypt column `a` and column `b` with another key,
# keep `c` plaintext
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={
COL_KEY_NAME: ["a", "b"],
},
encryption_algorithm="AES_GCM_V1",
cache_lifetime=timedelta(minutes=5.0),
data_key_length_bits=256)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
verify_file_encrypted(path)
# Read with decryption properties
wrong_kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
# Wrong keys - mixup in names
FOOTER_KEY_NAME: COL_KEY.decode("UTF-8"),
COL_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
}
)
decryption_config = pe.DecryptionConfiguration(
cache_lifetime=timedelta(minutes=5.0))
with pytest.raises(ValueError, match=r"Incorrect master key used"):
read_encrypted_parquet(
path, decryption_config, wrong_kms_connection_config,
crypto_factory)
def test_encrypted_parquet_read_no_decryption_config(tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
but then try to read it without decryption properties."""
test_encrypted_parquet_write_read(tempdir, data_table)
# Read without decryption properties
with pytest.raises(IOError, match=r"no decryption"):
pq.ParquetFile(tempdir / PARQUET_NAME).read()
def test_encrypted_parquet_read_metadata_no_decryption_config(
tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
but then try to read its metadata without decryption properties."""
test_encrypted_parquet_write_read(tempdir, data_table)
# Read metadata without decryption properties
with pytest.raises(IOError, match=r"no decryption"):
        pq.read_metadata(tempdir / PARQUET_NAME)
def test_encrypted_parquet_read_schema_no_decryption_config(
tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
but then try to read its schema without decryption properties."""
test_encrypted_parquet_write_read(tempdir, data_table)
with pytest.raises(IOError, match=r"no decryption"):
pq.read_schema(tempdir / PARQUET_NAME)
def test_encrypted_parquet_write_no_col_key(tempdir, data_table):
"""Write an encrypted parquet, but give only footer key,
without column key."""
path = tempdir / 'encrypted_table_no_col_key.in_mem.parquet'
# Encrypt the footer with the footer key
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
        return InMemoryKmsClient(kms_connection_configuration)
|
kashefy/caffe_sandbox
|
nideep/datasets/twoears/label_utils.py
|
Python
|
bsd-2-clause
| 798 | 0.017544 |
'''
Created on Apr 25, 2017
@author: kashefy
'''
import numpy as np
import h5py
from nideep.iow.file_system_utils import gen_paths, filter_is_h5
def id_loc_to_loc(fpath_src, key_dst, key_src='label_id_loc', has_void_bin=True):
with h5py.File(fpath_src, 'r+') as h:
if has_void_bin:
l = np.sum(h[key_src][...,:-1], axis=1)
else:
l = np.sum(h[key_src], axis=1)
l = np.expand_dims(l, 1)
h[key_dst] = l
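# Shape sketch (an assumption inferred from the code above): label_id_loc is a
# 2-D (samples x id*location[+void]) binary matrix; dropping the optional
# trailing void bin and summing over axis 1 collapses each row to a single
# activity value per sample, stored under key_dst with shape (samples, 1).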
def walk_id_loc_to_loc(dir_src, key_dst):
def runner(fpath):
        if filter_is_h5(fpath):
id_loc_to_loc(fpath, key_dst)
return True # otherwise gen_paths won't append to list
flist = gen_paths(dir_src, func_filter=runner)
return flist
if __name__ == '__main__':
pass
|
drpngx/tensorflow
|
tensorflow/contrib/predictor/contrib_estimator_predictor.py
|
Python
|
apache-2.0
| 3,274 | 0.003054 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.predictor import predictor
from tensorflow.python.framework import ops
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
class ContribEstimatorPredictor(predictor.Predictor):
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
def __init__(self,
estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None,
config=None):
"""Initialize a `ContribEstimatorPredictor`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
"""
self._graph = graph or ops.Graph()
with self._graph.as_default():
input_fn_ops = prediction_input_fn()
# pylint: disable=protected-access
model_fn_ops = estimator._get_predict_ops(input_fn_ops.features)
# pylint: enable=protected-access
checkpoint_path = saver.latest_checkpoint(estimator.model_dir)
self._session = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
config=config,
checkpoint_filename_with_path=checkpoint_path))
input_alternative_key = (
input_alternative_key or
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY)
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_fn_ops)
self._feed_tensors = input_alternatives[input_alternative_key]
(output_alternatives,
output_alternative_key) = saved_model_export_utils.get_output_alternatives(
model_fn_ops, output_alternative_key)
_, fetch_tensors = output_alternatives[output_alternative_key]
self._fetch_tensors = fetch_tensors
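# Hypothetical usage sketch (not part of this module); `Predictor` instances
# are callable on a dict of input values keyed like the feed tensors:
#
#     predictor = ContribEstimatorPredictor(estimator, prediction_input_fn)
#     predictions = predictor({'inputs': examples})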
|
dbbhattacharya/kitsune
|
vendor/packages/translate-toolkit/translate/convert/test_xliff2po.py
|
Python
|
bsd-3-clause
| 8,448 | 0.00071 |
#!/usr/bin/env python
from translate.convert import xliff2po
from translate.misc import wStringIO
from translate.storage.test_base import headerless_len, first_translatable
class TestXLIFF2PO:
xliffskeleton = '''<?xml version="1.0" ?>
<xliff version="1.1" xmlns="urn:oasis:names:tc:xliff:document:1.1">
<file original="filename.po" source-language="en-US" datatype="po">
<body>
%s
</body>
</file>
</xliff>'''
def xliff2po(self, xliffsource):
"""helper that converts xliff source to po source without requiring files"""
inputfile = wStringIO.StringIO(xliffsource)
convertor = xliff2po.xliff2po()
outputpo = convertor.convertstore(inputfile)
print "The generated po:"
print type(outputpo)
print str(outputpo)
return outputpo
def test_minimal(self):
minixlf = self.xliffskeleton % '''<trans-unit>
<source>red</source>
<target>rooi</target>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert headerless_len(pofile.units) == 1
assert pofile.translate("red") == "rooi"
assert pofile.translate("bla") is None
def test_basic(self):
headertext = '''Project-Id-Version: program 2.1-branch
Report-Msgid-Bugs-To:
POT-Creation-Date: 2006-01-09 07:15+0100
PO-Revision-Date: 2004-03-30 17:02+0200
Last-Translator: Zuza Software Foundation <xxx@translate.org.za>
Language-Team: Afrikaans <translate-discuss-xxx@lists.sourceforge.net>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit'''
minixlf = (self.xliffskeleton % '''<trans-unit id="1" restype="x-gettext-domain-header" approved="no" xml:space="preserve">
<source>%s</source>
<target>%s</target>
<note from="po-translator">Zulu translation of program ABC</note>
</trans-unit>
<trans-unit>
<source>gras</source>
<target>utshani</target>
</trans-unit>''') % (headertext, headertext)
print minixlf
pofile = self.xliff2po(minixlf)
assert pofile.translate("gras") == "utshani"
assert pofile.translate("bla") is None
potext = str(pofile)
assert potext.index('# Zulu translation of program ABC') == 0
assert potext.index('msgid "gras"\n')
assert potext.index('msgstr "utshani"\n')
assert potext.index('MIME-Version: 1.0\\n')
def test_translatorcomments(self):
"""Tests translator comments"""
minixlf = self.xliffskeleton % '''<trans-unit>
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-trancomment">Couldn't do
it</context>
</context-group>
<note from="po-translator">Couldn't do
it</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("translator") == "Couldn't do it"
potext = str(pofile)
assert potext.index("# Couldn't do it\n") >= 0
minixlf = self.xliffskeleton % '''<trans-unit xml:space="preserve">
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-trancomment">Couldn't do
it</context>
</context-group>
<note from="po-translator">Couldn't do
it</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("translator") == "Couldn't do\nit"
potext = str(pofile)
assert potext.index("# Couldn't do\n# it\n") >= 0
def test_autocomment(self):
"""Tests automatic comments"""
minixlf = self.xliffskeleton % '''<trans-unit>
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-autocomment">Note that this is
garbage</context>
</context-group>
<note from="developer">Note that this is
garbage</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("developer") == "Note that this is garbage"
potext = str(pofile)
assert potext.index("#. Note that this is garbage\n") >= 0
minixlf = self.xliffskeleton % '''<trans-unit xml:space="preserve">
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-autocomment">Note that this is
garbage</context>
</context-group>
<note from="developer">Note that this is
garbage</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("developer") == "Note that this is\ngarbage"
potext = str(pofile)
assert potext.index("#. Note that this is\n#. garbage\n") >= 0
def test_locations(self):
"""Tests location comments (#:)"""
minixlf = self.xliffskeleton % '''<trans-unit id="1">
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-reference" purpose="location">
<context context-type="sourcefile">example.c</context>
<context context-type="linenumber">123</context>
</context-group>
<context-group name="po-reference" purpose="location">
<context context-type="sourcefile">place.py</context>
</context-group>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
locations = unit.getlocations()
assert len(locations) == 2
assert "example.c:123" in locations
assert "place.py" in locations
def test_fuzzy(self):
"""Tests fuzzyness"""
minixlf = self.xliffskeleton % '''<trans-unit approved="no">
<source>book</source>
</trans-unit>
<trans-unit id="2" approved="yes">
<source>nonsense</source>
        <target>matlhapolosa</target>
</trans-unit>
<trans-unit id="2" approved="no">
<source>verb</source>
<target state="needs-review-translation">lediri</target>
</trans-unit>'''
        pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("verb") == "lediri"
assert pofile.translate("book") is None
assert pofile.translate("bla") is None
assert headerless_len(pofile.units) == 3
#TODO: decide if this one should be fuzzy:
#assert pofile.units[0].isfuzzy()
assert not pofile.units[2].isfuzzy()
assert pofile.units[3].isfuzzy()
def test_plurals(self):
"""Tests fuzzyness"""
minixlf = self.xliffskeleton % '''<group id="1" restype="x-gettext-plurals">
<trans-unit id="1[0]" xml:space="preserve">
<source>cow</source>
<target>inkomo</target>
</trans-unit>
<trans-unit id="1[1]" xml:space="preserve">
<source>cows</source>
<target>iinkomo</target>
</trans-unit>
</group>'''
pofile = self.xliff2po(minixlf)
print str(pofile)
potext = str(pofile)
assert headerless_len(pofile.units) == 1
assert potext.index('msgid_plural "cows"')
assert potext.index('msgstr[0] "inkomo"')
assert potext.index('msgstr[1] "iinkomo"')
class TestBasicXLIFF2PO(TestXLIFF2PO):
    pass
|
openstack/keystone
|
keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py
|
Python
|
apache-2.0
| 835 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial no-op Yoga contract migration.
Revision ID: e25ffa003242
Revises: 27e647c0fad4
Create Date: 2022-01-21 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'e25ffa003242'
down_revision = '27e647c0fad4'
branch_labels = ('contract',)
def upgrade():
pass
|
tum-i22/indistinguishability-obfuscation
|
obfusc8/test/test_bp.py
|
Python
|
apache-2.0
| 5,366 | 0.063362 |
import unittest
from itertools import product
from obfusc8.circuit import *
from obfusc8.bp import *
#enable testing of 'private' module member functions, somewhat sloppy style but I prefer it to any alternative
from obfusc8.bp import _matrix2cycle
class TestBranchingProgram(unittest.TestCase):
def setUp(self):
self.inputLength = 8
inputs = [Input("x"+str(x)) for x in range(0, self.inputLength)]
# (-(x0 & x1) & (-x2 & x3)) & ((x4 & x5) & -(x6 & -x7))
self.circuit = Circuit(AndGate(AndGate(NotGate(AndGate(inputs[0], inputs[1])), AndGate(NotGate(inputs[2]), inputs[3])), AndGate(AndGate(inputs[4], inputs[5]),NotGate(AndGate(inputs[6], NotGate(inputs[7]))))))
self.bp = BranchingProgram.fromCircuit(self.circuit)
def test_estimateBPSize_for_example_circuit(self):
self.assertEqual(self.bp.length, BranchingProgram.estimateBPSize(self.circuit), 'incorrecet size calculated')
def test_equality_of_bp_to_circuit(self):
		for test in list(product([0,1], repeat=self.inputLength)):
test = list(test)
circuitResult = self.circuit.evaluate(test)
bpResult = self.bp.evaluate(test)
self.assertEqual(circuitResult, bpResult, 'Wrong evaluation on input %s. Was %s instead of %s'%(test, circuitResult, bpResult))
class TestPrecalculatedMappings(unittest.TestCase):
def setUp(self):
self.mappings = precalculatedMappings()
self.id2permList = precalculatedId2PermList()
def test_precalculated_mappings(self):
for id, perm in zip(range(len(self.id2permList)), self.id2permList):
correct = dot(dot(_ni2n(), dot(perm, _normalInv())), _ni2n())
mappedResult = self.id2permList[self.mappings[0][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_ni2n(), perm), _ni2n())
mappedResult = self.id2permList[self.mappings[1][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_sec2si(), perm), _sec2si())
mappedResult = self.id2permList[self.mappings[2][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_special1(), perm), _special1())
mappedResult = self.id2permList[self.mappings[3][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_special2(), perm), _special3())
mappedResult = self.id2permList[self.mappings[4][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_n2secInv(), perm), _n2sec())
mappedResult = self.id2permList[self.mappings[5][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
def _identity(): return array([[1,0,0,0,0],[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]])
def _normal(): return array([[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1],[1,0,0,0,0]]) #(01234)
def _normalInv(): return array([[0,0,0,0,1],[1,0,0,0,0],[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0]]) #(04321)
def _ni2n(): return array([[1,0,0,0,0],[0,0,0,0,1],[0,0,0,1,0],[0,0,1,0,0],[0,1,0,0,0]]) #(14)(23)
def _n2sec(): return array([[1,0,0,0,0],[0,0,1,0,0],[0,0,0,0,1],[0,0,0,1,0],[0,1,0,0,0]]) #(124)
def _n2secInv(): return array([[1,0,0,0,0],[0,0,0,0,1],[0,1,0,0,0],[0,0,0,1,0],[0,0,1,0,0]]) #(142)
def _sec2si(): return array([[1,0,0,0,0],[0,0,1,0,0],[0,1,0,0,0],[0,0,0,0,1],[0,0,0,1,0]]) #(12)(34)
#def _res2n(): return array([[1,0,0,0,0],[0,0,0,0,1],[0,0,0,1,0],[0,0,1,0,0],[0,1,0,0,0]]) #(14)(23)
def _special1(): return array([[1, 0, 0, 0, 0],[0, 0, 0, 1, 0],[0, 0, 0, 0, 1],[0, 1, 0, 0, 0],[0, 0, 1, 0, 0]]) #(13)(24)
def _special2(): return array([[1, 0, 0, 0, 0],[0, 1, 0, 0, 0],[0, 0, 0, 0, 1],[0, 0, 1, 0, 0],[0, 0, 0, 1, 0]]) #(243)
def _special3(): return array([[1, 0, 0, 0, 0],[0, 1, 0, 0, 0],[0, 0, 0, 1, 0],[0, 0, 0, 0, 1],[0, 0, 1, 0, 0]]) #(234)
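# Quick sanity sketch (not in the original tests): each helper above returns a
# 5x5 permutation matrix, so multiplying it by its transpose recovers the
# identity:
#
#     assert (dot(_normal(), _normal().T) == _identity()).all()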
class TestExplicitPermutations(unittest.TestCase):
def test_matrix2cycle(self):
a = array([[0,0,1,0,0],[0,1,0,0,0],[1,0,0,0,0],[0,0,0,1,0],[0,0,0,0,1]])
self.assertEqual(_matrix2cycle(a), '(02)', 'wrong on input %s'%a)
self.assertEqual('(01234)', _matrix2cycle(_normal()), 'wrong on input %s'%_normal())
self.assertEqual('e', _matrix2cycle(_identity()), 'wrong on input %s'%_identity())
self.assertEqual('(04321)', _matrix2cycle(_normalInv()), 'wrong on input %s'%_normalInv())
self.assertEqual('(14)(23)', _matrix2cycle(_ni2n()), 'wrong on input %s'%_ni2n())
self.assertEqual('(124)', _matrix2cycle(_n2sec()), 'wrong on input %s'%_n2sec())
self.assertEqual('(142)', _matrix2cycle(_n2secInv()), 'wrong on input %s'%_n2secInv())
self.assertEqual('(12)(34)', _matrix2cycle(_sec2si()), 'wrong on input %s'%_sec2si())
self.assertEqual('(13)(24)', _matrix2cycle(_special1()), 'wrong on input %s'%_special1())
self.assertEqual('(243)', _matrix2cycle(_special2()), 'wrong on input %s'%_special2())
self.assertEqual('(234)', _matrix2cycle(_special3()), 'wrong on input %s'%_special3())
if __name__ == '__main__':
unittest.main()
|
harterj/moose
|
test/tests/fvkernels/mms/advective-outflow/test.py
|
Python
|
lgpl-2.1
| 7,075 | 0.005936 |
import mms
import unittest
from mooseutils import fuzzyAbsoluteEqual
class TestOutflow(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('advection-outflow.i', 7, y_pp=['L2u', 'L2v'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u', 'L2v'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('outflow.png')
for label,value in fig.label_to_slope.items():
if label == 'L2u':
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
else:
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class TestExtrapolation(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('advection.i', 7, y_pp=['L2u', 'L2v'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u', 'L2v'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('extrapolation.png')
for label,value in fig.label_to_slope.items():
if label == 'L2u':
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
else:
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class UpwindLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 7, "FVKernels/advection_u/limiter='upwind'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('upwind-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
class CentralDifferenceLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 7, "FVKernels/advection_u/limiter='central_difference'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('cd-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class VanLeerLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 9, "FVKernels/advection_u/limiter='vanLeer'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('vanLeer-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class MinModLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 9, "FVKernels/advection_u/limiter='min_mod'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('min-mod-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class SOULimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 9, "FVKernels/advection_u/limiter='sou'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('sou-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class QUICKLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 15, "FVKernels/advection_u/limiter='quick'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
        fig.plot(df1,
                 label=['L2u'],
                 marker='o',
                 markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('quick-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class KTLimitedCD(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('kt-limited-advection.i', 11, "FVKernels/advection_u/limiter='central_difference'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('kt-cd-limiter.png')
for key,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
print("%s slope, %f" % (key, value))
class KTLimitedUpwind(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('kt-limited-advection.i', 13, "FVKernels/advection_u/limiter='upwind'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('kt-upwind-limiter.png')
for key,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
print("%s slope, %f" % (key, value))
class KTLimitedVanLeer(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('kt-limited-advection.i', 11, "FVKernels/advection_u/limiter='vanLeer'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('kt-van-leer-limiter.png')
for key,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2.5, .05))
print("%s slope, %f" % (key, value))
|
mtekel/digitalmarketplace-buyer-frontend
|
app/main/suppliers.py
|
Python
|
mit
| 2,705 | 0 |
# coding=utf-8
from string import ascii_uppercase
import flask_featureflags
from app.main import main
from flask import render_template, request
from app.helpers.search_helpers import get_template_data
from app import data_api_client
import re
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
def process_prefix(prefix):
if prefix == "123": # special case
return prefix
    if is_alpha(prefix):
return prefix[:1].upper()
return "A" # default
def process_page(page):
reg = "^[1-9]{1}$" # valid page
if re.search(reg, page):
return page
return "1" # default
def is_alpha(character):
reg = "^[A-Za-z]{1}$" # valid prefix
return re.search(reg, character)
def parse_links(links):
pagination_links = {
"prev": None,
"next": None
}
if 'prev' in links:
pagination_links['prev'] = parse_qs(urlparse(links['prev']).query)
if 'next' in links:
pagination_links['next'] = parse_qs(urlparse(links['next']).query)
return pagination_links
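# Worked example (added for illustration): parse_qs keeps every query value in
# a list, so
#
#     parse_links({'next': '/suppliers?page=2&prefix=A'})
#
# returns {'prev': None, 'next': {'page': ['2'], 'prefix': ['A']}}.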
@main.route('/g-cloud/suppliers')
@flask_featureflags.is_active_feature('SUPPLIER_A_TO_Z')
def suppliers_list_by_prefix():
prefix = process_prefix(request.args.get('prefix', default='A'))
page = process_page(request.args.get('page', default="1"))
api_result = data_api_client.find_suppliers(prefix, page, 'gcloud')
suppliers = api_result["suppliers"]
links = api_result["links"]
template_data = get_template_data(main, {
'title': 'Digital Marketplace - Suppliers'
})
return render_template('suppliers_list.html',
suppliers=suppliers,
nav=ascii_uppercase,
count=len(suppliers),
prev_link=parse_links(links)['prev'],
next_link=parse_links(links)['next'],
prefix=prefix,
**template_data)
@main.route('/g-cloud/supplier/<supplier_id>')
@flask_featureflags.is_active_feature('SUPPLIER_A_TO_Z')
def suppliers_details(supplier_id):
supplier = data_api_client.get_supplier(
supplier_id=supplier_id)["suppliers"]
template_data = get_template_data(main, {
'title': 'Digital Marketplace - Suppliers'
})
first_character_of_supplier_name = supplier["name"][:1]
if is_alpha(first_character_of_supplier_name):
prefix = process_prefix(supplier["name"][:1])
else:
prefix = "123"
return render_template(
'suppliers_details.html',
supplier=supplier,
prefix=prefix,
**template_data)
|
openstack/sahara-dashboard
|
sahara_dashboard/test/integration_tests/tests/test_sahara_image_registry.py
|
Python
|
apache-2.0
| 2,572 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
from sahara_dashboard.test.integration_tests.helpers import SaharaTestCase
IMAGE_NAME = helpers.gen_random_resource_name("image")
class TestSaharaImageRegistry(SaharaTestCase):
def setUp(self):
super(TestSaharaImageRegistry, self).setUp()
image_pg = self.home_pg.go_to_project_compute_imagespage()
image_pg.create_image(
IMAGE_NAME, image_file=self.CONFIG.sahara.fake_image_location)
image_pg.find_message_and_dismiss(messages.SUCCESS)
image_pg.wait_until_image_active(IMAGE_NAME)
def test_image_register_unregister(self):
"""Test the image registration in Sahara."""
image_reg_pg = \
self.home_pg.go_to_dataprocessing_clusters_imageregistrypage()
image_reg_pg.register_image(IMAGE_NAME, self.CONFIG.scenario.ssh_user,
"Test description")
image_reg_pg.wait_until_image_registered(IMAGE_NAME)
self.assertTrue(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not registered.")
self.assertTrue(
image_reg_pg.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
image_reg_pg.find_message_and_dismiss(messages.ERROR),
"Error message occurred during image creation.")
image_reg_pg.unregister_image(IMAGE_NAME)
self.assertTrue(
image_reg_pg.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
image_reg_pg.find_message_and_dismiss(messages.ERROR))
self.assertFalse(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not unregistered.")
def tearDown(self):
image_pg = self.home_pg.go_to_project_compute_imagespage()
image_pg.delete_image(IMAGE_NAME)
super(TestSaharaImageRegistry, self).tearDown()
|
westerncapelabs/django-wcl-skel
|
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/urls.py
|
Python
|
bsd-3-clause
| 536 | 0.001866 |
import os
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.site.site_header = os.environ.get('{{cookiecutter.env_prefix}}_TITLE', '{{cookiecutter.project_name}} Admin')
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^api/auth/',
        include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/token-auth/',
'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^', include('{{cookiecutter.app_name}}.urls')),
)
|
kret0s/gnuhealth-live
|
tryton/server/trytond-3.8.3/trytond/modules/account_invoice_stock/stock.py
|
Python
|
gpl-3.0
| 1,091 | 0.001833 |
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.model import fields
from trytond.pool import Pool, PoolMeta
__all__ = ['StockMove']
__metaclass__ = PoolMeta
class StockMove:
__name__ = 'stock.move'
invoice_lines = fields.Many2Many('account.invoice.line-stock.move',
'stock_move', 'invoice_line', 'Invoice Lines')
@property
def invoiced_quantity(self):
'The quantity from linked invoice lines in move unit'
pool = Pool()
Uom = pool.get('product.uom')
quantity = 0
        for invoice_line in self.invoice_lines:
quantity += Uom.compute_qty(invoice_line.unit,
invoice_line.quantity, self.uom)
return quantity
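    # e.g. two linked invoice lines of 5 and 7 units whose unit matches the
    # move's uom give invoiced_quantity == 12, since Uom.compute_qty is then an
    # identity conversion (a sketch, assuming matching units).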
@classmethod
def copy(cls, moves, default=None):
if default is None:
default = {}
else:
default = default.copy()
default.setdefault('invoice_lines', None)
return super(StockMove, cls).copy(moves, default=default)
|
spencerahill/aospy-obj-lib
|
aospy_user/calcs/gms.py
|
Python
|
apache-2.0
| 4,514 | 0 |
"""Gross moist stability-related quantities."""
from aospy.constants import c_p, grav, L_v
from aospy.utils.vertcoord import to_pascal
from indiff.deriv import EtaCenDeriv, CenDeriv
import numpy as np
from .. import PLEVEL_STR
from . import horiz_divg, vert_divg
from .thermo import dse, mse, fmse
def field_vert_int_max(arr, dp):
"""Maximum magnitude of integral of a field from surface up."""
dp = to_pascal(dp)
# 2015-05-15: Problem: Sigma data indexing starts at TOA, while pressure
# data indexing starts at 1000 hPa. So for now only do for
# sigma data and flip array direction to start from sfc.
arr_dp_g = (arr*dp)[::-1] / grav
# Input array dimensions are assumed ([time dims,] level, lat, lon).
pos_max = np.amax(np.cumsum(arr_dp_g, axis=0), axis=-3)
neg_max = np.amin(np.cumsum(arr_dp_g, axis=0), axis=-3)
# Flip sign because integrating from p_sfc up, i.e. with dp negative.
return -1*np.where(pos_max > -neg_max, pos_max, neg_max)
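# In symbols (a reading of the code above): with I(p) = (1/g) * integral of
# arr dp' from p_sfc up to p, the function returns whichever of max_p I and
# min_p I has the larger magnitude, sign-flipped because dp is negative when
# integrating upward.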
def horiz_divg_vert_int_max(u, v, radius, dp):
"""Maximum magnitude of integral upwards of horizontal divergence."""
    return field_vert_int_max(horiz_divg(u, v, radius, dp), dp)
def vert_divg_vert_int_max(omega, p, dp):
"""Maximum magnitude of integral from surface up of vertical divergence."""
return field_vert_int_max(vert_divg(omega, p, dp), dp)
def gms_like_ratio(weights, tracer, dp):
"""Compute ratio of integrals in the style of gross moist stability."""
# Integrate weights over lower tropospheric layer
dp = to_pascal(dp)
denominator = field_vert_int_max(weights, dp)
# Integrate tracer*weights over whole column and divide.
numerator = np.sum(weights*tracer*dp, axis=-3) / grav
return numerator / denominator
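# i.e. (a sketch of the ratio computed above):
#
#     ratio = [ whole-column integral of weights*tracer dp / g ]
#             / [ max-magnitude partial integral of weights dp / g ]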
def gross_moist_strat(sphum, u, v, radius, dp):
"""Gross moisture stratification, in horizontal divergence form."""
divg = horiz_divg(u, v, radius)
return L_v*gms_like_ratio(divg, sphum, dp)
def gross_dry_stab(temp, hght, u, v, radius, dp):
"""Gross dry stability, in horizontal divergence form."""
divg = horiz_divg(u, v, radius)
return -gms_like_ratio(divg, dse(temp, hght), dp)
def gross_moist_stab(temp, hght, sphum, u, v, radius, dp):
"""Gross moist stability, in horizontal divergence form."""
divg = horiz_divg(u, v, radius)
return -gms_like_ratio(divg, mse(temp, hght, sphum), dp)
def gms_up_low(temp, hght, sphum, level, lev_up=400., lev_dn=925.):
"""Gross moist stability. Upper minus lower level MSE."""
m = mse(temp, hght, sphum)
return (np.squeeze(m[np.where(level == lev_up)] -
m[np.where(level == lev_dn)])/c_p)
def gms_each_level(temp, hght, sphum, level, lev_dn=925.):
m = mse(temp, hght, sphum)
return (m - m[np.where(level == lev_dn)])/c_p
def dry_static_stab(temp, hght, level, lev_dn=925.):
"""Dry static stability, in terms of dry static energy."""
d = dse(temp, hght)
return (d - d[np.where(level == lev_dn)])/c_p
def frozen_moist_static_stab(temp, hght, sphum, q_ice, ps, bk, pk):
"""Frozen moist static stability using model-native coordinate data."""
return EtaCenDeriv(fmse(temp, hght, sphum, q_ice), pk, bk, ps, order=2,
fill_edge=True).deriv()
def moist_static_stab(temp, hght, sphum, ps, bk, pk):
"""Moist static stability using model-native coordinate data. No ice."""
return EtaCenDeriv(mse(temp, hght, sphum), pk, bk, ps, order=2,
fill_edge=True).deriv()
def frozen_moist_static_stab_p(temp, hght, sphum, q_ice):
"""Frozen moist static stability using pressure-interpolated data.
Note that the values in the stratosphere become unphysical using pressure
interpolated data, but otherwise in the troposphere they agree well with
data on model-native coordinates.
"""
p = to_pascal(temp[PLEVEL_STR])
return CenDeriv(fmse(temp, hght, sphum, q_ice), PLEVEL_STR, coord=p,
order=2, fill_edge=True).deriv()
def moist_static_stab_p(temp, hght, sphum):
"""Moist static stability using pressure-interpolated data. No ice.
Note that the values in the stratosphere become unphysical using pressure
interpolated data, but otherwise in the troposphere they agree well with
data on model-native coordinates.
"""
p = to_pascal(temp[PLEVEL_STR])
return CenDeriv(mse(temp, hght, sphum), PLEVEL_STR, coord=p,
order=2, fill_edge=True).deriv()
|
KGerring/RevealMe
|
data/tested.py
|
Python
|
mit
| 6,442 | 0.0104 |
from email import message_from_file
from pkg_resources import working_set as WS
import path
from pathlib import Path
from pkg_resources import *
from pkg_resources import DistInfoDistribution, Distribution
import distutils.dist
import pkg_resources
import dpath
import sys, os, re
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
import json
import pip
import sys
import socket
import multiprocessing
from xmlrpc.client import ServerProxy
from itertools import zip_longest
from functools import reduce
LISTING = '/Users/Kristen/PycharmProjects/proj/Other/data/current_dists.txt'
BASENAMES = ('name', 'version', 'author', 'author_email', 'maintainer', 'maintainer_email', 'url', 'license', 'description', 'long_description', 'keywords', 'platforms', 'fullname', 'contact', 'contact_email', 'license', 'classifiers', 'download_url', 'provides', 'requires', 'obsoletes')
def listing():
listing = set()
with open(LISTING) as reader:
data =reader.read()
data = [item.strip().replace('-', '_') for item in data.splitlines()]
return frozenset(data)
listing =listing()
from pkg_resources import working_set
WS = working_set.by_key
ws = frozenset(ws.replace('-','_') for ws in WS.keys())
def search_dist_list(word): return [dist for dist in ws if dist.__contains__(word)]
class cache(object):
def __init__(self, func=None):
self.storage = dict()
self.items = set()
if func:
self.function = func
else:
self.function = lambda x: x
self.description = ''
def update_description(self, msg, rewrite=False):
if not rewrite:
msg = '. '+msg
msg += self.description
else:
self.description = msg
return self.description
def get_state(self):
class_dict = self.__dict__
if 'function' in class_dict.keys():
del class_dict['function']
return class_dict
def on_search(self, key, search_function=None):
searcher = search_function if search_function else self.function
if key in self.items:
return self.storage[key]
else:
seek = searcher(key)
if seek is not None:
self.storage[key] = seek
self.items.add(key)
self.__setattr__(key, seek)
return seek
return None
def __getattr__(self, attr):
return self[attr]
def __repr__(self):
if hasattr(self.function, '__name__'):
name = self.function.__name__
else:
name = "nofunc"
items = str(len(self.items))
return "Cache for function ({}) with {} items".format(name, items)
Cache = cache(search_dist_list)
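# e.g. Cache.on_search('django') runs search_dist_list once, stores the hit
# list under that key, and later calls with the same key return the cached
# value (the key also becomes an attribute on the cache object).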
class EmptyDictException(Exception):
"""Need a dictionary that isn't `None`"""
def split_keyword_dicts(d=None):
    if d is None:
        raise EmptyDictException("Need a dictionary that isn't None")
    if not isdict(d):
        return d
    for value in d.values():
        if not isdict(value):
            return d
for k, v in d.items():
if v['keywords'] is None:
v['keywords'] = ' '
v['keywords'] = v['keywords'].lower()
if v['keywords'].__contains__(', '):
v['keywords'] = ' '.join(v['keywords'].split(', '))
if v['keywords'].__contains__(','):
v['keywords'] = ' '.join(v['keywords'].split(','))
v['keywords'] = set([val.strip() for val in v['keywords'].split()])
return d
class attrdict(dict):
def __getattr__(self, attr):
return self[attr]
    def __setitem__(self, key, value):
        # delegate to dict to avoid infinite recursion through __setitem__
        dict.__setitem__(self, key, value)
        self.__setattr__(key, value)
def one_level_attrdict(d):
if isdict(d):
for key, value in d.items():
if isdict(value):
d[key] = attrdict(value)
return d
def isdict(obj): return isinstance(obj, dict)
def isNone(obj):
if obj is not None:
return False
return True
MetaData = distutils.dist.DistributionMetadata
msg = message_from_file
def _read_field(name):
value = msg[name]
if value == 'UNKNOWN':
return None
return value
def _read_list(name):
values = msg.get_all(name, None)
if values == []:
return None
return values
ex=Path('/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/dpath-1.4.0.dist-info/METADATA')
exs = "flatdict", 'graphviz', 'genshi', 'textblob'
def get_egg(distribution):
info = distribution.egg_info
if info is not None:
finder = Path(info)
else:
finder = None
return finder
def parse_egg_info(path):
finder =Path(path)
file_dict = dict()
for file in finder.iterdir():
        if file.name in ('METADATA', 'PKG-INFO'):
file_dict['METADATA']= file.open()
if file.name == 'DESCRIPTION':
file_dict['DESCRIPTION'] =file.open()
if file.name == 'metadata.json':
try:
file=json.load(file.open())
except TypeError:
file = file.open()
#extensions/python.details/project_urls
#extensions/python.details/document_names ##in BASENAMES
#extensions/python.details/contacts[lst]/[email | name | role ]
file_dict['metadata_json'] = json.load(file.open())
if file.name == 'RECORD':
record = set()
file = file.open().read()
for line in file.splitlines():
try:
name,hash,size = line.split(',')
items = tuple(name,hash,size)
except ValueErro
|
r:
items = line.split(',')
record.add(items)
file_dict['RECORD']= record
else:
file_dict[file.name] = None
return file_dict
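# --- usage sketch (added; paths/dists are illustrative) ---
# info_dir = get_egg(WS['dpath'])          # dist-info directory of an installed dist
# if info_dir is not None:
#     files = parse_egg_info(info_dir)     # {'METADATA': <fh>, 'RECORD': {...}, ...}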
| |
Hackplayers/Empire-mod-Hpys-tests
|
lib/modules/python/collection/osx/prompt.py
|
Python
|
bsd-3-clause
| 3,830 | 0.006005 |
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Prompt',
# list of one or more authors for the module
'Author': ['@FuzzyNop', '@harmj0y'],
# more verbose multi-line description of the module
            'Description': ('Launches a specified application with a prompt for credentials via osascript.'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
            # if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
            'Comments': [
"https://github.com/fuzzynop/FiveOnceInYourLife"
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'AppName' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'The name of the application to launch.',
'Required' : True,
'Value' : 'App Store'
},
'ListApps' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Switch. List applications suitable for launching.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listApps = self.options['ListApps']['Value']
appName = self.options['AppName']['Value']
if listApps != "":
script = """
import os
apps = [ app.split('.app')[0] for app in os.listdir('/Applications/') if not app.split('.app')[0].startswith('.')]
choices = []
for x in xrange(len(apps)):
choices.append("[%s] %s " %(x+1, apps[x]) )
print "\\nAvailable applications:\\n"
print '\\n'.join(choices)
"""
else:
# osascript prompt for the specific application
script = """
import os
print os.popen('osascript -e \\\'tell app "%s" to activate\\\' -e \\\'tell app "%s" to display dialog "%s requires your password to continue." & return default answer "" with icon 1 with hidden answer with title "%s Alert"\\\'').read()
""" % (appName, appName, appName, appName)
return script
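
# --- usage sketch (added; not part of the Empire source) ---
# Inside Empire the framework supplies its menu object; None stands in here
# only to exercise the option plumbing and print the generated payload.
if __name__ == '__main__':
    mod = Module(None, params=[['AppName', 'Safari']])
    print(mod.generate())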
|
sunweaver/ganetimgr
|
apply/migrations/0005_add_application_network_field.py
|
Python
|
gpl-3.0
| 8,911 | 0.008753 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'InstanceApplication.network'
db.add_column('apply_instanceapplication', 'network', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ganeti.Network'], null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'InstanceApplication.network'
db.delete_column('apply_instanceapplication', 'network_id')
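    # --- usage sketch (added; assumes a South-era Django project) ---
    #   ./manage.py migrate apply 0005   # forwards(): adds apply_instanceapplication.network_id
    #   ./manage.py migrate apply 0004   # backwards(): drops the column again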
models = {
'apply.instanceapplication': {
'Meta': {'object_name': 'InstanceApplication'},
'admin_contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'admin_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'backend_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Cluster']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cookie': ('django.db.models.fields.CharField', [], {'default': "'AYkWSa4Fr2'", 'max_length': '255'}),
'disk_size': ('django.db.models.fields.IntegerField', [], {}),
'filed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hosts_mail_server': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'memory': ('django.db.models.fields.IntegerField', [], {}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Network']", 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['apply.Organization']"}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'vcpus': ('django.db.models.fields.IntegerField', [], {})
},
'apply.organization': {
'Meta': {'object_name': 'Organization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'apply.sshpublickey': {
'Meta': {'object_name': 'SshPublicKey'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'key_type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ganeti.cluster': {
'Meta': {'object_name': 'Cluster'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'fast_create': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_ind
|
hydratk/hydratk
|
src/hydratk/lib/install/command.py
|
Python
|
bsd-3-clause
| 6,457 | 0.004337 |
# -*- coding: utf-8 -*-
"""HydraTK installation commands
.. module:: lib.install.command
:platform: Unix
:synopsis: HydraTK installation commands
.. moduleauthor:: Petr Rašek <bowman@hydratk.org>
"""
from subprocess import call, Popen, PIPE
from os import path, environ
from sys import exit
from hydratk.lib.system.utils import Utils
from hydratk.lib.console.shellexec import shell_exec
def is_install_cmd(argv):
"""Method checks if installation is requested
Args:
argv (list): command arguments
Returns:
bool: result
"""
res = False
if ('install' in argv or 'bdist_egg' in argv or 'bdist_wheel' in argv):
res = True
return res
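# --- usage sketch (added; illustrative only) ---
# from sys import argv
# if is_install_cmd(argv):          # e.g. `python setup.py install`
#     pass  # run pre-install tasks such as install_pck / install_pip below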
def get_pck_manager():
"""Method returns system package managera
Args:
none
Returns:
list: list of string
"""
pck_managers = ['apt-get', 'yum', 'dnf', 'zypper', 'emerge', 'pacman', 'pkg']
pckm = []
for pck in pck_managers:
if (is_installed(pck)):
pckm.append(pck)
return pckm
def is_installed(app):
"""Method checks if system application is installed
Args:
app (str): application
Returns:
bool: result
"""
cmd = ['which', app]
proc = Popen(cmd, stdout=PIPE)
out = proc.communicate()
result = True if (len(out[0]) > 0) else False
return result
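# --- usage sketch (added; illustrative only) ---
# managers = get_pck_manager()        # e.g. ['apt-get'] on Debian/Ubuntu
# if managers and not is_installed('git'):
#     install_pck(managers[0], 'git')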
def install_pck(pckm, pck):
"""Method installs system package from repository
Args:
pckm (str): package manager
pck (str): package
Returns:
none
"""
print('Installing package {0}'.format(pck))
if (pckm == 'apt-get'):
cmd = 'apt-get -y install {0}'.format(pck)
elif (pckm == 'yum'):
cmd = 'yum -y install {0}'.format(pck)
elif (pckm == 'dnf'):
cmd = 'dnf -y install {0}'.format(pck)
elif (pckm == 'zypper'):
cmd = 'zypper install -y {0}'.format(pck)
elif (pckm == 'emerge'):
cmd = 'emerge {0}'.format(pck)
elif (pckm == 'pacman'):
cmd = 'pacman -S --noconfirm {0}'.format(pck)
elif (pckm == 'pkg'):
cmd = 'pkg install -y {0}'.format(pck)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to install package {0}, hydratk installation failed.'.format(pck))
print(err)
exit(-1)
def create_dir(dst):
"""Method creates directory
Args:
dst (str): destination path
Returns:
none
"""
if (not path.exists(dst)):
print('Creating directory {0}'.format(dst))
cmd = 'mkdir -p {0}'.format(dst)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to create directory {0}'.format(dst))
print(err)
def copy_file(src, dst):
"""Method copies file
Args:
        src (str): source path
        dst (str): destination path
Returns:
none
"""
create_dir(dst)
print ('Copying file {0} to {1}'.format(src, dst))
cmd = 'cp {0} {1}'.format(src, dst)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to copy {0} to {1}'.format(src, dst))
print(err)
def move_file(src, dst):
"""Method moves f
|
ile
Args:
src (str): source path
dst (str): destination path
Returns:
none
"""
print('Moving file {0} to {1}'.format(src, dst))
cmd = 'mv {0} {1}'.format(src, dst)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to move {0} to {1}'.format(src, dst))
print(err)
def remove(src, recursive=True):
"""Method removes file or directory
Args:
src (str): source path
recursive (bool): recursive deletion
Returns:
none
"""
print('Removing {0}'.format(src))
cmd = ('rm -fR {0}' if (recursive) else 'rm -f {0}').format(src)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to remove {0}'.format(src))
print(err)
def set_rights(path, rights, recursive=True):
"""Method sets access rights
Args:
path (str): directory/file path
rights (str): access rights
recursive (bool): set recursive rights
Returns:
none
"""
print('Setting rights {0} for {1}'.format(rights, path))
if (recursive):
cmd = 'chmod -R {0} {1}'.format(rights, path)
else:
cmd = 'chmod {0} {1}'.format(rights, path)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to set rights for {0}'.format(path))
print(err)
def install_pip(module):
"""Method installs python module via pip
Args:
module (str): python module
Returns:
none
"""
modtok = module.split('>=') if ('>=' in module) else module.split('==')
module_name = modtok[0]
module_version = modtok[1] if (len(modtok) > 1) else None
pip_path = 'pip' if ('pip' not in environ) else '$pip'
if (module_version != None and Utils.module_exists(module_name)):
if Utils.module_version_ok(module_version, Utils.module_version(module_name)):
print('Module {0} already installed with version {1}'.format(module_name,Utils.module_version(module_name)))
else:
print ('Upgrading module {0} to version {1}'.format(module_name,module_version))
cmd = '{0} install --upgrade "{1}"'.format(pip_path, module)
result, _, err = shell_exec(cmd, True)
if result != 0:
print('Failed to install {0}, hydratk installation failed.'.format(module))
print(err)
exit(-1)
else:
print ('Installing module {0}'.format(module))
cmd = '{0} install "{1}"'.format(pip_path, module)
print(cmd)
result, _, err = shell_exec(cmd, True)
if result != 0:
print('Failed to install {0}, hydratk installation failed.'.format(module))
print(err)
exit(-1)
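# --- usage sketch (added; specs follow pip syntax) ---
# install_pip('lxml>=3.3.3')   # installs, or upgrades an older install to match
# install_pip('setuptools')    # plain install when no version is pinned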
def uninstall_pip(module):
"""Method uninstalls python module via pip
Args:
module (str): python module
Returns:
none
"""
print ('Uninstalling module {0}'.format(module))
cmd = 'pip uninstall -y {0}'.format(module)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to uninstall {0}'.format(module))
print(err)
|