commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
e6d327a5249e14765999357a391d97c4fd2cd8b8 | Test for sitemap.xml | praekelt/jmbo-skeleton,praekelt/jmbo-skeleton,praekelt/jmbo-skeleton | skeleton/tests.py | skeleton/tests.py | from django.core import management
from django.test import TestCase as BaseTestCase
from django.test.client import Client as BaseClient, FakePayload, \
RequestFactory
from django.core.urlresolvers import reverse
from post.models import Post
from foundry.models import Member
class TestCase(BaseTestCase):
@classmethod
def setUpClass(cls):
cls.request = RequestFactory()
cls.client = BaseClient()
# Post-syncdb steps
management.call_command('load_photosizes', interactive=False)
management.call_command('loaddata', 'skeleton/fixtures/sites.json', interactive=False)
# Editor
cls.editor, dc = Member.objects.get_or_create(
username='editor',
email='editor@test.com'
)
cls.editor.set_password("password")
cls.editor.save()
# Post
post, dc = Post.objects.get_or_create(
title='Post 1', content='<b>aaa</b>',
owner=cls.editor, state='published',
)
post.sites = [1]
post.save()
def test_common_urls(self):
"""High-level test to confirm common set of URLs render"""
urls = (
(reverse('join'), 200),
(reverse('login'), 200),
(reverse('logout'), 302),
(reverse('password_reset'), 200),
(reverse('terms-and-conditions'), 200),
('/post/post-1/', 200),
('/sitemap.xml', 200),
)
for url, code in urls:
print "Checking path %s" % url
response = self.client.get(url)
self.assertEqual(response.status_code, code)
| from django.core import management
from django.test import TestCase as BaseTestCase
from django.test.client import Client as BaseClient, FakePayload, \
RequestFactory
from django.core.urlresolvers import reverse
from post.models import Post
from foundry.models import Member
class TestCase(BaseTestCase):
@classmethod
def setUpClass(cls):
cls.request = RequestFactory()
cls.client = BaseClient()
# Post-syncdb steps
management.call_command('load_photosizes', interactive=False)
management.call_command('loaddata', 'skeleton/fixtures/sites.json', interactive=False)
# Editor
cls.editor, dc = Member.objects.get_or_create(
username='editor',
email='editor@test.com'
)
cls.editor.set_password("password")
cls.editor.save()
# Post
post, dc = Post.objects.get_or_create(
title='Post 1', content='<b>aaa</b>',
owner=cls.editor, state='published',
)
post.sites = [1]
post.save()
def test_common_urls(self):
"""High-level test to confirm common set of URLs render"""
urls = (
(reverse('join'), 200),
(reverse('login'), 200),
(reverse('logout'), 302),
(reverse('password_reset'), 200),
(reverse('terms-and-conditions'), 200),
('/post/post-1/', 200),
)
for url, code in urls:
print "Checking path %s" % url
response = self.client.get(url)
self.assertEqual(response.status_code, code)
| bsd-3-clause | Python |
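The diff above adds `/sitemap.xml` to a table-driven smoke test: each `(url, expected status)` pair is checked in one loop. A minimal sketch of the same pattern on current Django; the URL paths here are illustrative, not the project's:

```python
from django.test import TestCase


class UrlSmokeTest(TestCase):
    """Table-driven check that key pages answer with the expected status."""

    def test_common_urls(self):
        urls = (
            ('/', 200),
            ('/sitemap.xml', 200),   # the case this commit adds coverage for
            ('/logout/', 302),       # logout redirects rather than rendering
        )
        for url, expected in urls:
            response = self.client.get(url)
            self.assertEqual(response.status_code, expected,
                             'unexpected status for %s' % url)
```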
2eb3a682706a5ff0255474230ec32fd0ac96c727 | update mac build script | SasView/sasview,lewisodriscoll/sasview,lewisodriscoll/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,SasView/sasview | sansview/setup_mac.py | sansview/setup_mac.py | """
This is a setup.py script partly generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import periodictable.xsf
import DataLoader.readers
from distutils.sysconfig import get_python_lib
import os
DATA_FILES = []
RESOURCES_FILES = []
#Periodictable data file
DATA_FILES = periodictable.data_files()
#invariant and calculator help doc
import sans.perspectives.calculator as calculator
DATA_FILES += calculator.data_files()
import sans.perspectives.invariant as invariant
DATA_FILES += invariant.data_files()
#CANSAxml reader data files
RESOURCES_FILES.append(os.path.join(DataLoader.readers.get_data_path(),'defaults.xml'))
# Locate libxml2 library
lib_locs = ['/usr/local/lib', '/usr/lib']
libxml_path = None
for item in lib_locs:
libxml_path_test = '%s/libxml2.dylib' % item
if os.path.isfile(libxml_path_test):
libxml_path = libxml_path_test
if libxml_path == None:
raise RuntimeError, "Could not find libxml2 on the system"
APP = ['sansview.py']
DATA_FILES += ['images','test','plugins','media']
OPTIONS = {'argv_emulation': True,
'packages': ['lxml','periodictable'],
'iconfile': 'images/ball.icns',
'frameworks':[libxml_path],
'resources': RESOURCES_FILES
}
setup(
app=APP,
data_files=DATA_FILES,
include_package_data= True,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| """
This is a setup.py script partly generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import periodictable.xsf
import DataLoader.readers
from distutils.sysconfig import get_python_lib
import os
DATA_FILES = []
RESOURCES_FILES = []
#Periodictable data file
DATA_FILES = periodictable.data_files()
#invariant and calculator help doc
import sans.perspectives.calculator as calculator
DATA_FILES += calculator.data_files()
import sans.perspectives.invariant as invariant
DATA_FILES += invariant.data_files()
#CANSAxml reader data files
RESOURCES_FILES.append(os.path.join(DataLoader.readers.get_data_path(),'defaults.xml'))
# Locate libxml2 library
lib_locs = ['/usr/local/lib', '/usr/lib']
libxml_path = None
for item in lib_locs:
libxml_path = '%s/libxml2.dylib' % item
if os.path.isfile(libxml_path):
break
if libxml_path == None:
raise RuntimeError, "Could not find libxml2 on the system"
APP = ['sansview.py']
DATA_FILES += ['images','test','plugins','media']
OPTIONS = {'argv_emulation': True,
'packages': ['lxml','periodictable'],
'iconfile': 'images/ball.icns',
'frameworks':[libxml_path],
'resources': RESOURCES_FILES
}
setup(
app=APP,
data_files=DATA_FILES,
include_package_data= True,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| bsd-3-clause | Python |
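The substance of this diff is the search loop: the old version (lower file) assigned `libxml_path` unconditionally on every iteration, so after the loop it always held the last candidate and the `libxml_path == None` check could never fire; the new version only records a path that actually exists. The same idea as a hedged, standalone sketch (the candidate directories are illustrative):

```python
import os

LIB_LOCS = ['/usr/local/lib', '/usr/lib']

def find_library(name, locations=LIB_LOCS):
    """Return the first existing path to `name`, or None if it is absent."""
    return next(
        (os.path.join(loc, name) for loc in locations
         if os.path.isfile(os.path.join(loc, name))),
        None,
    )

libxml_path = find_library('libxml2.dylib')
if libxml_path is None:
    raise RuntimeError('Could not find libxml2 on the system')
```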
b6c8e38b96293bacd7739411200cd85f47f1efca | handle division by zero | Emsu/prophet,YuepengGuo/prophet | prophet/analyze.py | prophet/analyze.py | from prophet.utils.formatters import dict_to_table
import math
import numpy as np
class Analyzer(object):
def __repr__(self):
return self.name
class Volatility(Analyzer):
name = 'volatility'
def run(self, backtest, **kwargs):
return backtest.get_daily_returns().std()
class Sharpe(Analyzer):
name = 'sharpe'
def run(self, data, config, **kwargs):
avg_daily_returns = data['average_return']
volatility = data['volatility']
risk_free_rate = config.get('RISK_FREE_RATE', 0)
trading_days = config.get('YEARLY_TRADING_DAYS', 252)
if volatility == 0:
return 0
return ((avg_daily_returns - risk_free_rate) / volatility
* math.sqrt(trading_days))
class Sortino(Analyzer):
name = 'sortino'
def run(self, backtest, data, config, **kwargs):
avg_daily_returns = data['average_return']
negative_returns = backtest.get_daily_returns()[backtest.get_daily_returns() < 0]
volatility_negative_returns = negative_returns.std()
risk_free_rate = config.get('RISK_FREE_RATE', 0)
trading_days = config.get('YEARLY_TRADING_DAYS', 252)
if volatility_negative_returns == 0:
return 0
return ((avg_daily_returns - risk_free_rate) / volatility_negative_returns
* math.sqrt(trading_days))
class AverageReturn(Analyzer):
name = 'average_return'
def run(self, backtest, **kwargs):
return backtest.get_daily_returns().mean()
class CumulativeReturn(Analyzer):
name = "cumulative_return"
def run(self, backtest, **kwargs):
return backtest.normalize0()[-1]
class MaximumDrawdown(Analyzer):
name = "maximum_drawdown"
def run(self, backtest, **kwargs):
dd_end = np.argmax(np.maximum.accumulate(backtest) - backtest)
dd_start = np.argmax(backtest[:dd_end])
if backtest[dd_start] == 0:
return 0
return 1-backtest[dd_end]/backtest[dd_start]
class Analysis(dict):
def __repr__(self):
""" Represents Analysis object as a text table. """
return dict_to_table(self)
default_analyzers = [Volatility(), AverageReturn(),
Sharpe(), CumulativeReturn(), MaximumDrawdown(), Sortino()]
| from prophet.utils.formatters import dict_to_table
import math
import numpy as np
class Analyzer(object):
def __repr__(self):
return self.name
class Volatility(Analyzer):
name = 'volatility'
def run(self, backtest, **kwargs):
return backtest.get_daily_returns().std()
class Sharpe(Analyzer):
name = 'sharpe'
def run(self, data, config, **kwargs):
avg_daily_returns = data['average_return']
volatility = data['volatility']
risk_free_rate = config.get('RISK_FREE_RATE', 0)
trading_days = config.get('YEARLY_TRADING_DAYS', 252)
return ((avg_daily_returns - risk_free_rate) / volatility
* math.sqrt(trading_days))
class Sortino(Analyzer):
name = 'sortino'
def run(self, backtest, data, config, **kwargs):
avg_daily_returns = data['average_return']
negative_returns = backtest.get_daily_returns()[backtest.get_daily_returns() < 0]
volatility_negative_returns = negative_returns.std()
risk_free_rate = config.get('RISK_FREE_RATE', 0)
trading_days = config.get('YEARLY_TRADING_DAYS', 252)
return ((avg_daily_returns - risk_free_rate) / volatility_negative_returns
* math.sqrt(trading_days))
class AverageReturn(Analyzer):
name = 'average_return'
def run(self, backtest, **kwargs):
return backtest.get_daily_returns().mean()
class CumulativeReturn(Analyzer):
name = "cumulative_return"
def run(self, backtest, **kwargs):
return backtest.normalize0()[-1]
class MaximumDrawdown(Analyzer):
name = "maximum_drawdown"
def run(self, backtest, **kwargs):
dd_end = np.argmax(np.maximum.accumulate(backtest) - backtest)
dd_start = np.argmax(backtest[:dd_end])
return 1-backtest[dd_end]/backtest[dd_start]
class Analysis(dict):
def __repr__(self):
""" Represents Analysis object as a text table. """
return dict_to_table(self)
default_analyzers = [Volatility(), AverageReturn(),
Sharpe(), CumulativeReturn(), MaximumDrawdown(), Sortino()]
| bsd-3-clause | Python |
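Both hunks apply the same guard: Sharpe and Sortino divide by a volatility estimate, so a flat return series (zero standard deviation) must short-circuit to 0 before the division. The guard distilled into a hedged helper, using the same 252 yearly-trading-day default as the code above:

```python
import math

def annualized_ratio(mean_return, deviation, risk_free_rate=0.0,
                     trading_days=252):
    """Sharpe-style ratio that returns 0 instead of raising ZeroDivisionError."""
    if deviation == 0:
        return 0
    return (mean_return - risk_free_rate) / deviation * math.sqrt(trading_days)

assert annualized_ratio(0.001, 0.0) == 0    # flat series: guarded
assert annualized_ratio(0.001, 0.01) > 0    # ordinary case
```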
6ddc32c10124d10db53f9017044be65a00a35a33 | Add newline to end of __init__.py. | jensengroup/propka,jensengroup/propka-3.1 | propka/__init__.py | propka/__init__.py | """PROPKA 3.1
See https://github.com/jensengroup/propka-3.1 for more information.
Please cite these PROPKA references in publications:
* Sondergaard, Chresten R., Mats HM Olsson, Michal Rostkowski, and Jan H. Jensen.
"Improved Treatment of Ligands and Coupling Effects in Empirical Calculation and
Rationalization of pKa Values." Journal of Chemical Theory and Computation 7,
no. 7 (2011): 2284-2295.
* Olsson, Mats HM, Chresten R. Sondergaard, Michal Rostkowski, and Jan H. Jensen.
"PROPKA3: consistent treatment of internal and surface residues in empirical pKa
predictions." Journal of Chemical Theory and Computation 7, no. 2 (2011): 525-537.
"""
__all__ = ["atom", "bonds", "calculations", "conformation_container",
"coupled_groups", "determinant", "determinants", "group",
"hybrid36", "iterative", "lib", "ligand_pka_values", "ligand",
"molecular_container", "output", "parameters", "pdb", "protonate",
"run", "vector_algebra", "version"]
| """PROPKA 3.1
See https://github.com/jensengroup/propka-3.1 for more information.
Please cite these PROPKA references in publications:
* Sondergaard, Chresten R., Mats HM Olsson, Michal Rostkowski, and Jan H. Jensen.
"Improved Treatment of Ligands and Coupling Effects in Empirical Calculation and
Rationalization of pKa Values." Journal of Chemical Theory and Computation 7,
no. 7 (2011): 2284-2295.
* Olsson, Mats HM, Chresten R. Sondergaard, Michal Rostkowski, and Jan H. Jensen.
"PROPKA3: consistent treatment of internal and surface residues in empirical pKa
predictions." Journal of Chemical Theory and Computation 7, no. 2 (2011): 525-537.
"""
__all__ = ["atom", "bonds", "calculations", "conformation_container",
"coupled_groups", "determinant", "determinants", "group",
"hybrid36", "iterative", "lib", "ligand_pka_values", "ligand",
"molecular_container", "output", "parameters", "pdb", "protonate",
"run", "vector_algebra", "version"] | lgpl-2.1 | Python |
0560f65eb6740281c77c9b016bd9a44f486be6ae | Use dense matrices instead of sparse where that makes sense | ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc | openprescribing/matrixstore/matrix_ops.py | openprescribing/matrixstore/matrix_ops.py | from __future__ import division
import numpy
import scipy.sparse
# Above a certain level of density it becomes more efficient to use a normal
# dense matrix instead of a sparse one. This threshold was determined through a
# not particularly scientific process of trial and error. Due to the
# compression we apply there's very little difference in storage requirements
# between sparse and dense matrices, but the difference comes in the time take
# to perform operations (e.g summing) using these matrices and that's harder to
# measure. At some point we can profile and optimise the performance of the
# MatrixStore, but for now it's fast enough.
DENSITY_THRESHOLD = 0.5
def sparse_matrix(shape, integer=False):
"""
Create a new sparse matrix (either integer or floating point) in a form
suitable for populating with data
"""
dtype = numpy.int_ if integer else numpy.float_
return scipy.sparse.lil_matrix(shape, dtype=dtype)
def finalise_matrix(matrix):
"""
Return a copy of a sparse matrix in a form suitable for storage
"""
if get_density(matrix) < DENSITY_THRESHOLD:
matrix = matrix.tocsc()
matrix.sort_indices()
else:
matrix = matrix.toarray()
if is_integer(matrix):
matrix = convert_to_smallest_int_type(matrix)
return matrix
def is_integer(matrix):
"""
Return whether or not the matrix has integer type
"""
return numpy.issubdtype(matrix.dtype, numpy.integer)
def get_density(matrix):
"""
Return the density of a sparse matrix
"""
return matrix.getnnz() / (matrix.shape[0] * matrix.shape[1])
def convert_to_smallest_int_type(matrix):
"""
Convert a matrix to use the smallest integer type capable of representing
all the values currently stored in it
"""
target_type = smallest_int_type_for_range(matrix.min(), matrix.max())
if target_type != matrix.dtype:
matrix = matrix.astype(target_type, copy=False)
return matrix
def smallest_int_type_for_range(minimum, maximum):
"""
Return smallest numpy integer type capable of representing all values in
the supplied range
"""
signed = minimum < 0
abs_max = max(maximum, abs(minimum))
if signed:
if abs_max < 1 << 7:
return numpy.int8
elif abs_max < 1 << 15:
return numpy.int16
elif abs_max < 1 << 31:
return numpy.int32
else:
if abs_max < 1 << 8:
return numpy.uint8
elif abs_max < 1 << 16:
return numpy.uint16
elif abs_max < 1 << 32:
return numpy.uint32
# Return default integer type (other than in the exceptional case that the
# value is too big to store in a signed 64-bit int)
if not signed and abs_max > 1 << 63:
return numpy.uint64
else:
return numpy.int64
| import numpy
import scipy.sparse
def sparse_matrix(shape, integer=False):
"""
Create a new sparse matrix (either integer or floating point) in a form
suitable for populating with data
"""
dtype = numpy.int_ if integer else numpy.float_
return scipy.sparse.lil_matrix(shape, dtype=dtype)
def finalise_matrix(matrix):
"""
Return a copy of a sparse matrix in a form suitable for storage
"""
matrix = matrix.tocsc()
matrix.sort_indices()
if is_integer(matrix):
matrix = convert_to_smallest_int_type(matrix)
return matrix
def is_integer(matrix):
"""
Return whether or not the matrix has integer type
"""
return numpy.issubdtype(matrix.dtype, numpy.integer)
def convert_to_smallest_int_type(matrix):
"""
Convert a matrix to use the smallest integer type capable of representing
all the values currently stored in it
"""
target_type = smallest_int_type_for_range(matrix.min(), matrix.max())
if target_type != matrix.dtype:
matrix = matrix.astype(target_type, copy=False)
return matrix
def smallest_int_type_for_range(minimum, maximum):
"""
Return smallest numpy integer type capable of representing all values in
the supplied range
"""
signed = minimum < 0
abs_max = max(maximum, abs(minimum))
if signed:
if abs_max < 1 << 7:
return numpy.int8
elif abs_max < 1 << 15:
return numpy.int16
elif abs_max < 1 << 31:
return numpy.int32
else:
if abs_max < 1 << 8:
return numpy.uint8
elif abs_max < 1 << 16:
return numpy.uint16
elif abs_max < 1 << 32:
return numpy.uint32
# Return default integer type (other than in the exceptional case that the
# value is too big to store in a signed 64-bit int)
if not signed and abs_max > 1 << 63:
return numpy.uint64
else:
return numpy.int64
| mit | Python |
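The key change is in `finalise_matrix`: storage now follows measured density, sparse CSC below `DENSITY_THRESHOLD` and a plain ndarray above it. A small sketch of that decision point (Python 3 division semantics assumed, matching the module's `from __future__ import division`):

```python
import numpy
import scipy.sparse

m = scipy.sparse.lil_matrix((4, 4), dtype=numpy.int_)
m[0, 0] = 7                                        # one non-zero of 16 cells

density = m.getnnz() / (m.shape[0] * m.shape[1])   # 0.0625
stored = m.tocsc() if density < 0.5 else m.toarray()
assert scipy.sparse.issparse(stored)               # low density stays sparse
```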
1185595ea55f4b4be2f227a37b33d8142bcf92c1 | bump version to 6.0.0.dev0 | StellarCN/py-stellar-base | stellar_sdk/__version__.py | stellar_sdk/__version__.py | """
_____ _______ ______ _ _ _____ _____ _____ _ __
/ ____|__ __| ____| | | | /\ | __ \ / ____| __ \| |/ /
| (___ | | | |__ | | | | / \ | |__) |____| (___ | | | | ' /
\___ \ | | | __| | | | | / /\ \ | _ /______\___ \| | | | <
____) | | | | |____| |____| |____ / ____ \| | \ \ ____) | |__| | . \
|_____/ |_| |______|______|______/_/ \_\_| \_\ |_____/|_____/|_|\_\
"""
__title__ = "stellar-sdk"
__description__ = "The Python Stellar SDK library provides APIs to build transactions and connect to Horizon."
__url__ = "https://github.com/StellarCN/py-stellar-base"
__issues__ = f"{__url__}/issues"
__version__ = "6.0.0.dev0"
__author__ = "Eno, overcat"
__author_email__ = "appweb.cn@gmail.com, 4catcode@gmail.com"
__license__ = "Apache License 2.0"
| """
_____ _______ ______ _ _ _____ _____ _____ _ __
/ ____|__ __| ____| | | | /\ | __ \ / ____| __ \| |/ /
| (___ | | | |__ | | | | / \ | |__) |____| (___ | | | | ' /
\___ \ | | | __| | | | | / /\ \ | _ /______\___ \| | | | <
____) | | | | |____| |____| |____ / ____ \| | \ \ ____) | |__| | . \
|_____/ |_| |______|______|______/_/ \_\_| \_\ |_____/|_____/|_|\_\
"""
__title__ = "stellar-sdk"
__description__ = "The Python Stellar SDK library provides APIs to build transactions and connect to Horizon."
__url__ = "https://github.com/StellarCN/py-stellar-base"
__issues__ = f"{__url__}/issues"
__version__ = "5.0.0"
__author__ = "Eno, overcat"
__author_email__ = "appweb.cn@gmail.com, 4catcode@gmail.com"
__license__ = "Apache License 2.0"
| apache-2.0 | Python |
e589f4347a730abc64f43b3b427ac556643c361f | Use proper mesh | certik/cos-10-poster,certik/ccms-10-poster | graphs/hydrogen_pfem.py | graphs/hydrogen_pfem.py | R_x = {
0: [5, 7, 10, 16, 18, 22, 28, 34],
}
R_y = {
(1, 0): [0.17076672795968373, 0.043356138894337204, 0.041045532914633254, 0.0058943597765469535, 0.00018759868059159412, 1.7891829137695048e-06, 7.0804719309869313e-09, 6.5346731359383625e-09],
(2, 0): [0.044544127593562716, 0.01096517172704424, 0.0089970596630603861, 0.0029545088700716898, 0.00014843931056320037, 1.4554012751771817e-06, 9.5016940149239559e-10, 1.3958578737316429e-10],
(3, 0): [0.015033623095632616, 0.009520817576687414, 0.0069807964180385249, 0.0016484608980817175, 0.00023362526206750084, 1.1715220633229384e-05, 3.2697808772796932e-06, 4.7940380232952551e-07],
}
| R_x = {
0: [5, 7, 13, 15, 21, 25, 29],
}
R_y = {
(1, 0): [0.1843068472879863, 0.06235603500209852, 0.011111956105256449, 0.00050366115986938409, 7.1562463805907583e-06, 4.0298526238213839e-08, 3.9956294661802616e-08],
(2, 0): [0.046105675637867799, 0.013246017078074462, 0.005542485710768652, 0.00067997064168938415, 4.6063134426982399e-05, 1.0777808132356181e-06, 4.2930948795927293e-09],
(3, 0): [0.01713673973745012, 0.014641367192083615, 0.0026518937870781203, 0.00028054793547133139, 0.00010971626504485688, 2.7061872750686056e-06, 1.0121065385088057e-07],
}
| mit | Python |
c98ef2e8b64b7daa94c0e883f71813a3e3226a78 | set current day to none if there is none | whatsthehubbub/rippleeffect,whatsthehubbub/rippleeffect,whatsthehubbub/rippleeffect,whatsthehubbub/rippleeffect,whatsthehubbub/rippleeffect | riskgame/context_processors.py | riskgame/context_processors.py | # -*- coding: utf-8
from riskgame.models import Player, TeamPlayer, EpisodeDay, Game
def player(request):
returnDict = {}
if request.user.is_authenticated():
try:
currentPlayer = Player.objects.get(user=request.user)
except Player.DoesNotExist:
currentPlayer = Player.objects.create(user=request.user)
returnDict['current_player'] = currentPlayer
try:
returnDict['current_teamplayer'] = TeamPlayer.objects.get(player=currentPlayer)
except TeamPlayer.DoesNotExist:
pass
try:
returnDict['current_day'] = EpisodeDay.objects.get(current=True)
except EpisodeDay.DoesNotExist:
returnDict['current_day'] = None
try:
returnDict['current_game'] = Game.objects.get_latest_game()
except Game.DoesNotExist:
pass
# try:
# game = Game.objects.get_latest_game()
# returnDict['game'] = game
# except:
# pass
return returnDict
| # -*- coding: utf-8
from riskgame.models import Player, TeamPlayer, EpisodeDay, Game
def player(request):
returnDict = {}
if request.user.is_authenticated():
try:
currentPlayer = Player.objects.get(user=request.user)
except Player.DoesNotExist:
currentPlayer = Player.objects.create(user=request.user)
returnDict['current_player'] = currentPlayer
try:
returnDict['current_teamplayer'] = TeamPlayer.objects.get(player=currentPlayer)
except TeamPlayer.DoesNotExist:
pass
try:
returnDict['current_day'] = EpisodeDay.objects.get(current=True)
except EpisodeDay.DoesNotExist:
pass
try:
returnDict['current_game'] = Game.objects.get_latest_game()
except Game.DoesNotExist:
pass
# try:
# game = Game.objects.get_latest_game()
# returnDict['game'] = game
# except:
# pass
return returnDict
| mit | Python |
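A context processor is just a function that takes the request and returns a dict Django merges into every `RequestContext`; to take effect it must be registered in settings. A hedged sketch using the modern settings layout (this project predates it and would list the dotted path in `TEMPLATE_CONTEXT_PROCESSORS` instead):

```python
# settings.py (assumed layout for Django >= 1.8)
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'riskgame.context_processors.player',   # the function above
        ],
    },
}]
```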
546d4ac0bbf14da2bc5610aa09b5a75627b297a6 | Add 131 | byung-u/ProjectEuler | 100_to_199/euler_131.py | 100_to_199/euler_131.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 131
There are some prime values, p, for which there exists a positive integer, n, such that the expression n^3 + n^2*p is a perfect cube.
For example, when p = 19, 8^3 + 8^2×19 = 12^3.
What is perhaps most surprising is that for each prime with this property the value of n is unique, and there are only four such primes below one-hundred.
How many primes below one million have this remarkable property?
'''
from util import prime_sieve, is_prime
from itertools import count
def is_perfect_cube(x):
# x = abs(x)
return int(round(x ** (1. / 3))) ** 3 == x
def p131_slow(): # Answer: 173, 68.54s Mac pro 2016
cnt = 0
primes = prime_sieve(1000000)
for p in primes:
for i in count(1):
n = i ** 3
if is_perfect_cube(n + p):
if is_perfect_cube(n ** 2):
# print('[great] ', [p, n, i], n**2, n+p)
cnt += 1
break
if i > 600:
break
print(cnt)
def p131():
# n**3 + p = (n+1)**3
# p = 3n**2 + 3n + 1
cnt = 0
for n in count(1):
p = 3 * (n ** 2) + 3 * n + 1
if p >= 1000000:
break
if is_prime(p):
cnt += 1
print(cnt)
p131()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 131
There are some prime values, p, for which there exists a positive integer, n, such that the expression n^3 + n^2*p is a perfect cube.
For example, when p = 19, 8^3 + 8^2×19 = 12^3.
What is perhaps most surprising is that for each prime with this property the value of n is unique, and there are only four such primes below one-hundred.
How many primes below one million have this remarkable property?
'''
from util import prime_sieve
from itertools import count
def is_perfect_cube(x):
# x = abs(x)
return int(round(x ** (1. / 3))) ** 3 == x
def p131(): # Answer: 173, 68.54s Mac pro 2016
cnt = 0
primes = prime_sieve(1000000)
for p in primes:
for i in count(1):
n = i ** 3
if is_perfect_cube(n + p):
if is_perfect_cube(n ** 2):
# print('[great] ', [p, n, i], n**2, n+p)
cnt += 1
break
if i > 600:
break
print(cnt)
p131()
| mit | Python |
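The rewrite leans on an algebraic fact the comments only sketch: the primes with this property turn out to be exactly differences of consecutive cubes, and (n+1)^3 - n^3 = 3n^2 + 3n + 1, so candidates below 10^6 can be enumerated directly instead of probing cubes. The easy (sufficiency) direction can be checked by hand: with p = 3m^2 + 3m + 1 and n = m^3, the expression is the cube of m^2(m+1):

```python
for m in range(1, 5):
    p = 3 * m * m + 3 * m + 1        # (m+1)**3 - m**3
    n = m ** 3
    assert n ** 3 + n ** 2 * p == (m * m * (m + 1)) ** 3
    # m = 2 reproduces the statement's example: p = 19, n = 8, cube 12**3
```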
10318a11dded5e69c3d9c98325613700c9b3db63 | Fix for dependent package detection. | lgarren/spack,lgarren/spack,iulian787/spack,mfherbst/spack,skosukhin/spack,LLNL/spack,LLNL/spack,EmreAtes/spack,iulian787/spack,tmerrick1/spack,TheTimmy/spack,mfherbst/spack,mfherbst/spack,tmerrick1/spack,lgarren/spack,krafczyk/spack,iulian787/spack,krafczyk/spack,skosukhin/spack,matthiasdiener/spack,lgarren/spack,EmreAtes/spack,matthiasdiener/spack,TheTimmy/spack,iulian787/spack,tmerrick1/spack,skosukhin/spack,LLNL/spack,krafczyk/spack,TheTimmy/spack,LLNL/spack,tmerrick1/spack,matthiasdiener/spack,LLNL/spack,TheTimmy/spack,lgarren/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,skosukhin/spack,TheTimmy/spack,krafczyk/spack,skosukhin/spack,EmreAtes/spack,mfherbst/spack,tmerrick1/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,matthiasdiener/spack | lib/spack/spack/cmd/dependents.py | lib/spack/spack/cmd/dependents.py | ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import llnl.util.tty as tty
import spack
import spack.cmd
description = "Show dependent packages."
def setup_parser(subparser):
subparser.add_argument(
'spec', nargs=argparse.REMAINDER, help="specs to list dependencies of.")
def dependents(parser, args):
specs = spack.cmd.parse_specs(args.spec, concretize=True)
if len(specs) != 1:
tty.die("spack dependents takes only one spec.")
fmt = '$_$@$%@$+$=$#'
deps = [d.format(fmt) for d in specs[0].package.installed_dependents]
tty.msg("Dependents of %s" % specs[0].format(fmt), *deps)
| lgpl-2.1 | Python |
|
4f3c3755d9fcbfd9ce0551c19bb893e7ba73db91 | Add missing setup.py | leifdenby/numpy,mwiebe/numpy,seberg/numpy,ssanderson/numpy,chatcannon/numpy,dimasad/numpy,nguyentu1602/numpy,dwf/numpy,cowlicks/numpy,Anwesh43/numpy,MSeifert04/numpy,numpy/numpy,NextThought/pypy-numpy,NextThought/pypy-numpy,WarrenWeckesser/numpy,ContinuumIO/numpy,GaZ3ll3/numpy,MSeifert04/numpy,mattip/numpy,Dapid/numpy,KaelChen/numpy,grlee77/numpy,jonathanunderwood/numpy,tdsmith/numpy,solarjoe/numpy,Yusa95/numpy,rmcgibbo/numpy,mindw/numpy,stefanv/numpy,matthew-brett/numpy,dimasad/numpy,pdebuyl/numpy,Linkid/numpy,anntzer/numpy,embray/numpy,cjermain/numpy,astrofrog/numpy,jankoslavic/numpy,ddasilva/numpy,pelson/numpy,matthew-brett/numpy,ChanderG/numpy,endolith/numpy,has2k1/numpy,moreati/numpy,rherault-insa/numpy,rhythmsosad/numpy,ChristopherHogan/numpy,b-carter/numpy,Srisai85/numpy,mhvk/numpy,mortada/numpy,larsmans/numpy,mathdd/numpy,numpy/numpy-refactor,ekalosak/numpy,jonathanunderwood/numpy,GrimDerp/numpy,ddasilva/numpy,ewmoore/numpy,brandon-rhodes/numpy,chatcannon/numpy,embray/numpy,pbrod/numpy,bertrand-l/numpy,dwf/numpy,cjermain/numpy,grlee77/numpy,musically-ut/numpy,Linkid/numpy,njase/numpy,dwillmer/numpy,numpy/numpy-refactor,groutr/numpy,BMJHayward/numpy,groutr/numpy,pelson/numpy,nbeaver/numpy,cowlicks/numpy,kiwifb/numpy,brandon-rhodes/numpy,jakirkham/numpy,sinhrks/numpy,tynn/numpy,ahaldane/numpy,ContinuumIO/numpy,stefanv/numpy,madphysicist/numpy,dimasad/numpy,Srisai85/numpy,AustereCuriosity/numpy,bmorris3/numpy,yiakwy/numpy,ahaldane/numpy,immerrr/numpy,jschueller/numpy,MaPePeR/numpy,bringingheavendown/numpy,pdebuyl/numpy,andsor/numpy,Anwesh43/numpy,sonnyhu/numpy,BabeNovelty/numpy,ahaldane/numpy,bertrand-l/numpy,immerrr/numpy,empeeu/numpy,Eric89GXL/numpy,ViralLeadership/numpy,moreati/numpy,mwiebe/numpy,maniteja123/numpy,ajdawson/numpy,larsmans/numpy,bmorris3/numpy,mattip/numpy,ajdawson/numpy,chatcannon/numpy,dch312/numpy,rajathkumarmp/numpy,andsor/numpy,shoyer/numpy,grlee77/numpy,jorisvandenbossche/numpy,BabeNovelty/numpy,Eric89GXL/numpy,githubmlai/numpy,numpy/numpy-refactor,sigma-random/numpy,SiccarPoint/numpy,moreati/numpy,empeeu/numpy,empeeu/numpy,ogrisel/numpy,nguyentu1602/numpy,behzadnouri/numpy,stuarteberg/numpy,ChanderG/numpy,naritta/numpy,Eric89GXL/numpy,chiffa/numpy,cjermain/numpy,rmcgibbo/numpy,WarrenWeckesser/numpy,ssanderson/numpy,cjermain/numpy,kiwifb/numpy,ESSS/numpy,AustereCuriosity/numpy,Dapid/numpy,rudimeier/numpy,CMartelLML/numpy,mortada/numpy,has2k1/numpy,mingwpy/numpy,dato-code/numpy,utke1/numpy,charris/numpy,endolith/numpy,dato-code/numpy,NextThought/pypy-numpy,gfyoung/numpy,tynn/numpy,hainm/numpy,ogrisel/numpy,MichaelAquilina/numpy,argriffing/numpy,rajathkumarmp/numpy,sigma-random/numpy,MaPePeR/numpy,ajdawson/numpy,groutr/numpy,has2k1/numpy,maniteja123/numpy,sonnyhu/numpy,anntzer/numpy,SiccarPoint/numpy,gmcastil/numpy,mingwpy/numpy,yiakwy/numpy,tdsmith/numpy,shoyer/numpy,kiwifb/numpy,bringingheavendown/numpy,rhythmsosad/numpy,MaPePeR/numpy,mingwpy/numpy,simongibbons/numpy,trankmichael/numpy,stefanv/numpy,abalkin/numpy,numpy/numpy-refactor,ajdawson/numpy,sinhrks/numpy,astrofrog/numpy,jankoslavic/numpy,mathdd/numpy,mathdd/numpy,dwf/numpy,solarjoe/numpy,pelson/numpy,joferkington/numpy,WarrenWeckesser/numpy,larsmans/numpy,brandon-rhodes/numpy,sonnyhu/numpy,bertrand-l/numpy,mhvk/numpy,jschueller/numpy,jorisvandenbossche/numpy,rhythmsosad/numpy,numpy/numpy,tdsmith/numpy,mattip/numpy,KaelChen/numpy,astrofrog/numpy,dato-code/numpy,pbrod/numpy,jonathanunderwood/numpy,embray/numpy,mindw/numpy,b-carter/numpy,Chri
stopherHogan/numpy,charris/numpy,NextThought/pypy-numpy,hainm/numpy,leifdenby/numpy,shoyer/numpy,matthew-brett/numpy,rherault-insa/numpy,jankoslavic/numpy,Yusa95/numpy,Srisai85/numpy,pizzathief/numpy,skwbc/numpy,rmcgibbo/numpy,githubmlai/numpy,skymanaditya1/numpy,dwf/numpy,CMartelLML/numpy,ChanderG/numpy,ddasilva/numpy,stuarteberg/numpy,ESSS/numpy,gmcastil/numpy,kirillzhuravlev/numpy,ChristopherHogan/numpy,rajathkumarmp/numpy,tdsmith/numpy,jorisvandenbossche/numpy,hainm/numpy,anntzer/numpy,utke1/numpy,pdebuyl/numpy,cowlicks/numpy,nbeaver/numpy,dch312/numpy,nguyentu1602/numpy,matthew-brett/numpy,WarrenWeckesser/numpy,has2k1/numpy,ahaldane/numpy,ViralLeadership/numpy,sinhrks/numpy,argriffing/numpy,Linkid/numpy,felipebetancur/numpy,kirillzhuravlev/numpy,joferkington/numpy,larsmans/numpy,jschueller/numpy,maniteja123/numpy,Anwesh43/numpy,SunghanKim/numpy,matthew-brett/numpy,jakirkham/numpy,cowlicks/numpy,mhvk/numpy,Srisai85/numpy,dato-code/numpy,MichaelAquilina/numpy,ewmoore/numpy,felipebetancur/numpy,sinhrks/numpy,abalkin/numpy,BabeNovelty/numpy,b-carter/numpy,BabeNovelty/numpy,endolith/numpy,chiffa/numpy,joferkington/numpy,sonnyhu/numpy,MaPePeR/numpy,pyparallel/numpy,ahaldane/numpy,stefanv/numpy,Dapid/numpy,musically-ut/numpy,rudimeier/numpy,jschueller/numpy,kirillzhuravlev/numpy,dch312/numpy,gfyoung/numpy,grlee77/numpy,rudimeier/numpy,WarrenWeckesser/numpy,dimasad/numpy,GrimDerp/numpy,ekalosak/numpy,numpy/numpy,bringingheavendown/numpy,ChanderG/numpy,seberg/numpy,tacaswell/numpy,naritta/numpy,rhythmsosad/numpy,bmorris3/numpy,pelson/numpy,githubmlai/numpy,CMartelLML/numpy,mhvk/numpy,jankoslavic/numpy,drasmuss/numpy,leifdenby/numpy,SunghanKim/numpy,mindw/numpy,SiccarPoint/numpy,GaZ3ll3/numpy,ekalosak/numpy,AustereCuriosity/numpy,pbrod/numpy,WillieMaddox/numpy,MichaelAquilina/numpy,behzadnouri/numpy,musically-ut/numpy,dwillmer/numpy,WillieMaddox/numpy,njase/numpy,seberg/numpy,chiffa/numpy,Yusa95/numpy,ViralLeadership/numpy,simongibbons/numpy,tynn/numpy,bmorris3/numpy,ogrisel/numpy,WillieMaddox/numpy,ewmoore/numpy,felipebetancur/numpy,KaelChen/numpy,rherault-insa/numpy,BMJHayward/numpy,madphysicist/numpy,mindw/numpy,SunghanKim/numpy,skymanaditya1/numpy,tacaswell/numpy,pyparallel/numpy,mingwpy/numpy,stuarteberg/numpy,ekalosak/numpy,naritta/numpy,madphysicist/numpy,dwillmer/numpy,GrimDerp/numpy,pizzathief/numpy,shoyer/numpy,ssanderson/numpy,pelson/numpy,MichaelAquilina/numpy,mathdd/numpy,jorisvandenbossche/numpy,rgommers/numpy,ogrisel/numpy,tacaswell/numpy,simongibbons/numpy,naritta/numpy,anntzer/numpy,MSeifert04/numpy,jorisvandenbossche/numpy,MSeifert04/numpy,skwbc/numpy,musically-ut/numpy,pizzathief/numpy,Linkid/numpy,pbrod/numpy,jakirkham/numpy,stefanv/numpy,solarjoe/numpy,ESSS/numpy,madphysicist/numpy,pyparallel/numpy,rgommers/numpy,Anwesh43/numpy,GaZ3ll3/numpy,BMJHayward/numpy,GaZ3ll3/numpy,SiccarPoint/numpy,githubmlai/numpy,jakirkham/numpy,immerrr/numpy,numpy/numpy,dwf/numpy,ewmoore/numpy,rmcgibbo/numpy,utke1/numpy,pdebuyl/numpy,mortada/numpy,simongibbons/numpy,astrofrog/numpy,rgommers/numpy,argriffing/numpy,GrimDerp/numpy,mattip/numpy,SunghanKim/numpy,ewmoore/numpy,CMartelLML/numpy,dwillmer/numpy,embray/numpy,mwiebe/numpy,immerrr/numpy,brandon-rhodes/numpy,rudimeier/numpy,dch312/numpy,skymanaditya1/numpy,trankmichael/numpy,numpy/numpy-refactor,nguyentu1602/numpy,felipebetancur/numpy,ChristopherHogan/numpy,trankmichael/numpy,pizzathief/numpy,endolith/numpy,rgommers/numpy,grlee77/numpy,embray/numpy,ogrisel/numpy,mhvk/numpy,andsor/numpy,yiakwy/numpy,drasmuss/numpy,seberg/numpy,hainm/numpy,njase
/numpy,stuarteberg/numpy,charris/numpy,KaelChen/numpy,behzadnouri/numpy,joferkington/numpy,shoyer/numpy,gmcastil/numpy,abalkin/numpy,ContinuumIO/numpy,empeeu/numpy,yiakwy/numpy,andsor/numpy,gfyoung/numpy,madphysicist/numpy,astrofrog/numpy,MSeifert04/numpy,sigma-random/numpy,skwbc/numpy,mortada/numpy,rajathkumarmp/numpy,nbeaver/numpy,simongibbons/numpy,jakirkham/numpy,pizzathief/numpy,Eric89GXL/numpy,Yusa95/numpy,sigma-random/numpy,skymanaditya1/numpy,pbrod/numpy,kirillzhuravlev/numpy,trankmichael/numpy,charris/numpy,drasmuss/numpy,BMJHayward/numpy | numpy/numarray/setup.py | numpy/numarray/setup.py |
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numarray',parent_package,top_path)
config.add_data_files('include/numarray/*.h')
# Configure fftpack_lite
config.add_extension('_capi',
sources=['_capi.c']
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause | Python |
|
735ce7f11b70bf8e916bad5610093b5886c57db6 | Add quickseg.py | simmimourya1/cyvlfeat,simmimourya1/cyvlfeat,simmimourya1/cyvlfeat | cyvlfeat/quickshift/quickseg.py | cyvlfeat/quickshift/quickseg.py | import numpy as np
from cyvlfeat.quickshift import quickshift
from cyvlfeat.quickshift import flatmap
from cyvlfeat.quickshift import imseg
def quickseg(image, ratio, kernel_size, max_dist):
"""
Produce a quickshift segmentation of a greyscale image.
Parameters
----------
image : [H, W] or [H, W, 1] `float64` `ndarray`
Input image, Greyscale. A single channel, greyscale,
`float64` numpy array (ndarray).
ratio : `double`
Trade-off between spatial consistency and color consistency.
Small ratio gives more importance to the spatial component.
Note that distance calculations happen in unnormalized image
coordinates, so RATIO should be adjusted to compensate for
larger images.
kernel_size : `double`
The standard deviation of the parzen window density estimator.
max_dist : `double`
The maximum distance between nodes in the quickshift tree. Used
to cut links in the tree to form the segmentation.
Returns
-------
i_seg :
A color image where each pixel is labeled by the mean color in its
region.
labels : [H, W] `float64` `ndarray`.
Array of the same size of image.
A labeled image where the number corresponds to the cluster identity.
maps : [H, W] `float64` `ndarray`.
Array of the same size of image.
`maps` as returned by `quickshift`: For each pixel, the pointer to the
nearest pixel which increases the estimate of the density.
gaps : [H, W] `float64` `ndarray`.
Array of the same size of image.
`gaps` as returned by `quickshift`: For each pixel, the distance to
the nearest pixel which increases the estimate of the density.
estimate : [H, W] `float64` `ndarray`.
Array of the same size of image.
`estimate` as returned by `quickshift`: The estimate of the density.
"""
# validate image
if image.dtype != np.float64:
raise ValueError('Image array must be of Double precision')
# image = np.asarray(image, dtype=np.float64)
# Add less than one pixel noise to break ties caused by
# constant regions in an arbitrary fashions
noise = np.random.random(image.shape) / 2250
image += noise
# For now we're dealing with Greyscale images only.
if image.shape[2] == 1:
imagex = ratio * image
# Perform quickshift to obtain the segmentation tree, which is already cut by
# maxdist. If a pixel has no nearest neighbor which increases the density, its
# parent in the tree is itself, and gaps is inf.
(maps, gaps, estimate) = quickshift(image, kernel_size, max_dist)
# Follow the parents of the tree until we have reached the root nodes
# mapped: a labeled segmentation where the labels are the indices of the modes
# in the original image.
# labels: mapped after having been renumbered 1: nclusters and reshaped into a
# vector
(mapped, labels) = flatmap(maps)
labels = np.resize(labels, maps.shape)
# imseg builds an average description of the region by color
i_seg = imseg(image, labels)
return i_seg, labels, maps, gaps, estimate
| bsd-2-clause | Python |
|
766569894a5c849c449971aca5c7d999e7019aef | check required packages | SasView/sasview,lewisodriscoll/sasview,lewisodriscoll/sasview,SasView/sasview,SasView/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,lewisodriscoll/sasview | check_packages.py | check_packages.py | """
Checking and reinstalling the external packages
"""
import os
import sys
if sys.platform =='win32':
IS_WIN = True
else:
IS_WIN = False
try:
import setuptools
print "==> setuptools-%s installed."% setuptools.__version__
#if setuptools.__version__ != '0.6c11':
# print "!!! Recommend to install 0.6c11."
except:
print "!!! setuptools-0.6c11 not installed."
try:
import pyparsing
if pyparsing.__version__ == '1.5.5':
print "==> pyparsing-%s installed."% pyparsing.__version__
else:
print "!!! 1.5.5 recommended."
except:
print "==> pyparsing-1.5.5 not installed."
try:
import html5lib
if html5lib.__version__.count('0.95') > 0:
print "==> html5lib-%s installed."% html5lib.__version__
else:
print "!!! 0.95 recommended."
except:
print "!!! html5lib-0.95 not installed."
try:
import pyPdf
print "==> pyPdf installed."
except:
print "!!! pyPdf-1.13 not installed (optional)."
try:
import reportlab
print "==> reportlab installed."
except:
print "!!! reportlab-2.5 not installed."
try:
import lxml
print "==> lxml installed."
except:
print "!!! lxml-2.3 not installed."
try:
import PIL
print "==> PIL installed."
except:
print "!!! PIL-1.1.7 not installed."
try:
import pylint
print "==> pylint installed."
except:
print "!!! pylint not installed (optional)."
#os.system("START /B C:\python26\Scripts\easy_install pylint==0.25.0")
try:
import periodictable
print "==> periodictable-%s installed."% periodictable.__version__
if periodictable.__version__ != '1.3.0':
print "!!! Recommend to install 1.3.0."
except:
print "!!! periodictable-1.3.0 is not installed."
try:
import numpy
print "==> numpy-%s installed."% numpy.__version__
if numpy.__version__ != '1.6.1':
print "==> Recommend to install 1.6.1 (1.5.1 for MAC)."
except:
print "!!! numpy-1.6.1 not installed (1.5.1 for MAC)."
try:
import scipy
print "==> scipy-%s installed."% scipy.__version__
if scipy.__version__ != '0.10.1':
print "!!! Recommend to install 0.10.1 (1.10.0 for MAC)."
except:
print "!!! scipy-0.10.1 not installed (1.10.0 for MAC)."
try:
import wx
print "==> wxpython-%s installed."% wx.__version__
if wx.__version__ != '2.8.12.1':
print "!!! Recommend to install unicode-2.8.12.1."
except:
print "!!! wxpython-unicode-2.8.12.1 not installed."
try:
import matplotlib
print "==> matplotlib-%s installed."% matplotlib.__version__
if matplotlib.__version__ != '1.1.0':
print "!!! Recommend to install 1.1.0 (1.0.1 for MAC) or higher."
except:
print "!!! matplotlib-1.1.0 not installed (1.0.1 for MAC)."
try:
from ho import pisa
if pisa.__version__ == '3.0.27':
print "==> pisa-%s installed."% pisa.__version__
else:
print "!!! Incorrect version of pisa installed."
print "!!! 3.0.27 required."
except:
print "!!! pisa-3.0.27 not installed."
if IS_WIN:
try:
import pywin
print "==> pywin32 installed."
except:
print "!!! pywin32 not installed. Please install pywin32-217."
try:
import py2exe
print "==> py2exe-%s installed."% py2exe.__version__
if py2exe.__version__ != '0.6.9':
print "!!! Recommend to install 0.6.9."
except:
print "!!! py2exe-0.6.9 not installed. Installing..."
try:
import comtypes
print "==> comtypes-%s installed."% comtypes.__version__
if comtypes.__version__ != '0.6.2':
print "!!! Recommend to install 0.6.2."
except:
print "!!! comtypes-0.6.2 not installed. Installing..."
print "==> Require subversion = 1.6.0 or lower version."
print "Installed:"
os.system("CALL svn --version --quiet")
print "==> Checking gcc compiler ( >= 4.2 required):"
os.system("CALL gcc -v")
| bsd-3-clause | Python |
|
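The script above repeats one idiom roughly twenty times: try the import, read `__version__`, compare against a pin. A hedged, generic sketch of that idiom in the script's own Python 2 style (the pinned versions here are illustrative):

```python
def check_package(name, wanted=None):
    """Report whether `name` imports and, optionally, matches `wanted`."""
    try:
        module = __import__(name)
    except ImportError:
        print "!!! %s not installed." % name
        return
    version = getattr(module, '__version__', 'unknown')
    print "==> %s-%s installed." % (name, version)
    if wanted is not None and version != wanted:
        print "!!! Recommend to install %s." % wanted

for name, wanted in [('numpy', '1.6.1'), ('scipy', '0.10.1'), ('wx', None)]:
    check_package(name, wanted)
```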
8080128d2ca5718ac971160bf964c3ca73b235b7 | add downloader | zenja/benchmarking-cloud-storage-systems | operators/downloader.py | operators/downloader.py | import os
from drivers import driver
from file_utils import file_util
class Downloader(object):
def __init__(self, server_driver):
"""Init a Uploader object
Args:
server_driver: a driver already connected to cloud service
"""
        if not isinstance(server_driver, driver.Driver):
            raise TypeError('server_driver should be an instance of drivers.driver.Driver')
self.driver = server_driver
def download(self, remote_filename, local_filename=None, local_dir=None):
if local_filename is None and local_dir is None:
raise AttributeError('Need at least one of local_filename or local_dir.')
if local_dir:
local_filename = os.path.join(local_dir, file_util.path_leaf(remote_filename))
self.driver.download(local_filename=local_filename,
remote_filename=remote_filename)
| apache-2.0 | Python |
|
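Usage would look roughly like the sketch below. `DummyDriver` is a hypothetical stand-in for a concrete connected driver, and the sketch assumes `drivers.driver.Driver` can be subclassed with a no-argument constructor:

```python
from drivers import driver
from operators.downloader import Downloader

class DummyDriver(driver.Driver):        # hypothetical minimal subclass
    def download(self, local_filename, remote_filename):
        self.last = (remote_filename, local_filename)

d = Downloader(DummyDriver())
d.download('results/run1.csv', local_dir='/tmp')
assert d.driver.last == ('results/run1.csv', '/tmp/run1.csv')

try:
    Downloader(object())                 # rejected: not a Driver instance
except TypeError:
    pass
```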
49071cbeda14133019b0969e7499ea8f26cdb1cf | Create OogOrderedSet.py | longanimit/npni4processing | OogOrderedSet.py | OogOrderedSet.py |
# -*- coding: utf8-*-
# coding: utf-8
__author__ = "Dominique Dutoit"
__date__ = "$5 march 2014$"
from collections import MutableSet
class OrderedSet(MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
"""def __getslice__(self, x, y):
'''x>=0 & y>=0...'''
if x>len(self) or y <= x : return []
count=-1 ; res = []
for i in self.__iter__():
count+=1
if x > count : continue
elif y <= count : break
else: res.append(i)
return res"""
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
if __name__ == '__main__':
from time import clock
s = OrderedSet('abracadaba')
t = OrderedSet('simsbalabim')
assert list(s) == ['a', 'b', 'r', 'c', 'd']
assert list(s.__reversed__()) == ['d', 'c', 'r', 'b', 'a']
assert len(s) == 5
print '__iter__ and __ reversed : pass'
assert s | t == OrderedSet(['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l'])
assert s & t == OrderedSet(['b', 'a'])
assert t & s == OrderedSet(['a', 'b']) #
assert s - t == OrderedSet(['r', 'c', 'd'])
print 'logical operators : pass'
mylist = []
for i in xrange(10000):
[mylist.append(i) for j in xrange(int(i**0.3))]
from random import shuffle
shuffle(mylist)
assert len(mylist) == 116864
begin = clock()
a=set()
for i in mylist:
a.add(i)
assert len(a) == 9999
setduration = clock()-begin
print 'duration for ', a.__class__.__name__, setduration
a = OrderedSet()
begin = clock()
for i in mylist:
a.add(i)
assert len(a) == 9999
orderedsetduration = clock()-begin
print 'duration for ', a.__class__.__name__, orderedsetduration
#count=0
#for i in a:
# print i,
# count+=1
# if count>=10:break
#print
#print a[0:10]
from collections import defaultdict
begin = clock()
dico=defaultdict(OrderedSet)
for i in mylist:
dico['truc'].add(i)
assert len(a) == 9999
defaultdicduration = clock()-begin
print 'duration for ', dico.__class__.__name__, defaultdicduration
try:
assert 2.5*setduration > orderedsetduration #python 2.7
    except: pass # pypy gives ~10x here because pypy's built-in set is much faster
| artistic-2.0 | Python |
|
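This is essentially Raymond Hettinger's classic OrderedSet recipe: insertion order lives in a circular doubly linked list whose nodes are plain three-element lists `[key, prev, next]`, with `self.end` as the sentinel, while `self.map` gives O(1) membership and O(1) unlinking. The invariant, sketched against the class above:

```python
s = OrderedSet('abc')
node = s.map['b']             # a linked-list node: [key, prev_node, next_node]
assert node[0] == 'b'
assert node[1][0] == 'a'      # previous node holds 'a'
assert node[2][0] == 'c'      # next node holds 'c'

s.discard('b')                # O(1) unlink: a <-> c
assert list(s) == ['a', 'c']
```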
5be32f4022135a10585cf094b6fb8118dd87a2f6 | Add files via upload (#396) | TheAlgorithms/Python | ciphers/Atbash.py | ciphers/Atbash.py | def Atbash():
inp=raw_input("Enter the sentence to be encrypted ")
output=""
for i in inp:
extract=ord(i)
if extract>=65 and extract<=90:
output+=(unichr(155-extract))
elif extract>=97 and extract<=122:
output+=(unichr(219-extract))
else:
output+=i
print output
Atbash() | mit | Python |
|
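The magic constants come from symmetric ASCII sums: 'A'+'Z' = 65+90 = 155 and 'a'+'z' = 97+122 = 219, so `155 - ord(c)` (or `219 - ord(c)`) reflects a letter about the middle of its alphabet. A hedged Python 3 equivalent of the same cipher using a translation table:

```python
import string

# Map A..Z -> Z..A and a..z -> z..a; everything else passes through.
ATBASH = str.maketrans(
    string.ascii_uppercase + string.ascii_lowercase,
    string.ascii_uppercase[::-1] + string.ascii_lowercase[::-1],
)

def atbash(text):
    return text.translate(ATBASH)

assert atbash('Attack at dawn!') == 'Zggzxp zg wzdm!'
assert atbash(atbash('Atbash is an involution')) == 'Atbash is an involution'
```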
8d1016437e87794fb39b447b51427bae98a51bc2 | Add one public IP provider | Saphyel/ipteller,Saphyel/ipteller | classes/jsonip.py | classes/jsonip.py | from json import load
from urllib2 import urlopen
class JsonIp:
def __init__(self):
url = 'https://jsonip.com/'
uri = urlopen(url)
response = load(uri)
self.ip = response["ip"]
# self.ip = '1.1.1.1'
| mit | Python |
|
b1be1bbf785406f4d286c7eb85ea459309ea03a2 | Fix file hierarchy. | hakanu/scripts | batch_image_resizer2/batch_image_resizer.py | batch_image_resizer2/batch_image_resizer.py | """Resize images in a folder using imagemagick command line tools.
http://hakanu.net
"""
import glob
import os
def main():
print 'Started'
images = glob.glob("/home/h/Desktop/all_karikatur_resized/*.jpg")
counter = 0
for image in images:
print 'Processing: ', image
index = image[image.rfind('/') + 1:image.rfind('.jpg')]
print 'index: ', index
os.system("convert " + index + ".jpg -resize 128x128 resize_128_" + index + ".jpg")
counter += 1
if counter % 100 == 0:
print 'Completed: ', counter
print '\n'
main()
| apache-2.0 | Python |
|
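Note the remaining pitfall in the script above: `glob` yields absolute paths, but the `convert` command is built from the bare `index`, so it only works when run from inside the image directory. A hedged, path-safe variant of the same loop (`convert` is ImageMagick; the directory is the script's own example path):

```python
import glob
import os
import subprocess

SRC_DIR = '/home/h/Desktop/all_karikatur_resized'   # from the script above

for src in glob.glob(os.path.join(SRC_DIR, '*.jpg')):
    name = os.path.basename(src)
    dst = os.path.join(SRC_DIR, 'resize_128_' + name)
    # An argument list avoids shell-quoting issues with odd filenames.
    subprocess.check_call(['convert', src, '-resize', '128x128', dst])
```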
98b85a9fd8d5082e40996dfba0359b0ec32a9267 | Add solution for "Cakes" kata https://www.codewars.com/kata/525c65e51bf619685c000059 | davidlukac/codekata-python | codewars/cakes.py | codewars/cakes.py | # Cakes
# https://www.codewars.com/kata/525c65e51bf619685c000059
import math
import unittest
from typing import Dict
def cakes_1(recipe, available):
# type: (Dict[str, int], Dict[str, int]) -> int
lowest_available = math.inf
for i, a in recipe.items():
if i in available.keys():
av = available.get(i) / a
if lowest_available > av:
lowest_available = av
else:
lowest_available = 0
break
return int(lowest_available)
def cakes(recipe, available):
# type: (Dict[str, int], Dict[str, int]) -> int
return min(available.get(i, 0) // recipe[i] for i in recipe)
class TestCakes(unittest.TestCase):
def test(self):
recipe = {"flour": 500, "sugar": 200, "eggs": 1}
available = {"flour": 1200, "sugar": 1200, "eggs": 5, "milk": 200}
self.assertEquals(cakes(recipe, available), 2, 'Wrong result for example #1')
recipe = {"apples": 3, "flour": 300, "sugar": 150, "milk": 100, "oil": 100}
available = {"sugar": 500, "flour": 2000, "milk": 2000}
self.assertEquals(cakes(recipe, available), 0, 'Wrong result for example #2')
| mit | Python |
|
5b97e6fc0446912d5b9b8da65e60d06165ed1b8b | Add profile tests | ltowarek/budget-supervisor | budgetsupervisor/users/tests/test_models.py | budgetsupervisor/users/tests/test_models.py | from users.models import Profile
def test_profile_is_created_when_user_is_created(user_foo):
assert len(Profile.objects.all()) == 1
assert hasattr(user_foo, "profile")
def test_profile_is_not_created_when_user_is_updated(user_foo):
assert len(Profile.objects.all()) == 1
user_foo.username = "abc"
user_foo.save()
assert len(Profile.objects.all()) == 1
def test_profile_str(user_foo):
assert str(user_foo.profile) == str(user_foo)
| mit | Python |
|
275adbea5477bbc6938e59edab23e1df182435ea | Create split-array-with-equal-sum.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,jaredkoontz/leetcode,kamyu104/LeetCode | Python/split-array-with-equal-sum.py | Python/split-array-with-equal-sum.py | # Time: O(n^2)
# Space: O(n)
class Solution(object):
def splitArray(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) < 7:
return False
accumulated_sum = [0] * len(nums)
accumulated_sum[0] = nums[0]
for i in xrange(1, len(nums)):
accumulated_sum[i] = accumulated_sum[i-1] + nums[i]
for j in xrange(3, len(nums)-3):
lookup = set()
for i in xrange(1, j-1):
if accumulated_sum[i-1] == accumulated_sum[j-1] - accumulated_sum[i]:
lookup.add(accumulated_sum[i-1])
for k in xrange(j+2, len(nums)-1):
if accumulated_sum[-1] - accumulated_sum[k] == accumulated_sum[k-1] - accumulated_sum[j] and \
accumulated_sum[k - 1] - accumulated_sum[j] in lookup:
return True
return False
| mit | Python |
|
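The algorithm is the standard prefix-sum plus hash-set trick: running sums make any slice sum O(1); for each middle cut j, the left loop caches every left sum for which the two left slices balance, and the right loop looks up a match, giving O(n^2) overall instead of the naive O(n^3). A quick usage check against the classic example:

```python
# Splits at i=1, j=3, k=5: all four slices sum to 1.
assert Solution().splitArray([1, 2, 1, 2, 1, 2, 1])
assert not Solution().splitArray([1, 2, 1, 2, 1, 2, 2])
```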
5e427315f46c026dd3b72b49349d3dcdbf04d138 | add financial_insights | hugochan/KATE | financial_insights.py | financial_insights.py | '''
Created on Apr, 2017
@author: hugo
'''
import numpy as np
def calc_ranks(x):
"""Given a list of items, return a list(in ndarray type) of ranks.
"""
n = len(x)
index = list(zip(*sorted(list(enumerate(x)), key=lambda d:d[1], reverse=True))[0])
rank = np.zeros(n)
rank[index] = range(1, n + 1)
return rank
def rank_bank_topic(bank_doc_map, doc_topic_dist):
"""Rank topics for banks
"""
bank_topic_ranks = {}
for each_bank in bank_doc_map:
rank = []
for each_doc in bank_doc_map[each_bank]:
rank.append(calc_ranks(doc_topic_dist[each_doc]))
rank = np.r_[rank]
# compute ranking score
bank_topic_ranks[each_bank] = np.sum(1. / rank, axis=0)
return bank_topic_ranks
if __name__ == '__main__':
n = 10
bank_doc_map = {'bank_0': ['doc_0', 'doc_1'], 'bank_1': ['doc_2', 'doc_3', 'doc_4']}
doc_topic_dist = dict([('doc_%s' % i, np.random.randn(n)) for i in range(5)])
rank = rank_bank_topic(bank_doc_map, doc_topic_dist)
| bsd-3-clause | Python |
|
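`calc_ranks` builds a rank vector (1 for the largest score) via sort/zip, and `rank_bank_topic` then sums reciprocal ranks per bank, an MRR-style aggregate. With numpy the ranking step is a one-liner over argsort of the negated scores; a hedged equivalent:

```python
import numpy as np

def calc_ranks_np(x):
    """Rank vector: 1 for the largest entry, len(x) for the smallest."""
    rank = np.empty(len(x))
    rank[np.argsort(-np.asarray(x))] = np.arange(1, len(x) + 1)
    return rank

assert list(calc_ranks_np([0.2, 0.9, 0.5])) == [3.0, 1.0, 2.0]
```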
6c133d4de6a79eab6bfc2da9ff9a0045e0a0994d | add problem hanckerrank 009 | caoxudong/code_practice,caoxudong/code_practice,caoxudong/code_practice,caoxudong/code_practice | hackerrank/009_sherlock_and_the_beast.py | hackerrank/009_sherlock_and_the_beast.py | #!/bin/python3
"""
https://www.hackerrank.com/challenges/sherlock-and-the-beast?h_r=next-challenge&h_v=zen
Sherlock Holmes suspects his archenemy, Professor Moriarty, is once again plotting something diabolical. Sherlock's companion, Dr. Watson, suggests Moriarty may be responsible for MI6's recent issues with their supercomputer, The Beast.
Shortly after resolving to investigate, Sherlock receives a note from Moriarty boasting about infecting The Beast with a virus; however, he also gives him a clue—a number, N. Sherlock determines the key to removing the virus is to find the largest Decent Number having N digits.
A Decent Number has the following properties:
Its digits can only be 3's and/or 5's.
The number of 3's it contains is divisible by 5.
The number of 5's it contains is divisible by 3.
If there are more than one such number, we pick the largest one.
Moriarty's virus shows a clock counting down to The Beast's destruction, and time is running out fast. Your task is to help Sherlock find the key before The Beast is destroyed!
Constraints
1<=T<=20
1<=N<=100000
Input Format
The first line is an integer, T, denoting the number of test cases.
The T subsequent lines each contain an integer, N, detailing the number of digits in the number.
Output Format
Print the largest Decent Number having N digits; if no such number exists, tell Sherlock by printing -1.
Sample Input
4
1
3
5
11
Sample Output
-1
555
33333
55555533333
Explanation
For N = 1, there is no decent number having 1 digit (so we print -1).
For N = 3, 555 is the only possible number. The number 5 appears three times in this number, so our count of 5's is evenly divisible by 3 (Decent Number Property 3).
For N = 5, 33333 is the only possible number. The number 3 appears five times in this number, so our count of 3's is evenly divisible by 5 (Decent Number Property 2).
For N = 11, 55555533333 and all permutations of these digits are valid numbers; among them, the given number is the largest one.
"""
import sys
t = int(input().strip())
for a0 in range(t):
n = int(input().strip())
| mit | Python |
|
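The committed file is only the input scaffold; the search itself is greedy: since 5s sort to the front of the largest number, take the biggest count of 5s that is a multiple of 3 while the remaining digits (all 3s) form a multiple of 5. A hedged completion of the loop body:

```python
def decent_number(n):
    """Largest N-digit number of 5s then 3s, or '-1' if impossible."""
    # Try the biggest multiple-of-3 count of fives first.
    for fives in range(n - n % 3, -1, -3):
        threes = n - fives
        if threes % 5 == 0:
            return '5' * fives + '3' * threes
    return '-1'

assert decent_number(1) == '-1'
assert decent_number(3) == '555'
assert decent_number(5) == '33333'
assert decent_number(11) == '55555533333'
```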
ef1fa03d753f5d8a0b32831320a1b3e076ace363 | Add a test runner for our jqplot demo too | ralphbean/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,pombredanne/moksha,mokshaproject/moksha,pombredanne/moksha,ralphbean/moksha,lmacken/moksha,lmacken/moksha,ralphbean/moksha,pombredanne/moksha,pombredanne/moksha,mokshaproject/moksha | moksha/apps/demo/MokshaJQPlotDemo/run_tests.py | moksha/apps/demo/MokshaJQPlotDemo/run_tests.py | #!/usr/bin/env python
"""
nose runner script.
"""
__requires__ = 'moksha'
import pkg_resources
import nose
if __name__ == '__main__':
nose.main()
| apache-2.0 | Python |
|
bd87286dccc22c6e7ac1470882550c2515df4882 | Add support for eve-central | desaster/uusipuu | modules/evecentral.py | modules/evecentral.py | from core.Uusipuu import UusipuuModule
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.web import client
from xml.dom import minidom
import locale
class Module(UusipuuModule):
def startup(self):
self.db_open('evedb', 'data/db/evedb.sqlite')
def shutdown(self):
self.db_close('evedb')
def findtype(self, key):
if key.isdigit():
sql = 'SELECT typeName, typeID, groupID FROM invTypes' + \
' WHERE typeID = ? COLLATE NOCASE'
else:
sql = 'SELECT typeName, typeID, groupID FROM invTypes' + \
' WHERE typeName = ? COLLATE NOCASE'
return self.db.runQuery(sql, (key,))
def findregion(self, key):
sql = 'SELECT regionID, regionName FROM mapRegions' + \
' WHERE regionName LIKE ? COLLATE NOCASE'
param = '%' + key + '%'
return self.db.runQuery(sql, (param,))
@inlineCallbacks
def cmd_market(self, user, replyto, params):
if not len(params.strip()):
self.bot.msg(replyto, 'Usage: !market item, region')
return
pieces = params.strip().split(',')
item, region = pieces[0], None
if len(pieces) > 1:
region = pieces[1].strip()
type_id = yield self.findtype(item)
if not len(type_id):
self.bot.msg(replyto, 'Unknown item :(')
self.log('Item not found [%s]' % (item,))
return
type_id = type_id[0]['typeID']
type_id = int(type_id)
url = 'http://api.eve-central.com/api/marketstat?typeid=%d' % \
(type_id,)
if region:
result = yield self.findregion(region)
if not len(result):
self.bot.msg(replyto, 'Unknown region :(')
self.log('Region not found [%s]' % (region,))
return
region = str(result[0]['regionName'])
region_id = result[0]['regionID']
region_id = int(region_id)
url += '®ionlimit=%d' % (region_id,)
result = yield client.getPage(url)
print result
#print 'Consider searching market for %d' % (type_id,)
try:
dom = minidom.parseString(result)
except:
self.bot.msg(replyto, 'XML parse failed :(')
self.log('XML parse failed (%s)' % item)
return
if not dom.getElementsByTagName('marketstat'):
self.bot.msg(replyto, 'No market data found in the result :(')
self.log('No market data found in the result (%s)' % item)
return
buy_volume = sell_volume = 0
buy_price = sell_price = 0
buy = dom.getElementsByTagName('buy')[0]
buy_volume = buy.getElementsByTagName('volume')
buy_volume = buy_volume[0].childNodes[0].nodeValue
buy_volume = int(buy_volume)
buy_price = buy.getElementsByTagName('max')
buy_price = buy_price[0].childNodes[0].nodeValue
buy_price = float(buy_price)
sell = dom.getElementsByTagName('sell')[0]
sell_volume = sell.getElementsByTagName('volume')
sell_volume = sell_volume[0].childNodes[0].nodeValue
sell_volume = int(sell_volume)
sell_price = sell.getElementsByTagName('min')
sell_price = sell_price[0].childNodes[0].nodeValue
sell_price = float(sell_price)
locale.setlocale(locale.LC_ALL, '.'.join(locale.getdefaultlocale()))
if not region:
region = 'Global'
msg = '%s (%s): Buy %s, Sell %s' % \
(item, region,
locale.format('%.*f', (2, buy_price), True),
locale.format('%.*f', (2, sell_price), True))
self.bot.msg(replyto, msg)
self.log('%s' % msg)
dom.unlink()
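
# For reference -- the response structure this parser relies on, inferred
# from the getElementsByTagName() calls above (values illustrative only):
#   <marketstat>
#     <buy><volume>1000</volume><max>5.00</max></buy>
#     <sell><volume>2000</volume><min>5.20</min></sell>
#   </marketstat>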
# vim: set et sw=4:
| bsd-2-clause | Python |
|
3623b058670f19dc7a6aa3372a04b61bc96a8678 | Create SenderNode.py | ricardodeazambuja/BrianConnectUDP | examples/SenderNode.py | examples/SenderNode.py | '''
This is the Sender Node.
It sends the start spike train and then simply receives and sends spikes using Brian NeuronGroups. At the end of
the simulation the data collected is plotted.
'''
from brian import *
import argparse
from brian_multiprocess_udp import BrianConnectUDP
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
print "main_NeuronGroup!" #DEBUG!
simclock = simulation_clock
Nr=NeuronGroup(Number_of_Neurons, model='v:1', reset=0, threshold=0.5, clock=simclock)
Nr.v=1
# SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)
Syn_iNG_Nr[:,:]='i==j' # It is a one-to-one connection configuration
Syn_iNG_Nr.w=1 # So it spikes every time it receives an spike (threshold is lower than one).
MExt=SpikeMonitor(Nr) # Spikes sent by UDP
Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP
return ([Nr],[Syn_iNG_Nr],[MExt, Mdummy])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
"""
input_NG: the neuron group that receives the input spikes
simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
"""
figure()
ylim(-1,Number_of_Neurons)
raster_plot(simulation_MN[0])
raster_plot(simulation_MN[1])
# plot(simulation_MN[0][0],'b.')
# plot(simulation_MN[1][0],'g.')
title("Spikes \"start to be\" Sent(blue)/Received(green) by UDP")
show(block=True)
# savefig('output.pdf')
if __name__=="__main__":
# Process the information received from the command line arguments.
parser = argparse.ArgumentParser(description="Sets up and launch the SENDER node.")
parser.add_argument("--neurons", help="The total number of neurons (INPUT/OUTPUT are the same size).", type=int, required=True)
parser.add_argument("--output_port", help="Output port (integer).", type=int, required=True)
parser.add_argument("--input_port", help="Input port (integer).", type=int, required=True)
parser.add_argument("--output_ip", help="Output addresses(string).", type=str, required=True)
parser.add_argument("--sim_clock", help="Simulation clock (float, milliseconds).", type=float, required=True)
parser.add_argument("--input_clock", help="Input clock (float, milliseconds).", type=float, required=True)
parser.add_argument("--brian_addr", help="A number from 0 to 255 used to identify the node.", type=int, default=0)
parser.add_argument("--ttime", help="Total time of the simulation.", type=int, required=True)
args=parser.parse_args()
Number_of_Neurons = args.neurons
my_simulation = BrianConnectUDP(main_NeuronGroup=main_NeuronGroup, post_simulation_function=post_simulation_function,
NumOfNeuronsOutput=Number_of_Neurons, NumOfNeuronsInput=Number_of_Neurons,
simclock_dt=args.sim_clock, inputclock_dt=args.input_clock,
input_addresses=[('localhost', args.input_port, Number_of_Neurons)], output_addresses=[(args.output_ip, args.output_port)],
TotalSimulationTime=args.ttime, brian_address=args.brian_addr)
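
# Example invocation (illustrative values; ports and IP are placeholders):
#   python SenderNode.py --neurons 100 --output_port 10101 --input_port 10102 \
#       --output_ip 127.0.0.1 --sim_clock 1.0 --input_clock 2.0 --ttime 10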
| cc0-1.0 | Python |
|
b261704bc0ada9cfae773eaf1e40b18dc49d6ceb | add outline of background job processor and task interface | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | portality/background.py | portality/background.py |
from portality.core import app
class BackgroundApi(object):
@classmethod
def execute(self, background_task):
job = background_task.background_job
ctx = None
if job.user is not None:
ctx = app.test_request_context("/")
ctx.push()
try:
background_task.run()
except:
background_task.log()
try:
background_task.cleanup()
except:
background_task.log()
background_task.report()
job.save()
if ctx is not None:
ctx.pop()
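
# Illustrative flow (`ReindexTask` is hypothetical, not part of this module):
# a concrete subclass of BackgroundTask (defined below) implements
# run/cleanup/report/log, and a caller drives it through BackgroundApi:
#   job = ReindexTask.prepare(user="system")
#   ReindexTask.submit(job)                    # queue it, or run inline:
#   BackgroundApi.execute(ReindexTask(job))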
class BackgroundTask(object):
"""
All background tasks should extend from this object and override at least the following methods:
- run
- cleanup
- report
- log
- prepare (class method)
"""
def __init__(self, background_job):
self.background_job = background_job
def run(self):
"""
        Execute the task as specified by the background_job
:return:
"""
raise NotImplementedError()
def cleanup(self):
"""
Cleanup after a successful OR failed run of the task
:return:
"""
raise NotImplementedError()
def report(self):
"""
Augment the background_job with information about the task run
:return:
"""
raise NotImplementedError()
def log(self):
"""
Log any exceptions or other errors in running the task
:return:
"""
raise NotImplementedError()
@classmethod
def prepare(cls, **kwargs):
"""
Take an arbitrary set of keyword arguments and return an instance of a BackgroundJob,
or fail with a suitable exception
:param kwargs: arbitrary keyword arguments pertaining to this task type
:return: a BackgroundJob instance representing this task
"""
raise NotImplementedError()
@classmethod
def submit(cls, background_job):
"""
Submit the specified BackgroundJob to the background queue
:param background_job: the BackgroundJob instance
:return:
"""
pass | apache-2.0 | Python |
|
c8ec0689950a5fea0aff98afe54b172bd84e2ce9 | Add example using Tom's registration code in scipy. | yarikoptic/NiPy-OLD,yarikoptic/NiPy-OLD | examples/coregister.py | examples/coregister.py | """Example using Tom's registration code from scipy.
"""
from os import path
from glob import glob
import scipy.ndimage._registration as reg
# Data files
basedir = '/Users/cburns/data/twaite'
anatfile = path.join(basedir, 'ANAT1_V0001.img')
funcdir = path.join(basedir, 'fMRIData')
fileglob = path.join(funcdir, 'FUNC1_V000?.img') # Get first 10 images
if __name__ == '__main__':
print 'Coregister anatomical:\n', anatfile
print '\nWith these functional images:'
funclist = glob(fileglob)
for func in funclist:
print func
measures, imageF_anat, fmri_series = \
reg.demo_MRI_coregistration(anatfile, funclist[0:4])
| bsd-3-clause | Python |
|
114f8012a7faec4fe107c1d68c2ead10cdd88fbe | update zero.1flow.io settings for sparks 2.x. | 1flow/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow | oneflow/settings/zero_1flow_io.py | oneflow/settings/zero_1flow_io.py | # -*- coding: utf-8 -*-
# Settings for zero.1flow.io, a master clone used to validate migrations.
from sparks.django.settings import include_snippets
include_snippets(
(
'000_nobother',
'00_production',
'1flow_io',
'common',
'db_common',
'db_production',
'cache_common',
'cache_production',
'mail_production',
'raven_development',
'common_production',
),
__file__, globals()
)
# Overide real production settings, to be able to distinguish.
SITE_DOMAIN = 'zero.1flow.io'
ALLOWED_HOSTS += ['localhost', SITE_DOMAIN]
| # -*- coding: utf-8 -*-
# Settings for zero.1flow.io, a master clone used to validate migrations.
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'000_nobother',
'00_production',
'1flow_io',
'common',
'db_common',
'db_production',
'cache_common',
'cache_production',
'mail_production',
'raven_development',
'common_production',
),
globals()
)
# Overide real production settings, to be able to distinguish.
SITE_DOMAIN = 'zero.1flow.io'
ALLOWED_HOSTS += ['localhost', SITE_DOMAIN]
| agpl-3.0 | Python |
db60219a1446bb75dd98bfbb12ee6ec4eda6d6bb | add structure for the pcaptotal API | abdesslem/pcapTotal,abdesslem/pcapTotal,abdesslem/pcapTotal | web/api.py | web/api.py | from flask import jsonify, abort, make_response
from flask.ext.httpauth import HTTPBasicAuth
auth = HTTPBasicAuth()
from app import app
tasks = [
{
'id': 1,
'title': u'Buy groceries',
'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
'done': False
},
{
'id': 2,
'title': u'Learn Python',
'description': u'Need to find a good Python tutorial on the web',
'done': False
}
]
@app.route('/todo/api/v1.0/tasks', methods=['GET'])
#@auth.login_required
def get_tasks():
return jsonify({'tasks': tasks})
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
#@auth.login_required
def get_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
return jsonify({'task': task[0]})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@auth.get_password
def get_password(username):
if username == 'ask3m':
return 'ask3m'
return None
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 401)
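
# Example requests (illustrative; assumes Flask's default 127.0.0.1:5000):
#   curl http://127.0.0.1:5000/todo/api/v1.0/tasks     -> the full task list
#   curl http://127.0.0.1:5000/todo/api/v1.0/tasks/3   -> {"error": "Not found"}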
| mit | Python |
|
62b01c3c1614d5719cc69be951b2f6c660e40faa | Add generic function for iterating arrays. | matusvalo/python-easyldap | pyldap/libldap/tools.py | pyldap/libldap/tools.py | def iterate_array(arr, f=None):
i = 0
while True:
if not arr[i]:
break
yield arr[i] if f is None else f(arr[i])
i += 1 | bsd-3-clause | Python |
|
d8cd42940df8c1d2fc9ae28e9c5caa21995ca68c | Add word-counter.py | bamos/python-scripts,bamos/python-scripts | python3/word-counter.py | python3/word-counter.py | #!/usr/bin/env python3
from collections import Counter
import argparse
import re
from itertools import islice
import operator
parser = argparse.ArgumentParser()
parser.add_argument('--numWords',type=int,default=10)
parser.add_argument('--maxTuples',type=int,default=4)
parser.add_argument('--minWordLength',type=int,default=5)
parser.add_argument('file',type=str)
args = parser.parse_args()
# Inspired by http://stackoverflow.com/questions/6822725
def window(seq, n):
it = iter(seq)
result = tuple(islice(it, n))
    if len(result) == n and not any(len(w) < args.minWordLength for w in result):
        yield result
for elem in it:
result = result[1:] + (elem,)
containsShortWord = False
for i in result:
if len(i) < args.minWordLength:
containsShortWord = True
break
if not containsShortWord:
yield result
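
# Worked example (illustrative): with --minWordLength 5,
#   list(window(['alpha', 'beta', 'gamma', 'delta'], 2))
# yields only [('gamma', 'delta')] -- any window containing a word shorter
# than five characters (here 'beta') is skipped.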
with open(args.file,'r') as f:
content = f.read().replace('\n',' ')
words = re.findall(r'\S+', content)
for i in range(1,args.maxTuples+1):
print("\n=== Sliding Window: {} ===".format(i))
for tup in Counter(window(words,i)).most_common(args.numWords):
print(" {}: '{}'".format(tup[1]," ".join(tup[0])))
| mit | Python |
|
377a8be7c0b1e77f0e9c2dfd55f603e199727907 | make pairs | mathii/europe_selection,mathii/europe_selection,mathii/europe_selection | files/v8/make_pairs.py | files/v8/make_pairs.py | import fileinput
pops=[]
for line in fileinput.input():
pops.append(line[:-1])
for i in range(len(pops)-1):
for j in range(i+1, len(pops)):
print pops[i]+","+pops[j]
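
# Example (illustrative): for an input file containing the lines
# POP1, POP2, POP3 this prints:
#   POP1,POP2
#   POP1,POP3
#   POP2,POP3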
| mit | Python |
|
9c0ddb1c4fff5fb3f44ad77192a7f435bc7a22fe | Create AdafruitMotorHat4Pi.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | service/AdafruitMotorHat4Pi.py | service/AdafruitMotorHat4Pi.py | # Start the services needed
raspi = Runtime.start("raspi","RasPi")
hat = Runtime.start("hat","AdafruitMotorHat4Pi")
m1 = Runtime.start("m1","MotorHat4Pi")
# Attach the HAT to i2c bus 1 and address 0x60
hat.attach("raspi","1","0x60")
# Use the M1 motor port and attach the motor to the hat
m1.setMotor("M1")
m1.attach("hat")
# Now everything is wired up and we run a few tests
# Full speed forward
m1.move(1)
sleep(3)
# half speed forward
m1.move(.5)
sleep(3)
# Move backward at 60% speed
m1.move(-.6)
sleep(3)
# Stop
m1.move(0)
# Now you should be able to use the GUI or a script to control the motor
| apache-2.0 | Python |
|
b376b0dca7ec73451ff36ebae1718fa11ec159f0 | Add utils.py for general purpose functions | Zloool/manyfaced-honeypot | manyfaced/common/utils.py | manyfaced/common/utils.py | import time
import pickle
from socket import error as socket_error
from common.status import CLIENT_TIMEOUT
def dump_file(data):
try:
with file('temp.db') as f:
string_file = f.read()
db = pickle.loads(string_file)
except:
db = list()
db.append(data)
with open('temp.db', "w") as f:
f.write(str(pickle.dumps(db)))
def recv_timeout(the_socket, timeout=CLIENT_TIMEOUT):
# make socket non blocking
the_socket.setblocking(0)
# total data partwise in an array
total_data = []
# beginning time
begin = time.time()
while True:
# if you got some data, then break after timeout
if total_data and time.time() - begin > timeout:
break
# if you got no data at all, wait a little longer, twice the timeout
elif time.time() - begin > timeout * 2:
break
# recv something
try:
data = the_socket.recv(8192)
if data:
total_data.append(data)
# change the beginning time for measurement
begin = time.time()
else:
# sleep for sometime to indicate a gap
time.sleep(0.1)
except socket_error:
pass
# join all parts to make final string
return ''.join(total_data)
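
# Illustrative usage (not part of the module; requires `import socket`,
# host/port are placeholders): drain one client connection, then archive it.
#   listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   listener.bind(('0.0.0.0', 8080))
#   listener.listen(1)
#   conn, addr = listener.accept()
#   dump_file((addr, recv_timeout(conn)))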
| mit | Python |
|
a3cbfeca2828ac3d210921524d850091dc775db7 | Revert "Revert "New VMware Module to support configuring a VMware vmkernel IP…" | thaim/ansible,thaim/ansible | lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py | lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vmkernel_ip_config
short_description: Configure the VMkernel IP Address
description:
- Configure the VMkernel IP Address
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
hostname:
description:
- The hostname or IP address of the ESXi server
required: True
username:
description:
- The username of the ESXi server
required: True
aliases: ['user', 'admin']
password:
description:
- The password of the ESXi server
required: True
aliases: ['pass', 'pwd']
vmk_name:
description:
- VMkernel interface name
required: True
ip_address:
description:
- IP address to assign to VMkernel interface
required: True
subnet_mask:
description:
- Subnet Mask to assign to VMkernel interface
required: True
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure IP address on ESX host
local_action:
module: vmware_vmkernel_ip_config
hostname: esxi_hostname
username: esxi_username
password: esxi_password
vmk_name: vmk0
ip_address: 10.0.0.10
subnet_mask: 255.255.255.0
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
host_config_manager = host_system.configManager
host_network_system = host_config_manager.networkSystem
for vnic in host_network_system.networkConfig.vnic:
if vnic.device == vmk_name:
spec = vnic.spec
if spec.ip.ipAddress != ip_address:
spec.ip.dhcp = False
spec.ip.ipAddress = ip_address
spec.ip.subnetMask = subnet_mask
host_network_system.UpdateVirtualNic(vmk_name, spec)
return True
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmk_name = module.params['vmk_name']
ip_address = module.params['ip_address']
subnet_mask = module.params['subnet_mask']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
module.exit_json(changed=changed)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| mit | Python |
|
b9759f60c9f107c3d2c319f53ed2985ee58dc319 | Write test for mock_pose generator. | masasin/spirit,masasin/spirit | src/tests/test_mock_pose.py | src/tests/test_mock_pose.py | try:
from unittest.mock import patch, MagicMock
except ImportError:
from mock import patch, MagicMock
import pytest
import rospy
MockTf2 = MagicMock()
modules = {"tf2_ros": MockTf2}
patcher = patch.dict("sys.modules", modules)
patcher.start()
try:
rospy.init_node("pytest", anonymous=True)
except rospy.exceptions.ROSException:
pass
@pytest.fixture(scope="module")
def teardown_module():
def fin():
patcher.stop()
class TestPoseGenerator(object):
def test_tf_and_pose_same(self):
from mock_pose import PoseGenerator
pose = PoseGenerator.generate_pose()
transform = PoseGenerator.pose_to_tf(pose)
assert transform.transform.translation.x == pose.pose.position.x
assert transform.transform.translation.y == pose.pose.position.y
assert transform.transform.translation.z == pose.pose.position.z
assert transform.transform.rotation.x == pose.pose.orientation.x
assert transform.transform.rotation.y == pose.pose.orientation.y
assert transform.transform.rotation.z == pose.pose.orientation.z
assert transform.transform.rotation.w == pose.pose.orientation.w
| mit | Python |
|
1ce12ab6eb2a3b5578eba253929275bb3b394b76 | Create line_follow.py | CSavvy/python | line_follow.py | line_follow.py | from Myro import *
init("/dev/tty.scribbler")
# To stop the Scribbler, wave your hand/something in front of the fluke
while getObstacle('center') < 6300:
# Get the reading from the line sensors on the bottom of Scribbler
left, right = getLine()
# If both left and right sensors are on track
if left == 1 and right == 1:
motors(-.1, -.1)
# If just the right is on track, turn left
elif right == 1:
motors(.1,-.1)
# If just the left is on track, turn right
elif left == 1:
motors(-.1,.1)
# If both are off track, go backwards in a random direction.
# randomNumber returns a number between 0 and 1, so I scale that to go slower
elif left == 0 and right == 0:
motors(.1*randomNumber(),.1*randomNumber())
# When it's done, stop and beep happily
stop()
beep(.1,600)
beep(.1,650)
beep(.1,700)
beep(.1,750)
beep(.1,800)
beep(.1,850)
| mit | Python |
|
5fa39bb65f88fa3596cc3831890cd258cc5768e1 | Add the module file | LabPy/lantz,LabPy/lantz_qt,LabPy/lantz,LabPy/lantz,LabPy/lantz,LabPy/lantz_qt,varses/awsch,varses/awsch,LabPy/lantz_qt | lantz/drivers/ni/__init__.py | lantz/drivers/ni/__init__.py | # -*- coding: utf-8 -*-
"""
lantz.drivers.ni
~~~~~~~~~~~~~~~~
:company: National Instruments
:description:
:website: http://www.ni.com/
----
:copyright: 2012 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .daqe import NI6052E
__all__ = ['NI6052E', ]
| bsd-3-clause | Python |
|
b3dcbe95d766d902d22a0c4c171cbbe5ce207571 | Add test for longevity testing: both LEDs ON at the same time for extended periods of time | anroots/teensy-moonica | python/tests/stress_test.py | python/tests/stress_test.py |
import time
import sys
import os
from random import randint
# Hack to import from a parent dir
# http://stackoverflow.com/a/11158224/401554
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
from octo import Octo
octo = Octo('/dev/ttyACM0')
octo.reset()
# Test that the LEDs don't burn out or short or whatnot during continuous active state
while True:
time.sleep(1)
octo.led0(randint(0,255),randint(0,255),randint(0,255))
time.sleep(1)
octo.led1(randint(0,255),randint(0,255),randint(0,255))
time.sleep(randint(60,120))
octo.led1(randint(0,255),randint(0,255),randint(0,255))
time.sleep(1)
octo.led0(randint(0,255),randint(0,255),randint(0,255))
time.sleep(randint(60,120))
| mit | Python |
|
d2a9f0cfd34f6a58c647d32b9a32fc517a210fd5 | Create tests for ChartBasedAnnotationsAccumulator | facebookresearch/detectron2,facebookresearch/detectron2,facebookresearch/detectron2 | projects/DensePose/tests/test_chart_based_annotations_accumulator.py | projects/DensePose/tests/test_chart_based_annotations_accumulator.py | # Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from detectron2.structures import Boxes, BoxMode, Instances
from densepose.modeling.losses.utils import ChartBasedAnnotationsAccumulator
from densepose.structures import DensePoseDataRelative, DensePoseList
image_shape = (100, 100)
instances = Instances(image_shape)
n_instances = 3
instances.proposal_boxes = Boxes(torch.rand(n_instances, 4))
instances.gt_boxes = Boxes(torch.rand(n_instances, 4))
# instances.gt_densepose = None cannot happen because instances attributes need a length
class TestChartBasedAnnotationsAccumulator(unittest.TestCase):
def test_chart_based_annotations_accumulator_no_gt_densepose(self):
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
for key in accumulator.__dict__:
self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))
def test_chart_based_annotations_accumulator_gt_densepose_none(self):
instances.gt_densepose = [None] * n_instances
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
for key in accumulator.__dict__:
self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))
def test_chart_based_annotations_accumulator_gt_densepose(self):
data_relative_keys = [
DensePoseDataRelative.X_KEY,
DensePoseDataRelative.Y_KEY,
DensePoseDataRelative.I_KEY,
DensePoseDataRelative.U_KEY,
DensePoseDataRelative.V_KEY,
DensePoseDataRelative.S_KEY,
]
annotations = [DensePoseDataRelative({k: [0] for k in data_relative_keys})] * n_instances
instances.gt_densepose = DensePoseList(annotations, instances.gt_boxes, image_shape)
accumulator = ChartBasedAnnotationsAccumulator()
accumulator.accumulate(instances)
bbox_xywh_est = BoxMode.convert(
instances.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
bbox_xywh_gt = BoxMode.convert(
instances.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
)
expected_values = {
"s_gt": [
torch.zeros((3, DensePoseDataRelative.MASK_SIZE, DensePoseDataRelative.MASK_SIZE))
]
* n_instances,
"bbox_xywh_est": bbox_xywh_est.split(1),
"bbox_xywh_gt": bbox_xywh_gt.split(1),
"point_bbox_with_dp_indices": [torch.tensor([i]) for i in range(n_instances)],
"point_bbox_indices": [torch.tensor([i]) for i in range(n_instances)],
"bbox_indices": list(range(n_instances)),
"nxt_bbox_with_dp_index": n_instances,
"nxt_bbox_index": n_instances,
}
default_value = [torch.tensor([0])] * 3
for key in accumulator.__dict__:
to_test = getattr(accumulator, key)
gt_value = expected_values.get(key, default_value)
if key in ["nxt_bbox_with_dp_index", "nxt_bbox_index"]:
self.assertEqual(to_test, gt_value)
elif key == "bbox_indices":
self.assertListEqual(to_test, gt_value)
else:
self.assertTrue(torch.allclose(torch.stack(to_test), torch.stack(gt_value)))
| apache-2.0 | Python |
|
9bc154d662464a0073b8b7cd3bcf39312a4ac1d7 | add ifttt notification | mofhu/pkuairquality | ifttt_notification.py | ifttt_notification.py | import requests
def Air_alert():
report = {}
report["value1"] = "test"
report["value2"] = "second"
report["value3"] = "third"
requests.post(
"https://maker.ifttt.com/trigger/Air_Test/with/key/{user_key}".format(user_key=""), data=report)
if __name__ == "__main__":
Air_alert()
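
# The three fields surface as Value1/Value2/Value3 in the IFTTT applet.
# Equivalent request by hand (illustrative; the key is a placeholder):
#   curl -X POST -d value1=pm2.5 -d value2=153 -d value3=unhealthy \
#       https://maker.ifttt.com/trigger/Air_Test/with/key/YOUR_KEY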
| mit | Python |
|
369676cfacd35c7b3321edaef97bf64f063e7d50 | Add nephrectomy model | renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar | radar/radar/models/nephrectomy.py | radar/radar/models/nephrectomy.py | from sqlalchemy import Column, Integer, ForeignKey, Date, Index
from sqlalchemy.orm import relationship
from radar.database import db
from radar.models.common import MetaModelMixin, IntegerLookupTable
NEPHRECTOMY_SIDES = OrderedDict([
('LEFT', 'Left'),
('RIGHT', 'Right'),
('BILATERAL', 'Bilateral'),
])
NEPHRECTOMY_KIDNEY_TYPES = OrderedDict([
('TRANSPLANT', 'Transplant'),
('NATURAL', 'Natural'),
])
NEPHRECTOMY_ENTRY_TYPES = OrderedDict([
('O', 'Open'),
('HA', 'Hand Assisted'),
('TPL', 'Transperitoneal Laparoscopic'),
('RPL', 'Retroperitoneal Laparoscopic'),
])
class Nephrectomy(db.Model, MetaModelMixin):
__tablename__ = 'nephrectomy'
id = Column(Integer, primary_key=True)
patient_id = Column(Integer, ForeignKey('patients.id'), nullable=False)
patient = relationship('Patient')
data_source_id = Column(Integer, ForeignKey('data_sources.id'), nullable=False)
data_source = relationship('DataSource')
date = Column(Date, nullable=False)
kidney_side = Column(String, nullable=False)
kidney_type = Column(String, nullable=False)
    entry_type = Column(String, nullable=False)
Index('nephrectomy_patient_id_idx', Nephrectomy.patient_id)
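
# Illustrative usage (values are examples; needs `from datetime import date`):
#   op = Nephrectomy(patient_id=1, data_source_id=1, date=date(2015, 6, 1),
#                    kidney_side='LEFT', kidney_type='NATURAL', entry_type='O')
#   db.session.add(op)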
| agpl-3.0 | Python |
|
0378a225c5519ad39fee6a132c455e1848151a44 | Create run_test.py | scopatz/staged-recipes,shadowwalkersb/staged-recipes,mcs07/staged-recipes,jakirkham/staged-recipes,grlee77/staged-recipes,birdsarah/staged-recipes,conda-forge/staged-recipes,conda-forge/staged-recipes,sodre/staged-recipes,mariusvniekerk/staged-recipes,sodre/staged-recipes,synapticarbors/staged-recipes,barkls/staged-recipes,asmeurer/staged-recipes,patricksnape/staged-recipes,chrisburr/staged-recipes,igortg/staged-recipes,Juanlu001/staged-recipes,sannykr/staged-recipes,patricksnape/staged-recipes,dschreij/staged-recipes,rmcgibbo/staged-recipes,rmcgibbo/staged-recipes,jjhelmus/staged-recipes,kwilcox/staged-recipes,asmeurer/staged-recipes,basnijholt/staged-recipes,cpaulik/staged-recipes,hadim/staged-recipes,jochym/staged-recipes,NOAA-ORR-ERD/staged-recipes,SylvainCorlay/staged-recipes,ocefpaf/staged-recipes,kwilcox/staged-recipes,larray-project/staged-recipes,birdsarah/staged-recipes,petrushy/staged-recipes,johanneskoester/staged-recipes,mariusvniekerk/staged-recipes,rvalieris/staged-recipes,igortg/staged-recipes,chohner/staged-recipes,chrisburr/staged-recipes,sodre/staged-recipes,Cashalow/staged-recipes,SylvainCorlay/staged-recipes,glemaitre/staged-recipes,petrushy/staged-recipes,Cashalow/staged-recipes,stuertz/staged-recipes,pmlandwehr/staged-recipes,goanpeca/staged-recipes,larray-project/staged-recipes,ReimarBauer/staged-recipes,isuruf/staged-recipes,mcs07/staged-recipes,johanneskoester/staged-recipes,guillochon/staged-recipes,shadowwalkersb/staged-recipes,stuertz/staged-recipes,barkls/staged-recipes,pmlandwehr/staged-recipes,goanpeca/staged-recipes,grlee77/staged-recipes,sannykr/staged-recipes,glemaitre/staged-recipes,synapticarbors/staged-recipes,jochym/staged-recipes,basnijholt/staged-recipes,jakirkham/staged-recipes,NOAA-ORR-ERD/staged-recipes,chohner/staged-recipes,jjhelmus/staged-recipes,hadim/staged-recipes,scopatz/staged-recipes,cpaulik/staged-recipes,ceholden/staged-recipes,Juanlu001/staged-recipes,ReimarBauer/staged-recipes,isuruf/staged-recipes,guillochon/staged-recipes,ceholden/staged-recipes,rvalieris/staged-recipes,ocefpaf/staged-recipes,dschreij/staged-recipes | recipes/django-braces/run_test.py | recipes/django-braces/run_test.py | import django
from django.conf import settings
settings.configure(INSTALLED_APPS=['braces', 'django.contrib.contenttypes', 'django.contrib.auth'])
django.setup()
import braces
| bsd-3-clause | Python |
|
7e9c90c179df8666a75eef1610dbda764add1408 | Create elec_temp_join.py | mdbartos/RIPS,mdbartos/RIPS,mdbartos/RIPS | elec_temp_join.py | elec_temp_join.py | import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas import tools
utility = '/home/akagi/Desktop/electricity_data/Electric_Retail_Service_Ter.shp'
util = gpd.read_file(utility)
urbarea = '/home/akagi/GIS/census/cb_2013_us_ua10_500k/cb_2013_us_ua10_500k.shp'
ua = gpd.read_file(urbarea)
ua = ua.to_crs(util.crs)
j = tools.sjoin(util, ua)
grid = '/home/akagi/gridcells.shp'
g = gpd.read_file(grid)
coords = g.centroid.apply(lambda x: x.coords[0])
coordstr = coords.apply(lambda x: 'data_%s_%s' % (x[1], x[0]))
g['coordstr'] = coordstr
ua_g = tools.sjoin(ua, g)
ua_g['grid_geom'] = ua_g['index_right'].map(g['geometry'])
# Distance from each urban area's centroid to its candidate grid cell's centroid
ua_g['dist'] = ua_g.apply(lambda x: (x['geometry'].centroid).distance(x['grid_geom'].centroid), axis=1)
ua_g = ua_g.reset_index().loc[ua_g.reset_index().groupby('index').idxmin('dist')['FID'].values].set_index('index')
j['grid_cell'] = j['index_right'].map(ua_g['coordstr'])
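
# Quick sanity check (illustrative): each utility row should now carry the
# label of its nearest climate-grid cell, e.g.
#   j['grid_cell'].head()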
| mit | Python |
|
e9135583af7a862bd426b4a068743765c4604da3 | add test for dials.convert_to_cbf (only works on dls computers) | dials/dials,dials/dials,dials/dials,dials/dials,dials/dials | test/command_line/test_convert_to_cbf.py | test/command_line/test_convert_to_cbf.py | from __future__ import absolute_import, division, print_function
import glob
import os
import pytest
import procrunner
pytestmark = pytest.mark.skipif(
not os.access("/dls/i04/data/2019/cm23004-1/20190109/Eiger", os.R_OK),
reason="Test images not available",
)
@pytest.mark.parametrize(
"master_h5",
[
"/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw/Thaum/Thau_4/Thau_4_1_master.h5",
"/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw/Thaum/Thau_4/Thau_4_1.nxs",
],
)
def test_convert_to_cbf(master_h5):
result = procrunner.run(["dials.convert_to_cbf", master_h5])
assert result["exitcode"] == 0
assert result["stderr"] == ""
g = glob.glob("as_cbf_*.cbf")
assert len(g) == 900 # need a smaller test set!
| bsd-3-clause | Python |
|
03e9a75d69538649df3e05d40a1c8e7d870fd807 | Add gaussian_elimination.py for solving linear systems (#1448) | TheAlgorithms/Python | arithmetic_analysis/gaussian_elimination.py | arithmetic_analysis/gaussian_elimination.py | """
Gaussian elimination method for solving a system of linear equations.
Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination
"""
import numpy as np
def retroactive_resolution(coefficients: np.matrix, vector: np.array) -> np.array:
"""
This function performs a retroactive linear system resolution
for triangular matrix
Examples:
2x1 + 2x2 - 1x3 = 5 2x1 + 2x2 = -1
0x1 - 2x2 - 1x3 = -7 0x1 - 2x2 = -1
0x1 + 0x2 + 5x3 = 15
>>> gaussian_elimination([[2, 2, -1], [0, -2, -1], [0, 0, 5]], [[5], [-7], [15]])
array([[2.],
[2.],
[3.]])
>>> gaussian_elimination([[2, 2], [0, -2]], [[-1], [-1]])
array([[-1. ],
[ 0.5]])
"""
rows, columns = np.shape(coefficients)
x = np.zeros((rows, 1), dtype=float)
for row in reversed(range(rows)):
sum = 0
for col in range(row + 1, columns):
sum += coefficients[row, col] * x[col]
x[row, 0] = (vector[row] - sum) / coefficients[row, row]
return x
def gaussian_elimination(coefficients: np.matrix, vector: np.array) -> np.array:
"""
This function performs Gaussian elimination method
Examples:
1x1 - 4x2 - 2x3 = -2 1x1 + 2x2 = 5
5x1 + 2x2 - 2x3 = -3 5x1 + 2x2 = 5
1x1 - 1x2 + 0x3 = 4
>>> gaussian_elimination([[1, -4, -2], [5, 2, -2], [1, -1, 0]], [[-2], [-3], [4]])
array([[ 2.3 ],
[-1.7 ],
[ 5.55]])
>>> gaussian_elimination([[1, 2], [5, 2]], [[5], [5]])
array([[0. ],
[2.5]])
"""
# coefficients must to be a square matrix so we need to check first
rows, columns = np.shape(coefficients)
if rows != columns:
return []
# augmented matrix
augmented_mat = np.concatenate((coefficients, vector), axis=1)
augmented_mat = augmented_mat.astype("float64")
# scale the matrix leaving it triangular
for row in range(rows - 1):
pivot = augmented_mat[row, row]
for col in range(row + 1, columns):
factor = augmented_mat[col, row] / pivot
augmented_mat[col, :] -= factor * augmented_mat[row, :]
x = retroactive_resolution(
augmented_mat[:, 0:columns], augmented_mat[:, columns : columns + 1]
)
return x
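
# Note: no pivoting is performed, so a zero pivot on the diagonal during
# elimination divides by zero and yields inf/nan instead of a solution, e.g.
#   gaussian_elimination([[0.0, 1.0], [1.0, 0.0]], [[1.0], [1.0]])
# Row reordering (partial pivoting) would make such systems solvable.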
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | Python |
|
12731a74be889eff48e0e505de666ef3180794fe | add missing file | sassoftware/rmake,sassoftware/rmake,sassoftware/rmake3,sassoftware/rmake,sassoftware/rmake3,sassoftware/rmake3 | rmake/plugins/plugin.py | rmake/plugins/plugin.py | #
# Copyright (c) 2006 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Definition of plugins available for rmake plugins.
Plugin writers should derive from one of these classes.
The plugin will be called with the hooks described here, if the
correct program is being run. For example, when running rmake-server,
the server hooks will be run.
"""
from rmake.lib.pluginlib import Plugin
TYPE_CLIENT = 0
TYPE_SERVER = 1
TYPE_SUBSCRIBER = 2
class ClientPlugin(Plugin):
types = [TYPE_CLIENT]
def client_preInit(self, main):
"""
Called right after plugins have been loaded.
"""
pass
def client_preCommand(self, main, client):
"""
Called after the command-line client has instantiated,
but before the command has been executed.
"""
pass
class ServerPlugin(Plugin):
types = [TYPE_SERVER]
def server_preConfig(self, main):
"""
Called before the configuration file has been read in.
"""
pass
def server_preInit(self, main, argv):
"""
Called before the server has been instantiated.
"""
pass
def server_postInit(self, server):
"""
Called after the server has been instantiated but before
serving is done.
"""
pass
def server_pidDied(self, pid, status):
"""
Called when the server collects a child process that has died.
"""
pass
def server_loop(self, server):
"""
Called once per server loop, between requests.
"""
pass
def server_builderInit(self, server, builder):
"""
Called when the server instantiates a builder for a job.
"""
pass
def server_shutDown(self, server):
"""
Called when the server is halting.
"""
pass
class SubscriberPlugin(Plugin):
types = [TYPE_SUBSCRIBER]
protocol = None
def subscriber_get(self, uri, name):
"""
        Should return a child of the StatusSubscriber class.
"""
pass
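
# Illustrative sketch of a plugin built on these hooks (hypothetical, not
# shipped with rmake):
#   class AnnouncingPlugin(ServerPlugin):
#       def server_postInit(self, server):
#           print 'rmake server initialized'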
| apache-2.0 | Python |
|
3e0e898d0d3ab494edc5dbc65ccde4020f427be8 | Create quiz-eliecer.py | eliecer11/Uip-prog3 | laboratorios/quiz-eliecer.py | laboratorios/quiz-eliecer.py |
base=5
altura=7
perimetro=2*5+2*7
print ("my perimeter is " + str(perimetro))
area=5*7
print ("my area is " + str(area))
metrop=perimetro/100.0  # centimeters to meters
print ("my perimeter in meters is " + str(metrop))
pulgadap=perimetro/2.54  # centimeters to inches
print ("my perimeter in inches is " + str(pulgadap))
metroa=area/10000.0  # square centimeters to square meters
print ("my area in square meters is " + str(metroa))
pulgadaa=area/6.4516  # square centimeters to square inches (2.54**2)
print ("my area in square inches is " + str(pulgadaa))
| mit | Python |
|
ee554d89d1c822537345ce4d03d2bff8783d7f1b | Disable nacl_integration due to #261724. | chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,anirudhSK/chromium,fujunwei/chromium-crosswalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,jaruba/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,dednal/chromium.src,dednal/chromium.src,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,ltilve/chromium,patrickm/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,dushu1203/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,anirudhSK/chromium,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,patrickm/chromium.src,M4sse/chromium.src,patrickm/chromium.src,patrickm/chromium.src,anirudhSK/chromium,dednal/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,littlstar/chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,patrickm/chromium.src,Jonekee/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,M4sse/chromium.src,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,dednal/chromium.src,mogoweb/chromium-crosswalk,Chilledheart/chromium,markYoungH/chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,jaruba/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,dushu1203/chromium.src,littlstar/chromium.src,Chilledheart/chromium,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,littlstar/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,
Chilledheart/chromium,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,anirudhSK/chromium,ltilve/chromium,markYoungH/chromium.src,dednal/chromium.src,M4sse/chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,mogoweb/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,hgl888/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,patrickm/chromium.src,jaruba/chromium.src,Chilledheart/chromium,M4sse/chromium.src,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,dushu1203/chromium.src,ChromiumWebApps/chromium,jaruba/chromium.src,ltilve/chromium,M4sse/chromium.src,littlstar/chromium.src,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,ltilve/chromium,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,patrickm/chromium.src,jaruba/chromium.src,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,patrickm/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,ltilve/chromium,ondra-novak/chromium.src,ondra-novak/chromium.src,chuan9/chromium-crosswalk,anirudhSK/chromium,Fireblend/chromium-crosswalk,Just-D/chromium-1,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,ltilve/chromium,jaruba/chromium.src,dushu1203/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,anirudhSK/chromium,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | chrome/test/nacl_test_injection/buildbot_nacl_integration.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
if sys.platform == 'darwin':
print >> sys.stderr, "SKIPPING NACL INTEGRATION DUE TO BUG #261724."
return 0
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | Python |
33448340d278da7e0653701d78cbab317893279d | Add a simple analysis tool to get some structural properties about an AG's specfile. | jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,iychoi/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate | AG/datasets/analyze.py | AG/datasets/analyze.py | #!/usr/bin/python
import os
import sys
import lxml
from lxml import etree
import math
class StatsCounter(object):
prefixes = {}
cur_tag = None
def start( self, tag, attrib ):
self.cur_tag = tag
def end( self, tag ):
pass
#self.cur_tag = None
def data( self, _data ):
if self.cur_tag != "File" and self.cur_tag != "Dir":
return
data = _data.rstrip("/")
if data == "":
return
dir_name = os.path.dirname( data )
if dir_name == "":
return
if not self.prefixes.has_key( dir_name ):
self.prefixes[ dir_name ] = 0
self.prefixes[ dir_name ] += 1
def close( self ):
return "closed!"
if __name__ == "__main__":
counter = StatsCounter()
parser = etree.XMLParser( target=counter )
fd = open( sys.argv[1], "r" )
while True:
buf = fd.read( 32768 )
if len(buf) == 0:
break
parser.feed( buf )
result = parser.close()
order = counter.prefixes.keys()
order.sort()
size_bins = {}
for path in order:
count = counter.prefixes[path]
print "% 15s %s" % (count, path)
size_bin = int(math.log(count, 10))
if not size_bins.has_key( size_bin ):
size_bins[ size_bin ] = 1
else:
size_bins[ size_bin ] += 1
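
# The bins above are decades: a prefix seen 1-9 times lands in bin 0 (1e0),
# 10-99 times in bin 1 (1e1), 100-999 in bin 2 (1e2), and so on.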
print ""
print "sizes"
max_bin = max( size_bins.keys() )
bin_fmt = r"1e%0" + str( int(math.log(max_bin, 10)) + 1 ) + "s"
for size in xrange( 0, max_bin + 1 ):
binsize = 0
if size_bins.has_key( size ):
binsize = size_bins[size]
bin_str = bin_fmt % size
print "%s %s" % (bin_str, binsize)
| apache-2.0 | Python |
|
cc8e38b46ee79b08c76b31d173301549722693b9 | add examples of deployment object | kubernetes-client/python,mbohlool/client-python,mbohlool/client-python,kubernetes-client/python | examples/deployment_examples.py | examples/deployment_examples.py | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
import yaml
from kubernetes import client, config
DEPLOYMENT_NAME = "nginx-deployment"
def create_deployment_object():
# Instantiate an empty deployment object
deployment = client.ExtensionsV1beta1Deployment()
# Fill required Deployment fields (apiVersion, kind and metadata)
deployment.api_version = "extensions/v1beta1"
deployment.kind = "Deployment"
deployment.metadata = client.V1ObjectMeta(name=DEPLOYMENT_NAME)
    # Create and configure a spec section
spec = client.ExtensionsV1beta1DeploymentSpec()
spec.replicas = 3
spec.template = client.V1PodTemplateSpec()
spec.template.metadata = client.V1ObjectMeta(labels={"app": "nginx"})
spec.template.spec = client.V1PodSpec()
    # Configure the Pod template container
    container = client.V1Container()
    container.name = "nginx"
    container.image = "nginx:1.7.9"
    container.ports = [client.V1ContainerPort(container_port=80)]
spec.template.spec.containers = [container]
# Assign spec section into deployment.spec
deployment.spec = spec
return deployment
def create_deployment(api_instance, deployment):
    # Create deployment
api_response = api_instance.create_namespaced_deployment(
body=deployment,
namespace="default")
print("Deployment created. status='%s'" % str(api_response.status))
def update_deployment(api_instance, deployment):
# Update container image
    deployment.spec.template.spec.containers[0].image = "nginx:1.9.1"
# Update the deployment
api_response = api_instance.replace_namespaced_deployment(
name=DEPLOYMENT_NAME,
namespace="default",
body=deployment)
print("Deployment updated. status='%s'" % str(api_response.status))
def roll_back_deployment(api_instance):
    # Instantiate an empty DeploymentRollback object
rollback = client.ExtensionsV1beta1DeploymentRollback()
# Fill required DeploymentRollback fields
rollback.api_version = "extensions/v1beta1"
rollback.kind = "DeploymentRollback"
rollback.name = DEPLOYMENT_NAME
    # Configure the rollback
rollback.rollback_to = client.ExtensionsV1beta1RollbackConfig()
rollback.rollback_to.revision = 0
# Execute the rollback
api_response = api_instance.create_namespaced_deployment_rollback(
name=DEPLOYMENT_NAME,
namespace="default",
body=rollback)
print("Deployment rolled back. status='%s'" % str(api_response.status))
def delete_deployment(api_instance):
# Delete deployment
api_response = api_instance.delete_namespaced_deployment(
name=DEPLOYMENT_NAME,
namespace="default",
        body=client.V1DeleteOptions(propagation_policy='Foreground',
grace_period_seconds=5))
print("Deployment deleted. status='%s'" % str(api_response.status))
def main():
# Configs can be set in Configuration class directly or using helper
# utility. If no argument provided, the config will be loaded from
# default location.
config.load_kube_config()
extensions_v1beta1 = client.ExtensionsV1beta1Api()
# Create a deployment object with client-python API. The deployment we
# created is same as the `nginx-deployment.yaml` in the /examples folder.
deployment = create_deployment_object()
create_deployment(extensions_v1beta1, deployment)
update_deployment(extensions_v1beta1, deployment)
roll_back_deployment(extensions_v1beta1)
delete_deployment(extensions_v1beta1)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
16165a9387807d54b91873e95a677bfe5d251aba | Add healthcheck module to contrib | ayust/kitnirc | kitnirc/contrib/healthcheck.py | kitnirc/contrib/healthcheck.py | import logging
import sys
import threading
import time
from kitnirc.modular import Module
_log = logging.getLogger(__name__)
class HealthcheckModule(Module):
"""A KitnIRC module which checks connection health.
By default, this module will request a PONG response from the server
if it hasn't seen any traffic in the past minute, and will assume the
connection has dropped and exit the process if it doesn't see any traffic
for 90 seconds.
These delays can be changed by setting "delay" and "timeout" under the
[healthcheck] configuration section.
"""
def __init__(self):
config = self.controller.config
if config.has_option("healthcheck", "delay"):
self.delay = config.getint("healthcheck", "delay")
else:
self.delay = 60
if config.has_option("healthcheck", "timeout"):
self.timeout = config.getint("healthcheck", "timeout")
else:
self.timeout = 90
assert self.timeout > self.delay
        self.last_activity = time.time()
self._stop = False
self.thread = threading.Thread(target=self.loop, name='healthcheck')
self.thread.daemon = True
def start(self, *args, **kwargs):
super(HealthcheckModule, self).start(*args, **kwargs)
self._stop = False
self.thread.start()
def stop(self, *args, **kwargs):
super(HealthcheckModule, self).stop(*args, **kwargs)
self._stop = True
# In any normal circumstances, the healthcheck thread should finish
# in about a second or less. We'll give it a little extra buffer.
self.thread.join(2.0)
if self.thread.is_alive():
_log.warning("Healthcheck thread alive 2s after shutdown request.")
def loop(self):
_log.info("Healthcheck running: delay=%d timeout=%d",
self.delay, self.timeout)
while not self._stop:
            elapsed = time.time() - self.last_activity
if elapsed > self.timeout:
_log.fatal("No incoming in last %d seconds - exiting.", elapsed)
logging.shutdown()
sys.exit(1)
elif elapsed > self.delay:
self.controller.client.send("PING")
time.sleep(1)
@Module.handle("LINE")
def activity(self, line):
        self.last_activity = time.time()
module = HealthcheckModule
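
# Sample configuration for the [healthcheck] section read above (illustrative):
#   [healthcheck]
#   delay = 60
#   timeout = 90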
# vim: set ts=4 sts=4 sw=4 et:
| mit | Python |
|
a533cf1eeea7f0126df86b84f54208154685ab32 | Implement color brightness and contrast ratio calculations in preparation for improving label handling of text colors | cruor99/KivyMD | kivymd/theming_dynamic_text.py | kivymd/theming_dynamic_text.py | # -*- coding: utf-8 -*-
# Two implementations. The first is based on color brightness obtained from:-
# https://www.w3.org/TR/AERT#color-contrast
# The second is based on relative luminance calculation for sRGB obtained from:-
# https://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef
# and contrast ratio calculation obtained from:-
# https://www.w3.org/TR/2008/REC-WCAG20-20081211/#contrast-ratiodef
#
# Preliminary testing suggests color brightness more closely matches the
# Material Design spec suggested text colors, but the alternative implementation
# is both newer and the current 'correct' recommendation, so is included here
# as an option.


''' Implementation of color brightness method '''


def _color_brightness(c):
    # Channels are in the 0-1 range, so the weighted sum falls in 0-1000.
    brightness = c[0] * 299 + c[1] * 587 + c[2] * 114
    return brightness


def _black_or_white_by_color_brightness(color):
    if _color_brightness(color) >= 500:
        return 'black'
    else:
        return 'white'


''' Implementation of contrast ratio and relative luminance method '''


def _normalized_channel(c):
    if c <= 0.03928:
        return c / 12.92
    else:
        return ((c + 0.055) / 1.055) ** 2.4


def _luminance(color):
    rg = _normalized_channel(color[0])
    gg = _normalized_channel(color[1])
    bg = _normalized_channel(color[2])
    return 0.2126 * rg + 0.7152 * gg + 0.0722 * bg


def _black_or_white_by_contrast_ratio(color):
    l_color = _luminance(color)
    l_black = 0.0
    l_white = 1.0
    b_contrast = (l_color + 0.05) / (l_black + 0.05)
    w_contrast = (l_white + 0.05) / (l_color + 0.05)
    return 'white' if w_contrast >= b_contrast else 'black'


def get_contrast_text_color(color, use_color_brightness=True):
    if use_color_brightness:
        c = _black_or_white_by_color_brightness(color)
    else:
        c = _black_or_white_by_contrast_ratio(color)
    if c == 'white':
        return (1, 1, 1, 1)
    else:
        return (0, 0, 0, 1)


if __name__ == '__main__':
    from kivy.utils import get_color_from_hex
    from kivymd.color_definitions import colors, text_colors

    for c in ['Red', 'Pink', 'Purple', 'DeepPurple', 'Indigo', 'Blue',
              'LightBlue', 'Cyan', 'Teal', 'Green', 'LightGreen', 'Lime',
              'Yellow', 'Amber', 'Orange', 'DeepOrange', 'Brown', 'Grey',
              'BlueGrey']:
        print("For the {} color palette:".format(c))
        for h in ['50', '100', '200', '300', '400', '500', '600', '700',
                  '800', '900', 'A100', 'A200', 'A400', 'A700']:
            hex = colors[c].get(h)
            if hex:
                col = get_color_from_hex(hex)
                col_bri = get_contrast_text_color(col)
                con_rat = get_contrast_text_color(col, use_color_brightness=False)
                text_color = text_colors[c][h]
                print(" The {} hue gives {} using color brightness, {} using contrast ratio, and {} from the MD spec"
                      .format(h, col_bri, con_rat, text_color))
| mit | Python |
|
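As a worked example of the two methods, using Material red 500 (#F44336, chosen arbitrarily); note the two methods disagree for this hue, which is exactly the kind of divergence the comments above describe:

r, g, b = 0xF4 / 255., 0x43 / 255., 0x36 / 255.

# Color brightness (W3C AERT): channels in 0-1, weights sum to 1000,
# so the 'choose black text' threshold of 500 corresponds to 0.5.
brightness = r * 299 + g * 587 + b * 114        # ~464 -> below 500, so white text

# Relative luminance plus contrast ratio (WCAG 2.0):
def chan(c):
    return c / 12.92 if c <= 0.03928 else ((c + 0.055) / 1.055) ** 2.4

lum = 0.2126 * chan(r) + 0.7152 * chan(g) + 0.0722 * chan(b)
black_contrast = (lum + 0.05) / 0.05            # ~5.7
white_contrast = 1.05 / (lum + 0.05)            # ~3.7 -> black text wins here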
cad7db237c68139d3f4f7dd691205b207edb0b79 | Refactor plugin api code into its own module | jjohnson42/confluent,michaelfardu/thinkconfluent,jjohnson42/confluent,jufm/confluent,chenglch/confluent,michaelfardu/thinkconfluent,xcat2/confluent,jufm/confluent,jjohnson42/confluent,chenglch/confluent,michaelfardu/thinkconfluent,xcat2/confluent,jufm/confluent,jjohnson42/confluent,whowutwut/confluent,chenglch/confluent,jjohnson42/confluent,michaelfardu/thinkconfluent,whowutwut/confluent,chenglch/confluent,michaelfardu/thinkconfluent,chenglch/confluent,xcat2/confluent,whowutwut/confluent,xcat2/confluent,jufm/confluent,whowutwut/confluent,xcat2/confluent,jufm/confluent | confluent/pluginapi.py | confluent/pluginapi.py | # concept here that mapping from the resource tree and arguments go to
# specific python class signatures.  The intent is to require
# plugin authors to come here if they *really* think they need new 'commands'
# and hopefully curtail deviation by each plugin author

# have to specify a standard place for cfg selection of *which* plugin
# as well as a standard to map api requests to python functions
# e.g. <nodeelement>/power/state maps to some plugin HardwareManager.get_power/set_power
# selected by hardwaremanagement.method
# plugins can advertise a set of names if there is a desire for readable things
# exceptions to handle os images
# endpoints point to a class... usually, the class should have:
# -create
# -retrieve
# -update
# -delete
# functions.  Console is special and just gets passed through
# see API.txt

import os
import sys

pluginmap = {}


def load_plugins():
    # To know our plugins directory, we get the parent path of 'bin'
    path = os.path.dirname(os.path.realpath(__file__))
    plugindir = os.path.realpath(os.path.join(path, '..', 'plugins'))
    sys.path.append(plugindir)
    plugins = set()
    # two passes, to avoid adding both py and pyc files
    for plugin in os.listdir(plugindir):
        plugin = os.path.splitext(plugin)[0]
        plugins.add(plugin)
    for plugin in plugins:
        if plugin.startswith('.'):
            continue
        tmpmod = __import__(plugin)
        if 'plugin_names' in tmpmod.__dict__:
            for name in tmpmod.plugin_names:
                pluginmap[name] = tmpmod
        else:
            pluginmap[plugin] = tmpmod


nodetree = {
    '/': ['power/', 'boot/', 'console/', 'attributes/'],
    '/power/': ['state'],
    '/boot/': ['device'],
    '/console/': ['session', 'logging'],
}

# _ elements are for internal use (e.g. special console scheme)
nodeelements = {
    '_console/session': {
        'pluginattrs': ['console.method', 'hardwaremanagement.method'],
    },
    'console/session': {
        'pluginattrs': ['console.method', 'hardwaremanagement.method'],
    },
    'power/state': {
        'pluginattrs': ['hardwaremanagement.method'],
    },
    'boot/device': {
        'pluginattrs': ['hardwaremanagement.method'],
    }
}


def handle_path(path, operation, configmanager):
    '''Given a full path request, return an object.

    The plugins should generally return some sort of iterator.
    An exception is made for console/session, which should return
    a class with read(), write(bytes), and close()
    '''
    if (path.startswith("/node/") or path.startswith("/system/") or
            # single node requests
            path.startswith("/vm/")):
        nodeidx = path.find("/", 1) + 1
        node = path[nodeidx:]
        # partition the remainder of the path, not the full path, so the node
        # name and the requested element split correctly
        node, _, element = node.partition("/")
        if element not in nodeelements:
            raise Exception("Invalid element requested")
        plugroute = nodeelements[element]
        if 'pluginattrs' in plugroute:
            nodeattr = configmanager.get_node_attributes(
                [node], plugroute['pluginattrs'])
            for attrname in plugroute['pluginattrs']:
                if attrname in nodeattr:
                    return pluginmap[nodeattr[attrname]].__dict__[operation](
                        node=node, operation=operation,
                        configmanager=configmanager)
| apache-2.0 | Python |
|
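To illustrate the dispatch path through handle_path(), a hypothetical call with a stand-in config manager. The node name, attribute value and 'ipmi' plugin are all invented for illustration; a real call depends on what load_plugins() actually discovered:

load_plugins()

class FakeConfigManager(object):
    # Stand-in for confluent's real config manager, for illustration only.
    def get_node_attributes(self, nodes, attributes):
        return {'hardwaremanagement.method': 'ipmi'}

# Dispatches to pluginmap['ipmi'].retrieve(node='n1', ...), assuming an
# 'ipmi' plugin exposing a retrieve() function was found by load_plugins().
result = handle_path('/node/n1/power/state', 'retrieve', FakeConfigManager())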
2cef2ff2eb9c415f374de0a2f8a7174fddca8836 | add missing session.py file | blablacar/exabgp,lochiiconnectivity/exabgp,earies/exabgp,lochiiconnectivity/exabgp,PowerDNS/exabgp,dneiter/exabgp,dneiter/exabgp,benagricola/exabgp,PowerDNS/exabgp,dneiter/exabgp,fugitifduck/exabgp,fugitifduck/exabgp,blablacar/exabgp,PowerDNS/exabgp,earies/exabgp,benagricola/exabgp,earies/exabgp,lochiiconnectivity/exabgp,benagricola/exabgp,blablacar/exabgp,fugitifduck/exabgp | lib/exabgp/configuration/bgp/session.py | lib/exabgp/configuration/bgp/session.py | # encoding: utf-8
"""
session.py
Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2014 Exa Networks. All rights reserved.
"""
from exabgp.configuration.engine.registry import Raised
from exabgp.configuration.engine.section import Section
from exabgp.configuration.engine.parser import asn
from exabgp.configuration.engine.parser import ip
from exabgp.configuration.engine.parser import holdtime
from exabgp.configuration.bgp.capability import syntax_capability
from exabgp.configuration.bgp.capability import SectionCapability
# ============================================================== syntax_session
syntax_session = """\
session <name> {
%s
}
""" % (
'\n\t'.join((_.replace(' <name>','') for _ in syntax_capability.split('\n')))
)
# =============================================================== RaisedSession
class RaisedSession (Raised):
syntax = syntax_session
# ============================================================== SectionSession
#
class SectionSession (Section):
syntax = syntax_session
name = 'session'
def exit (self,tokeniser):
capability = self.get_unamed(tokeniser,'capability')
if capability:
if 'capability' in self.content:
raise RaisedSession(tokeniser,'can not have unamed and named capability in a session')
self.content['capability'] = capability
if 'capability' not in self.content:
raise RaisedSession(tokeniser,'section is missing a capability section')
if 'router-id' not in self.content:
# 0.0.0.0 is now a invlid router-id so it will be replaced by the bind ip
self.content['router-id'] = ip(lambda:'0.0.0.0')
if 'hold-time' not in self.content:
self.content['hold-time'] = holdtime(lambda:'180')
if 'asn-local' not in self.content:
raise RaisedSession(tokeniser,'section is missing a local asn')
if 'asn-peer' not in self.content:
raise RaisedSession(tokeniser,'section is missing a peer asn')
def router_id (self,tokeniser):
try:
self.content['router-id'] = ip(tokeniser)
except ValueError,e:
raise RaisedSession(tokeniser,'could not parse router-id, %s' % str(e))
def hold_time (self,tokeniser):
try:
self.content['hold-time'] = holdtime(tokeniser)
except ValueError,e:
raise RaisedSession(tokeniser,'could not parse hold-time, %s' % str(e))
def local_asn (self,tokeniser):
try:
self.content['asn-local'] = asn(tokeniser)
except ValueError,e:
raise RaisedSession(tokeniser,'could not parse local asn, %s' % str(e))
def peer_asn (self,tokeniser):
try:
self.content['asn-peer'] = asn(tokeniser)
except ValueError,e:
raise RaisedSession(tokeniser,'could not parse peer asn, %s' % str(e))
def capability (self,tokeniser):
section = self.get_section(SectionCapability.name,tokeniser)
if section:
self.content['capability'] = section
else:
return False
@classmethod
def register (cls,registry,location):
registry.register_class(cls)
registry.register(SectionCapability,location+['capability'])
registry.register_hook(cls,'action',location+['capability'],'capability')
registry.register_hook(cls,'enter',location,'enter')
registry.register_hook(cls,'exit',location,'exit')
registry.register_hook(cls,'action',location+['router-id'],'router_id')
registry.register_hook(cls,'action',location+['hold-time'],'hold_time')
asn = location + ['asn']
registry.register_hook(cls,'enter',asn,'enter_unamed_section')
registry.register_hook(cls,'action',asn+['local'],'local_asn')
registry.register_hook(cls,'action',asn+['peer'],'peer_asn')
registry.register_hook(cls,'exit', asn,'exit_unamed_section')
| bsd-3-clause | Python |
|
5c8caad82cd43044152171973e2386d655a1fa3a | Add tests for the way "between" works if no limits are set up | FrankSalad/django-recurrence,django-recurrence/django-recurrence,linux2400/django-recurrence,Nikola-K/django-recurrence,django-recurrence/django-recurrence,Nikola-K/django-recurrence,linux2400/django-recurrence,FrankSalad/django-recurrence | tests/test_recurrences_without_limits.py | tests/test_recurrences_without_limits.py | from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence


RULE = Rule(
    recurrence.DAILY
)

PATTERN = Recurrence(
    rrules=[RULE]
)


def test_between_without_dtend_and_dtstart():
    occurrences = [
        instance for instance in
        PATTERN.between(
            datetime(2014, 1, 1, 0, 0, 0),
            datetime(2014, 1, 4, 0, 0, 0)
        )
    ]

    # We get back nothing, since dtstart and dtend will have defaulted
    # to the current time, and January 2014 is in the past.
    assert occurrences == []


def test_between_with_dtend_and_dtstart_dtend_lower_than_end():
    occurrences = [
        instance for instance in
        PATTERN.between(
            datetime(2014, 1, 1, 0, 0, 0),
            datetime(2014, 1, 6, 0, 0, 0),
            dtstart=datetime(2014, 1, 1, 0, 0, 0),
            dtend=datetime(2014, 1, 4, 0, 0, 0),
        )
    ]

    assert occurrences == [
        datetime(2014, 1, 2, 0, 0, 0),
        datetime(2014, 1, 3, 0, 0, 0),
        datetime(2014, 1, 4, 0, 0, 0),
    ]


def test_between_with_dtend_and_dtstart_dtend_higher_than_end():
    occurrences = [
        instance for instance in
        PATTERN.between(
            datetime(2014, 1, 1, 0, 0, 0),
            datetime(2014, 1, 6, 0, 0, 0),
            dtstart=datetime(2014, 1, 1, 0, 0, 0),
            dtend=datetime(2014, 1, 8, 0, 0, 0),
        )
    ]

    assert occurrences == [
        datetime(2014, 1, 2, 0, 0, 0),
        datetime(2014, 1, 3, 0, 0, 0),
        datetime(2014, 1, 4, 0, 0, 0),
        datetime(2014, 1, 5, 0, 0, 0),
    ]


def test_between_with_dtend_and_dtstart_limits_equal_exclusive():
    occurrences = [
        instance for instance in
        PATTERN.between(
            datetime(2014, 1, 1, 0, 0, 0),
            datetime(2014, 1, 6, 0, 0, 0),
            dtstart=datetime(2014, 1, 2, 0, 0, 0),
            dtend=datetime(2014, 1, 6, 0, 0, 0),
        )
    ]

    assert occurrences == [
        datetime(2014, 1, 2, 0, 0, 0),
        datetime(2014, 1, 3, 0, 0, 0),
        datetime(2014, 1, 4, 0, 0, 0),
        datetime(2014, 1, 5, 0, 0, 0),
    ]


def test_between_with_dtend_and_dtstart_limits_equal_inclusive():
    occurrences = [
        instance for instance in
        PATTERN.between(
            datetime(2014, 1, 1, 0, 0, 0),
            datetime(2014, 1, 6, 0, 0, 0),
            dtstart=datetime(2014, 1, 1, 0, 0, 0),
            dtend=datetime(2014, 1, 6, 0, 0, 0),
            inc=True
        )
    ]

    assert occurrences == [
        datetime(2014, 1, 1, 0, 0, 0),
        datetime(2014, 1, 2, 0, 0, 0),
        datetime(2014, 1, 3, 0, 0, 0),
        datetime(2014, 1, 4, 0, 0, 0),
        datetime(2014, 1, 5, 0, 0, 0),
        datetime(2014, 1, 6, 0, 0, 0),
    ]


def test_between_with_dtend_and_dtstart_dtstart_lower_than_start():
    occurrences = [
        instance for instance in
        PATTERN.between(
            datetime(2014, 1, 2, 0, 0, 0),
            datetime(2014, 1, 6, 0, 0, 0),
            dtstart=datetime(2014, 1, 1, 0, 0, 0),
            dtend=datetime(2014, 1, 6, 0, 0, 0),
        )
    ]

    assert occurrences == [
        datetime(2014, 1, 3, 0, 0, 0),
        datetime(2014, 1, 4, 0, 0, 0),
        datetime(2014, 1, 5, 0, 0, 0),
    ]


def test_between_with_dtend_and_dtstart_dtstart_higher_than_start():
    occurrences = [
        instance for instance in
        PATTERN.between(
            datetime(2014, 1, 1, 0, 0, 0),
            datetime(2014, 1, 6, 0, 0, 0),
            dtstart=datetime(2014, 1, 2, 0, 0, 0),
            dtend=datetime(2014, 1, 6, 0, 0, 0),
        )
    ]

    assert occurrences == [
        datetime(2014, 1, 2, 0, 0, 0),
        datetime(2014, 1, 3, 0, 0, 0),
        datetime(2014, 1, 4, 0, 0, 0),
        datetime(2014, 1, 5, 0, 0, 0),
    ]
| bsd-3-clause | Python |
|
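django-recurrence builds on python-dateutil, so the daily pattern above can be cross-checked directly against dateutil.rrule; a comparison sketch, not part of the test file:

from datetime import datetime
from dateutil.rrule import rrule, DAILY

occurrences = list(rrule(DAILY,
                         dtstart=datetime(2014, 1, 1),
                         until=datetime(2014, 1, 6)))
# -> Jan 1 through Jan 6 inclusive; Recurrence.between() then trims this
# to the requested window, excluding the endpoints unless inc=True.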
a9ca7f2f22551256213ecd32047022048c72db5c | Add Python 3 Script for Converting Image Types | Molecular-Image-Recognition/Molecular-Image-Recognition | scripts/convert_svgs.py | scripts/convert_svgs.py | import cairosvg
import os

# MUST RUN IN PYTHON 3 and pip install cairosvg

file_dir = '../data/hough_test/Test_Set_1/'

svgs = os.listdir(os.path.join(file_dir, 'SVGs'))

for svg in svgs:
    name = svg.split('.svg')[0]
    cairosvg.svg2png(url=os.path.join(file_dir, 'SVGs', svg),
                     write_to=os.path.join(file_dir, 'PNGs', '{0}.png'.format(name)), dpi=600)
    # cairosvg.svg2pdf(url=os.path.join(file_dir, 'SVGs', svg),
    #                  write_to=os.path.join(file_dir, 'PDFs', '{0}.pdf'.format(name)), dpi=600) | mit | Python |
|
392be3310efc812686c0d43f7ca884d9c730a879 | Add stop-by-time script to end simulation after a specified amount of simulated time | abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper | scripts/stop-by-time.py | scripts/stop-by-time.py | # End ROI after x nanoseconds
# Usage: -s stop-by-time:1000000       # End after 1 ms of simulated time

import sim


class StopByTime:

    def setup(self, args):
        args = dict(enumerate((args or '').split(':')))
        self.time = long(args.get(0, 1e6))
        self.done = False
        sim.util.Every(self.time * sim.util.Time.NS, self.periodic, roi_only = True)

    def periodic(self, time, time_delta):
        if self.done:
            return
        elif time >= self.time:
            print '[STOPBYTIME] Ending ROI after %.0f nanoseconds' % (time / 1e6)
            sim.control.set_roi(False)
            self.done = True
            sim.control.abort()


sim.util.register(StopByTime())
| mit | Python |
|
30a0b17d028f279a9877150ac4eb60b1ce135fa2 | Add check script for MultiplyHueAndSaturation | aleju/imgaug,aleju/imgaug,aleju/ImageAugmenter | checks/check_multiply_hue_and_saturation.py | checks/check_multiply_hue_and_saturation.py | from __future__ import print_function, division
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa


def main():
    image = ia.quokka_square((128, 128))
    images_aug = []

    for mul in np.linspace(0.0, 2.0, 10):
        aug = iaa.MultiplyHueAndSaturation(mul)
        image_aug = aug.augment_image(image)
        images_aug.append(image_aug)

    for mul_hue in np.linspace(0.0, 5.0, 10):
        aug = iaa.MultiplyHueAndSaturation(mul_hue=mul_hue)
        image_aug = aug.augment_image(image)
        images_aug.append(image_aug)

    for mul_saturation in np.linspace(0.0, 5.0, 10):
        aug = iaa.MultiplyHueAndSaturation(mul_saturation=mul_saturation)
        image_aug = aug.augment_image(image)
        images_aug.append(image_aug)

    ia.imshow(ia.draw_grid(images_aug, rows=3))

    images_aug = []
    images_aug.extend(iaa.MultiplyHue().augment_images([image] * 10))
    images_aug.extend(iaa.MultiplySaturation().augment_images([image] * 10))
    ia.imshow(ia.draw_grid(images_aug, rows=2))


if __name__ == "__main__":
    main()
| mit | Python |
|
45ec6d5451062c8e3310332c0246eebb231233ef | Add script | erinspace/osf.io,TomBaxter/osf.io,cwisecarver/osf.io,cslzchen/osf.io,caseyrollins/osf.io,pattisdr/osf.io,mattclark/osf.io,adlius/osf.io,baylee-d/osf.io,icereval/osf.io,chennan47/osf.io,icereval/osf.io,Johnetordoff/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,baylee-d/osf.io,cwisecarver/osf.io,caseyrollins/osf.io,adlius/osf.io,hmoco/osf.io,hmoco/osf.io,cwisecarver/osf.io,Nesiehr/osf.io,Johnetordoff/osf.io,Nesiehr/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,mattclark/osf.io,leb2dg/osf.io,mfraezz/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,crcresearch/osf.io,sloria/osf.io,Nesiehr/osf.io,felliott/osf.io,mfraezz/osf.io,aaxelb/osf.io,caneruguz/osf.io,leb2dg/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,hmoco/osf.io,sloria/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,binoculars/osf.io,binoculars/osf.io,caneruguz/osf.io,erinspace/osf.io,felliott/osf.io,chennan47/osf.io,Nesiehr/osf.io,hmoco/osf.io,Johnetordoff/osf.io,sloria/osf.io,laurenrevere/osf.io,aaxelb/osf.io,caseyrollins/osf.io,cslzchen/osf.io,cwisecarver/osf.io,pattisdr/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,adlius/osf.io,TomBaxter/osf.io,pattisdr/osf.io,adlius/osf.io,chrisseto/osf.io,saradbowman/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,crcresearch/osf.io,saradbowman/osf.io,chrisseto/osf.io,felliott/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,chrisseto/osf.io,laurenrevere/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,binoculars/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,icereval/osf.io,baylee-d/osf.io,chennan47/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,chrisseto/osf.io,caneruguz/osf.io,crcresearch/osf.io | scripts/migrate_duplicate_external_accounts.py | scripts/migrate_duplicate_external_accounts.py | from __future__ import absolute_import
import logging
import sys

from dropbox.rest import ErrorResponse
from dropbox.client import DropboxClient

from framework.mongo import database as db
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.addons.github.api import GitHubClient
from website.addons.github.exceptions import GitHubError
from website.oauth.models import ExternalAccount

from scripts import utils as script_utils

logger = logging.getLogger(__name__)


def creds_are_valid(ea_id):
    ea = ExternalAccount.load(ea_id)
    if ea.provider == 'github':
        try:
            GitHubClient(external_account=ea).user()
        except (GitHubError, IndexError):
            logger.info('Invalid creds: {}'.format(ea_id))
            return False
    elif ea.provider == 'dropbox':
        try:
            DropboxClient(ea.oauth_key).account_info()
        except (ValueError, IndexError, ErrorResponse):
            logger.info('Invalid creds: {}'.format(ea_id))
            return False
    else:
        raise Exception('Unexpected provider: {}'.format(ea.provider))
    logger.info('Valid creds: {}'.format(ea_id))
    return True


def swap_references_and_rm(to_keep, to_swap, provider):
    logger.info('Swapping {} references to {} with {}'.format(provider, to_swap, to_keep))
    db['{}nodesettings'.format(provider)].find_and_modify(
        {'external_account': to_swap},
        {'$set': {
            'external_account': to_keep
        }}
    )
    us_map = {us['_id']: us['external_accounts'] for us in db['{}usersettings'.format(provider)].find({'external_accounts': to_swap})}
    for usid in us_map.keys():
        ealist = us_map[usid]
        ealist.remove(to_swap)
        ealist.append(to_keep)
        db['{}usersettings'.format(provider)].find_and_modify(
            {'_id': usid},
            {'$set': {
                'external_accounts': ealist
            }}
        )
    u_map = {u['_id']: u['external_accounts'] for u in db['user'].find({'external_accounts': to_swap})}
    for uid in u_map.keys():
        ealist = u_map[uid]
        ealist.remove(to_swap)
        ealist.append(to_keep)
        db['user'].find_and_modify(
            {'_id': uid},
            {'$set': {
                'external_accounts': ealist
            }}
        )
    logger.info('Removing EA {}'.format(to_swap))
    db.externalaccount.remove({'_id': to_swap})


def migrate():
    possible_collisions = db.externalaccount.aggregate([{'$match': {'provider_id': {'$type': 16}}}])['result']
    pc_map = {'dropbox': [], 'github': [], 'figshare': []}
    for pc in possible_collisions:
        pc_map[pc['provider']].append(pc)
    collisions = []
    for provider in pc_map:
        for pc in pc_map[provider]:
            if db.externalaccount.find({'provider': provider, 'provider_id': str(pc['provider_id'])}).count():
                collisions.append([provider, pc['_id'], db.externalaccount.find({'provider': provider, 'provider_id': str(pc['provider_id'])})[0]['_id']])
    ns_map = {'github': db.githubnodesettings, 'dropbox': db.dropboxnodesettings}
    eas_no_ns = []
    problem_ids = []
    for cols in collisions:
        provider = cols[0]
        int_ea = cols[1]
        str_ea = cols[2]
        if ns_map[provider].find({'external_account': int_ea}).count() == 0:
            eas_no_ns.append(int_ea)
            swap_references_and_rm(str_ea, int_ea, provider)
        elif ns_map[provider].find({'external_account': str_ea}).count() == 0:
            eas_no_ns.append(str_ea)
            swap_references_and_rm(int_ea, str_ea, provider)
        else:
            problem_ids.append([int_ea, str_ea])
            if creds_are_valid(int_ea) and not creds_are_valid(str_ea):
                swap_references_and_rm(int_ea, str_ea, provider)
            else:
                swap_references_and_rm(str_ea, int_ea, provider)


def main():
    dry = '--dry' in sys.argv
    script_utils.add_file_logger(logger, __file__)
    init_app(set_backends=True, routes=False)
    with TokuTransaction():
        migrate()
        if dry:
            raise RuntimeError('Dry run -- Transaction rolled back')


if __name__ == '__main__':
    main()
| apache-2.0 | Python |
|
fb18da6685948de4e66c310af8b850739034c1e9 | Add move.py | AlexMathew/directory-move | move.py | move.py | import Tkinter as tk
import os


class Application(tk.Frame):
    """ The Application class manages the GUI.
    The create_widgets() method creates the widgets to be inserted in the frame.
    A button triggers an event that calls the perform_move() method.
    This function calls the recursive method move_directories(). """

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        self.intro = tk.Label(self, text="Do you want to move all files in one folder to another folder ?")
        self.intro.grid(row=0, column=0, columnspan=3, sticky=tk.W)
        self.src_label = tk.Label(self, text="Enter the complete path of the source (with single backslash b/w directories) ")
        self.src_label.grid(row=2, column=0, columnspan=3, sticky=tk.W)
        self.src_entry = tk.Entry(self, width=50)
        self.src_entry.grid(row=3, column=1, sticky=tk.W)
        self.dest_label = tk.Label(self, text="Enter the complete path of the (already existing, but empty) destination (with single backslash b/w directories) ")
        self.dest_label.grid(row=5, column=0, columnspan=3, sticky=tk.W)
        self.dest_entry = tk.Entry(self, width=50)
        self.dest_entry.grid(row=6, column=1, sticky=tk.W)
        self.move_button = tk.Button(self, text="MOVE IT !", command=self.perform_move)
        self.move_button.grid(row=8, column=1, sticky=tk.W)
        self.text = tk.Text(self, width=50, height=10, wrap=tk.WORD)
        self.text.grid(row=12, column=0, columnspan=2, sticky=tk.W)

    def perform_move(self):
        msg = ""
        try:
            src = self.src_entry.get()
            dest = self.dest_entry.get()
            walker = os.walk(src)
            root, dirs, files = next(walker)
            for filename in files:
                old = os.path.join(root, filename)
                new = os.path.join(dest, filename)
                os.rename(old, new)
            for directory in dirs:
                self.move_directories(src, directory, dest)
            msg = "Done !!"
        except WindowsError:
            msg = "Give an existing empty directory for the destination."
        except StopIteration:
            msg = "Give a valid directory for the source."
        finally:
            self.text.delete(0.0, tk.END)
            self.text.insert(0.0, msg)

    def move_directories(self, src, directory, dest):
        src = os.path.join(src, directory)
        dest = os.path.join(dest, directory)
        os.mkdir(dest)
        walker = os.walk(src)
        root, dirs, files = next(walker)
        for filename in files:
            old = os.path.join(src, filename)
            new = os.path.join(dest, filename)
            os.rename(old, new)
        for directory in dirs:
            self.move_directories(src, directory, dest)
        os.rmdir(src)


# To create the frame and create the Application class object
def main():
    root = tk.Tk()
    root.title("Packers & Movers")
    root.geometry("700x500")
    app = Application(root)
    root.mainloop()


if __name__ == '__main__':
    main() | mit | Python |
|
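One design note on perform_move()/move_directories() above: os.rename only succeeds within a single filesystem, and the destination tree is rebuilt by hand. The standard library can do the whole recursive move in one call, falling back to copy-and-delete across filesystems; a minimal sketch with placeholder paths:

import shutil

# Moves the source tree into the destination, copying across
# filesystems when a plain rename is not possible.
shutil.move("C:\\path\\to\\src", "C:\\path\\to\\dest")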
769036ffd7a21477a9133c58b352711d85c7a7a0 | add regression test for monkey patching of Queue | urllib3/urllib3,Lukasa/urllib3,Disassem/urllib3,sigmavirus24/urllib3,Lukasa/urllib3,sigmavirus24/urllib3,Disassem/urllib3,urllib3/urllib3 | test/test_queue_monkeypatch.py | test/test_queue_monkeypatch.py | from __future__ import absolute_import
import unittest

import urllib3
from urllib3.exceptions import EmptyPoolError

import Queue


class BadError(Exception):
    """
    This should not be raised.
    """
    pass


Queue.Empty = BadError


class TestConnectionPool(unittest.TestCase):
    """
    """

    def test_queue_monkeypatching(self):
        http = urllib3.HTTPConnectionPool(host="localhost", block=True)
        first_conn = http._get_conn(timeout=1)
        with self.assertRaises(EmptyPoolError):
            second_conn = http._get_conn(timeout=1)


if __name__ == '__main__':
    unittest.main()
| mit | Python |
|
338559737e34ca395cec895ac8e822fc3147c7aa | Add basic code | VictorLoren/hanging-fabric-calculator | tule.py | tule.py | def calcLength(x,y,z=0):
    return (x**2 + y**2 + z**2)**0.5

# lengths are in feet
L = 92; W = 68; H = 22; MidXY = (W/2, L/2)

def feetToYards(inFeet):
    return inFeet/3.0

def yardsToFeet(inYards):
    return inYards * 3.0

# widthOfStrand is how wide the tulle piece is (in feet)
def findTotal(widthOfStrand, z=0, printTotal=False):
    '''
    Find total in yards.
    Input:
        widthOfStrand (number of feet, width of tulle)
        z=0 (how many feet it will "drape" down linearly)
        printTotal=False (Friendly print)
    Output:
        tuple ->
            The length needed (in yards),
            list of strand lengths (in yards)
    '''
    # Length of each break point
    strandLengths = []
    # Total length
    total = 0
    i = 0
    # find along width
    alongWidth = 0
    while alongWidth <= W:
        newX, newY = (MidXY[0] - alongWidth, MidXY[1] - L)
        total += calcLength(newX, newY, z)
        alongWidth += widthOfStrand
        # print "width on %d: %f %f" % (i, alongWidth, total); i += 1
    # find along length, around gym
    alongLength = 0; i = 0
    while alongLength <= L:
        newX, newY = (MidXY[0] - W, MidXY[1] - alongLength)
        # Length of strand needed (in yards)
        strandLength = calcLength(newX, newY, z)
        # Add break point length
        strandLengths.append(strandLength)
        # Total length
        total += strandLength
        alongLength += widthOfStrand
        # print "length on %d: %f %f" % (i, alongLength, total); i += 1
    # convert to yards
    total = feetToYards(total)
    strandLengths = map(feetToYards, strandLengths)
    # all the strand lengths (both halves of the room)
    strandLengths *= 2
    if printTotal:
        print '\nTotal Length For Room: %.2f yards' % (2*total)
    # Return total length in yards and a list of strand lengths needed
    return (2*total, strandLengths)

def totalCost(costPerYard, widthOfStrandInFeet, drapingInFeet, printTotal=False):
    # findTotal returns (total yards, strand lengths); unpack the total before
    # pricing it, otherwise we would be multiplying a tuple by a number.
    total, strandLengths = findTotal(widthOfStrandInFeet, drapingInFeet, printTotal)
    cost = total * costPerYard
    print "Total length %.2f yards for $%.2f (@ $%.2f per yard)" % (total, cost, costPerYard)
    return cost

print "Imported 'tule.py'"
| mit | Python |
|
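Example usage of the module above; the price, strand width and drape depth are made-up numbers for illustration:

import tule

# 4.5 ft wide tulle, draping 3 ft, at $1.50 per yard:
cost = tule.totalCost(1.50, 4.5, 3)

# Or just the raw totals and per-strand lengths:
total_yards, strand_lengths = tule.findTotal(4.5, z=3, printTotal=True)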
242479ace03928b20dc86806f7592ec1148b615b | Add integration test for DraftService. | pixelated-project/pixelated-user-agent,torquemad/pixelated-user-agent,torquemad/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,kaeff/pixelated-user-agent,SamuelToh/pixelated-user-agent,pixelated-project/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,torquemad/pixelated-user-agent,torquemad/pixelated-user-agent,torquemad/pixelated-user-agent,phazel/pixelated-user-agent,rdoh/pixelated-user-agent,phazel/pixelated-user-agent,SamuelToh/pixelated-user-agent,pixelated/pixelated-user-agent,sw00/pixelated-user-agent,sw00/pixelated-user-agent,pixelated-project/pixelated-user-agent,rdoh/pixelated-user-agent,rdoh/pixelated-user-agent,kaeff/pixelated-user-agent,kaeff/pixelated-user-agent,phazel/pixelated-user-agent,phazel/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated/pixelated-user-agent,SamuelToh/pixelated-user-agent,SamuelToh/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,rdoh/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,sw00/pixelated-user-agent,sw00/pixelated-user-agent,SamuelToh/pixelated-user-agent,phazel/pixelated-user-agent,rdoh/pixelated-user-agent,kaeff/pixelated-user-agent,kaeff/pixelated-user-agent,sw00/pixelated-user-agent | service/test/integration/test_draft_service.py | service/test/integration/test_draft_service.py | #
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.

from twisted.internet import defer

from test.support.integration import SoledadTestBase, MailBuilder


class DraftServiceTest(SoledadTestBase):

    @defer.inlineCallbacks
    def test_store_and_load_draft(self):
        input_mail = MailBuilder().with_body('some test text').build_input_mail()

        stored_draft = yield self.draft_service.create_draft(input_mail)

        draft = yield self.mail_store.get_mail(stored_draft.ident, include_body=True)

        self.assertEqual('some test text', draft.body)
| agpl-3.0 | Python |
|
c58be8c77fdad5ec8b6c9da9ba6cfc45ae0f6d07 | fix typo in fuzz_addresses.py | amac0/google-location-tools | fuzz-addresses.py | fuzz-addresses.py | import sys
import csv
from dateutil.parser import parse
from datetime import datetime

# Take a csv with datetime stamps and addresses split across remaining cells
# and output a fuzzed csv that contains two columns: a datetime stamp of the
# first day of the year in which the location occurred, and the country.
# Call it with: python fuzz_addresses.py inputfile.csv outputfile.csv

# open the CSV input file
inputfile = open((sys.argv[1]), "r")
reader = csv.reader(inputfile)

# open the CSV output file
outputfile = open((sys.argv[2]), "wb")
writer = csv.writer(outputfile)

for row in reader:
    # for the date, which is the first column, strip it down to the year and
    # make everything January 1. If you don't care about fuzzing the date,
    # change the line below to when=row[0]
    when = parse(str(parse(row[0]).year) + '-1-1')
    # for the address, get rid of everything but the country, which is the
    # last column that has an entry
    country = ''
    while country == '':
        country = row.pop(-1)
    where = country
    # write the new row
    writer.writerow([when, where])

# close the files
inputfile.close()
outputfile.close()
| apache-2.0 | Python |
|
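A worked example of the fuzzing logic above on a made-up input row (requires python-dateutil):

from dateutil.parser import parse

row = ['2014-03-05 12:00', '123 Main St', 'Springfield', '', 'USA', '']
when = parse(str(parse(row[0]).year) + '-1-1')   # -> 2014-01-01 00:00:00
country = ''
while country == '':
    country = row.pop(-1)                        # skips trailing empty cells
print(when, country)                             # -> 2014-01-01 00:00:00 USA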
0ff705b6bbe2d2844d6b947ca2aa8fc9cc9ead66 | Create PedidoDeletar.py | AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb | backend/Models/Grau/PedidoDeletar.py | backend/Models/Grau/PedidoDeletar.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP


class PedidoDeletar(Pedido):
    """Delete-request object; expects an 'id' field in the request body."""

    def __init__(self, variaveis_do_ambiente):
        super(PedidoDeletar, self).__init__(variaveis_do_ambiente)
        try:
            self.id = self.corpo['id']
        except:
            # Malformed request body: respond with HTTP 400.
            raise ErroNoHTTP(400)

    def getId(self):
        return self.id
| mit | Python |
|
d85442d5961602ae91c385a65e9503c409316b3f | Scrub stale data from redis | mozilla/tc-coalesce,dividehex/tc-coalesce | bin/scrub_stale_lists.py | bin/scrub_stale_lists.py | #!/usr/bin/env python
import sys
import os
import time
import redis
import requests
import logging

from urlparse import urlparse
from datetime import timedelta


def main(rds):
    pf = "coalesce.v1."
    tasks_removed = 0
    lists_removed = 0
    list_keys = rds.smembers(pf + "list_keys")
    for key in list_keys:
        logging.debug("Inspecting list: " + pf + key)
        coalesce_list = rds.lrange(pf + "lists." + key, start=0, end=-1)
        for taskId in coalesce_list:
            logging.debug(" - inspecting task: " + taskId)
            if not is_pending(taskId):
                logging.debug("Removing stale task: " + taskId)
                rds.lrem(pf + 'lists.' + key, taskId, num=0)
                tasks_removed += 1
        if not rds.llen(pf + "lists." + key):
            logging.debug("Removing stale list key: " + key)
            rds.srem(pf + "list_keys", key)
            lists_removed += 1
    return tasks_removed, lists_removed


def is_pending(taskId):
    url = 'https://queue.taskcluster.net/v1/task/%s/status' % (taskId)
    try:
        r = requests.get(url, timeout=3)
        if r.status_code == 404:
            logging.debug("Queue service returned 404 for task: " + taskId)
            return False
        if not r.json()['status']['state'] == 'pending':
            return False
    except:
        logging.debug("Failed to get status")
    return True


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        level=logging.DEBUG)
    try:
        redis_url = urlparse(os.environ['REDIS_URL'])
    except KeyError:
        logging.exception("Missing REDIS_URL env variable")
        sys.exit(1)
    rds = redis.Redis(host=redis_url.hostname,
                      port=redis_url.port,
                      password=redis_url.password)
    try:
        start = time.time()
        logging.info("Starting scrub task")
        tasks_removed, lists_removed = main(rds)
        elapsed = time.time() - start
        logging.info("Completed scrub task in %s" % (str(timedelta(seconds=elapsed))))
        # arguments reordered to match the message text (lists first, then tasks)
        logging.info("Removed %s lists and %s tasks" % (lists_removed, tasks_removed))
    except Exception:
        logging.exception("Fatal error in main loop")
| mpl-2.0 | Python |
|
b2b81ac5222a5be7a310c8ef7774f9e2887b412e | optimise lags implemented | andrew0harney/Semantic-encoding-model | optimiseLDA.py | optimiseLDA.py | import cPickle, string, numpy, getopt, sys, random, time, re, pprint
import numpy as np
import glob
from gensim import corpora, models
import pickle
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet as wn
import logging
import matplotlib.pyplot as plt
import copy
from matplotlib.backends.backend_pdf import PdfPages
import os

# Andrew O'Harney 17/03/14
# Script to perform a grid search of vocab size and number of topics on a corpus.
# Will save each generated gensim topic model and the perplexity matrix.
# Metric used for model performance is 90/10 train/test split of perplexity.
# Full credit for the difficult part of this (LDA classification) goes to Radim Řehůřek for
# the excellent gensim toolkit http://radimrehurek.com/gensim/

# Turn on gensim logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

dictionaryName = #Save path for dictionary (gensim encoded)
vocabName = #Save name for complete vocab (plain text of vocab)
corpName = #Save path for corpus (gensim encoded)
documentsName = #Save path for corpus (plain text)
stopsPath = #Path to list of stop words

#######################################################
# Useless stop words to be removed (contain no topic information)
stops = [re.findall(r"[\w']+", word.lower()) for word in open(stopsPath).readlines()]
stemmer = WordNetLemmatizer()
fnames = []

# Train LDA algorithm from vocab of SUN dataset
all_obj = '/home/andy/workspace/ecog/Data/cm/sun.db/object_labels.txt'
documents = [re.findall(r"[\w']+", word) for word in open(all_obj).readlines()]  # Documents/images

# Stem words
documents = [list(set([stemmer.lemmatize(word, 'n') for word in document if [word] not in stops])) for document in documents]

# Get frequency counts of each word
freqCount = {}
for document in documents:
    for word in document:
        freqCount[word] = freqCount.get(word, 0) + 1

random.shuffle(documents)

print 'Creating Vocab'
dictionary = corpora.Dictionary(documents)
print 'Number of unique objects %d' % len(dictionary)
dictionary.save(fname=dictionaryName)  # store the dictionary, for future reference

print 'Creating training corpus'
print 'Total number of images: %d' % len(documents)
pickle.dump(documents, open(documentsName, 'w'))
corp = [dictionary.doc2bow(doc) for doc in documents]
corpora.MmCorpus.serialize(corpName, corp)

######################################
# Train and test
corp = list(corp)
perplexityName = #Save path for perplexity matrix
nwords = np.arange(200, 900, 100)  # Range of vocab lengths to perform grid search over
ntopics = np.arange(10, 32, 2)     # Range of topic counts to perform grid search over
perplexity = np.zeros([len(ntopics), len(nwords)])
testSize = 0.9  # fraction of total documents to train on
resultsRoot = #Output folder path for each of the lda classifiers

for i, num_words in enumerate(nwords):
    # Use only num_words number of words in documents
    print 'Creating training corpus'
    t_dict = copy.copy(dictionary)
    t_dict.filter_extremes(no_below=0.001*len(documents), no_above=1, keep_n=num_words)  # Removes low occurring words (less than num_words and occurring in less than 0.1% of documents)
    t_docs = filter(None, [[word for word in doc if word in t_dict.values()] for doc in documents])  # Remove any words no longer accounted for, and empty documents
    t_corp = [t_dict.doc2bow(doc) for doc in t_docs]  # Create training corpus

    # Training and test sets
    s = int(testSize*len(t_corp))
    corpTrain = t_corp[:s]
    corpTest = t_corp[s:]

    for j, num_topics in enumerate(ntopics):
        name = resultsRoot + 'vocab_' + str(len(t_dict)) + '_topics_' + str(num_topics)
        os.mkdir(name)
        print '# Words: %d \t # Topics: %d' % (len(t_dict), num_topics)
        # Train model
        lda = models.ldamodel.LdaModel(corpus=t_corp, id2word=t_dict, num_topics=num_topics, update_every=0, chunksize=len(t_corp), passes=50, alpha='auto')
        # Save data for this model
        lda.save(fname=name+'/model')
        t_dict.save(fname=name+'/dictionary')  # store the dictionary, for future reference
        pickle.dump(t_docs, open(name+'/documents', 'w'))
        corpora.MmCorpus.serialize(name+'/corp', t_corp)
        perplexity[j, i] = np.exp2(-lda.bound(corpTest) / sum(cnt for document in corpTest for _, cnt in document))  # Normalized test perplexity

        # Show word distributions for top 100 words for each topic
        pp = PdfPages(name+'/wordDistribution.pdf')
        for tn in np.arange(num_topics):
            plt.figure()
            plt.title('Topic %d' % (tn+1))

            ldaOut = [s.split('*') for s in lda.print_topic(tn, 100).split('+')]
            word_probs = [float(a) for (a, b) in ldaOut]
            words = [b for (a, b) in ldaOut]

            plt.plot(np.arange(len(ldaOut)), word_probs)
            plt.xticks(np.arange(len(ldaOut)), words, rotation=45, size=2)
            pp.savefig()
            plt.close()
        pp.close()

np.save(open(perplexityName, 'w'), perplexity)
| mit | Python |
|
9870fdd4b0996254216ff85a4dc0f9706843ca50 | Add test for nested while with exc and break. | ruffy91/micropython,jmarcelino/pycom-micropython,henriknelson/micropython,puuu/micropython,neilh10/micropython,xuxiaoxin/micropython,utopiaprince/micropython,cwyark/micropython,aethaniel/micropython,tuc-osg/micropython,vitiral/micropython,orionrobots/micropython,noahchense/micropython,paul-xxx/micropython,ganshun666/micropython,tralamazza/micropython,kostyll/micropython,trezor/micropython,AriZuu/micropython,mhoffma/micropython,mgyenik/micropython,infinnovation/micropython,torwag/micropython,SungEun-Steve-Kim/test-mp,suda/micropython,deshipu/micropython,hosaka/micropython,SungEun-Steve-Kim/test-mp,ChuckM/micropython,adafruit/circuitpython,deshipu/micropython,toolmacher/micropython,matthewelse/micropython,AriZuu/micropython,SHA2017-badge/micropython-esp32,micropython/micropython-esp32,tuc-osg/micropython,xuxiaoxin/micropython,ryannathans/micropython,paul-xxx/micropython,drrk/micropython,selste/micropython,kostyll/micropython,ahotam/micropython,SHA2017-badge/micropython-esp32,feilongfl/micropython,adamkh/micropython,MrSurly/micropython,warner83/micropython,supergis/micropython,Peetz0r/micropython-esp32,alex-march/micropython,torwag/micropython,suda/micropython,tuc-osg/micropython,misterdanb/micropython,martinribelotta/micropython,ChuckM/micropython,jimkmc/micropython,chrisdearman/micropython,hosaka/micropython,danicampora/micropython,rubencabrera/micropython,alex-robbins/micropython,danicampora/micropython,mianos/micropython,orionrobots/micropython,Timmenem/micropython,puuu/micropython,ahotam/micropython,adafruit/circuitpython,turbinenreiter/micropython,martinribelotta/micropython,neilh10/micropython,alex-robbins/micropython,selste/micropython,pozetroninc/micropython,stonegithubs/micropython,ceramos/micropython,misterdanb/micropython,cwyark/micropython,tdautc19841202/micropython,MrSurly/micropython-esp32,methoxid/micropystat,stonegithubs/micropython,noahwilliamsson/micropython,swegener/micropython,martinribelotta/micropython,vitiral/micropython,lowRISC/micropython,torwag/micropython,tobbad/micropython,supergis/micropython,ceramos/micropython,Vogtinator/micropython,chrisdearman/micropython,AriZuu/micropython,jlillest/micropython,infinnovation/micropython,heisewangluo/micropython,jlillest/micropython,dinau/micropython,galenhz/micropython,vriera/micropython,cloudformdesign/micropython,matthewelse/micropython,blmorris/micropython,ryannathans/micropython,oopy/micropython,mpalomer/micropython,turbinenreiter/micropython,adafruit/circuitpython,infinnovation/micropython,jmarcelino/pycom-micropython,praemdonck/micropython,selste/micropython,dhylands/micropython,firstval/micropython,aethaniel/micropython,pozetroninc/micropython,noahwilliamsson/micropython,blmorris/micropython,danicampora/micropython,matthewelse/micropython,tobbad/micropython,xhat/micropython,ryannathans/micropython,ahotam/micropython,omtinez/micropython,hiway/micropython,emfcamp/micropython,matthewelse/micropython,TDAbboud/micropython,EcmaXp/micropython,jimkmc/micropython,methoxid/micropystat,galenhz/micropython,stonegithubs/micropython,ahotam/micropython,emfcamp/micropython,drrk/micropython,bvernoux/micropython,heisewangluo/micropython,henriknelson/micropython,neilh10/micropython,adafruit/micropython,cwyark/micropython,SHA2017-badge/micropython-esp32,hiway/micropython,misterdanb/micropython,supergis/micropython,blazewicz/micropython,tdautc19841202/micropython,mhoffma/micropython,ceramos/micropython,bvernoux/micropython,redbear/micropython,blmorris
/micropython,ericsnowcurrently/micropython,EcmaXp/micropython,orionrobots/micropython,mpalomer/micropython,tobbad/micropython,swegener/micropython,cloudformdesign/micropython,tralamazza/micropython,dxxb/micropython,swegener/micropython,henriknelson/micropython,EcmaXp/micropython,dmazzella/micropython,redbear/micropython,lbattraw/micropython,ChuckM/micropython,omtinez/micropython,galenhz/micropython,pramasoul/micropython,alex-robbins/micropython,dinau/micropython,lbattraw/micropython,ChuckM/micropython,praemdonck/micropython,feilongfl/micropython,noahwilliamsson/micropython,adafruit/micropython,PappaPeppar/micropython,adamkh/micropython,rubencabrera/micropython,dxxb/micropython,cwyark/micropython,dinau/micropython,adafruit/micropython,methoxid/micropystat,PappaPeppar/micropython,blazewicz/micropython,dxxb/micropython,henriknelson/micropython,dxxb/micropython,emfcamp/micropython,heisewangluo/micropython,micropython/micropython-esp32,danicampora/micropython,mianos/micropython,mianos/micropython,blazewicz/micropython,lbattraw/micropython,Vogtinator/micropython,Timmenem/micropython,trezor/micropython,infinnovation/micropython,pramasoul/micropython,KISSMonX/micropython,ruffy91/micropython,feilongfl/micropython,redbear/micropython,mpalomer/micropython,warner83/micropython,mhoffma/micropython,EcmaXp/micropython,mianos/micropython,blazewicz/micropython,xuxiaoxin/micropython,kostyll/micropython,kerneltask/micropython,feilongfl/micropython,chrisdearman/micropython,firstval/micropython,aethaniel/micropython,praemdonck/micropython,paul-xxx/micropython,adafruit/micropython,neilh10/micropython,skybird6672/micropython,pfalcon/micropython,tobbad/micropython,mianos/micropython,kostyll/micropython,ruffy91/micropython,pramasoul/micropython,ericsnowcurrently/micropython,jmarcelino/pycom-micropython,pozetroninc/micropython,cnoviello/micropython,pozetroninc/micropython,xyb/micropython,skybird6672/micropython,mgyenik/micropython,pfalcon/micropython,ericsnowcurrently/micropython,aethaniel/micropython,puuu/micropython,chrisdearman/micropython,MrSurly/micropython,SHA2017-badge/micropython-esp32,emfcamp/micropython,pfalcon/micropython,HenrikSolver/micropython,EcmaXp/micropython,galenhz/micropython,hosaka/micropython,galenhz/micropython,vriera/micropython,blmorris/micropython,kerneltask/micropython,mpalomer/micropython,utopiaprince/micropython,kostyll/micropython,cwyark/micropython,SungEun-Steve-Kim/test-mp,stonegithubs/micropython,danicampora/micropython,mgyenik/micropython,toolmacher/micropython,xyb/micropython,tobbad/micropython,tdautc19841202/micropython,drrk/micropython,dmazzella/micropython,ernesto-g/micropython,paul-xxx/micropython,ganshun666/micropython,methoxid/micropystat,TDAbboud/micropython,micropython/micropython-esp32,ernesto-g/micropython,adafruit/circuitpython,jimkmc/micropython,TDAbboud/micropython,ericsnowcurrently/micropython,jimkmc/micropython,suda/micropython,swegener/micropython,pfalcon/micropython,henriknelson/micropython,lowRISC/micropython,cnoviello/micropython,HenrikSolver/micropython,micropython/micropython-esp32,mhoffma/micropython,bvernoux/micropython,kerneltask/micropython,cnoviello/micropython,ganshun666/micropython,SungEun-Steve-Kim/test-mp,SungEun-Steve-Kim/test-mp,neilh10/micropython,deshipu/micropython,cnoviello/micropython,orionrobots/micropython,bvernoux/micropython,MrSurly/micropython-esp32,noahchense/micropython,pramasoul/micropython,praemdonck/micropython,vriera/micropython,ganshun666/micropython,pfalcon/micropython,supergis/micropython,HenrikSolver/micropython,hosaka/micropython
,ernesto-g/micropython,xyb/micropython,AriZuu/micropython,omtinez/micropython,tdautc19841202/micropython,noahchense/micropython,rubencabrera/micropython,ceramos/micropython,MrSurly/micropython-esp32,Vogtinator/micropython,ryannathans/micropython,trezor/micropython,Timmenem/micropython,dmazzella/micropython,puuu/micropython,ernesto-g/micropython,dhylands/micropython,deshipu/micropython,MrSurly/micropython,supergis/micropython,deshipu/micropython,cloudformdesign/micropython,Vogtinator/micropython,rubencabrera/micropython,toolmacher/micropython,tralamazza/micropython,ChuckM/micropython,oopy/micropython,xyb/micropython,xhat/micropython,lbattraw/micropython,pozetroninc/micropython,torwag/micropython,hosaka/micropython,puuu/micropython,suda/micropython,trezor/micropython,dinau/micropython,ryannathans/micropython,lowRISC/micropython,adafruit/circuitpython,ernesto-g/micropython,jmarcelino/pycom-micropython,suda/micropython,slzatz/micropython,aethaniel/micropython,slzatz/micropython,kerneltask/micropython,slzatz/micropython,tuc-osg/micropython,ruffy91/micropython,vitiral/micropython,alex-robbins/micropython,Timmenem/micropython,utopiaprince/micropython,trezor/micropython,bvernoux/micropython,utopiaprince/micropython,vriera/micropython,swegener/micropython,vitiral/micropython,cnoviello/micropython,omtinez/micropython,alex-march/micropython,KISSMonX/micropython,hiway/micropython,mgyenik/micropython,martinribelotta/micropython,dinau/micropython,paul-xxx/micropython,firstval/micropython,firstval/micropython,omtinez/micropython,hiway/micropython,ahotam/micropython,dhylands/micropython,turbinenreiter/micropython,misterdanb/micropython,misterdanb/micropython,emfcamp/micropython,adamkh/micropython,skybird6672/micropython,utopiaprince/micropython,PappaPeppar/micropython,ganshun666/micropython,SHA2017-badge/micropython-esp32,Timmenem/micropython,MrSurly/micropython-esp32,Peetz0r/micropython-esp32,MrSurly/micropython,adafruit/circuitpython,drrk/micropython,PappaPeppar/micropython,ruffy91/micropython,dhylands/micropython,oopy/micropython,matthewelse/micropython,orionrobots/micropython,selste/micropython,Peetz0r/micropython-esp32,alex-march/micropython,adafruit/micropython,xuxiaoxin/micropython,tuc-osg/micropython,heisewangluo/micropython,Vogtinator/micropython,MrSurly/micropython-esp32,vitiral/micropython,warner83/micropython,oopy/micropython,warner83/micropython,Peetz0r/micropython-esp32,micropython/micropython-esp32,noahchense/micropython,mpalomer/micropython,alex-robbins/micropython,pramasoul/micropython,xhat/micropython,selste/micropython,martinribelotta/micropython,lowRISC/micropython,skybird6672/micropython,mhoffma/micropython,lowRISC/micropython,mgyenik/micropython,dmazzella/micropython,oopy/micropython,lbattraw/micropython,torwag/micropython,praemdonck/micropython,dxxb/micropython,feilongfl/micropython,KISSMonX/micropython,TDAbboud/micropython,tdautc19841202/micropython,chrisdearman/micropython,xyb/micropython,rubencabrera/micropython,xhat/micropython,AriZuu/micropython,tralamazza/micropython,infinnovation/micropython,adamkh/micropython,matthewelse/micropython,MrSurly/micropython,stonegithubs/micropython,jlillest/micropython,methoxid/micropystat,turbinenreiter/micropython,noahchense/micropython,redbear/micropython,alex-march/micropython,firstval/micropython,cloudformdesign/micropython,cloudformdesign/micropython,blmorris/micropython,jlillest/micropython,slzatz/micropython,toolmacher/micropython,blazewicz/micropython,skybird6672/micropython,ericsnowcurrently/micropython,KISSMonX/micropython,ceramos/micro
python,HenrikSolver/micropython,warner83/micropython,alex-march/micropython,jimkmc/micropython,jmarcelino/pycom-micropython,noahwilliamsson/micropython,noahwilliamsson/micropython,vriera/micropython,HenrikSolver/micropython,hiway/micropython,dhylands/micropython,toolmacher/micropython,kerneltask/micropython,heisewangluo/micropython,xuxiaoxin/micropython,drrk/micropython,jlillest/micropython,PappaPeppar/micropython,xhat/micropython,redbear/micropython,adamkh/micropython,KISSMonX/micropython,Peetz0r/micropython-esp32,TDAbboud/micropython,slzatz/micropython,turbinenreiter/micropython | tests/basics/while_nest_exc.py | tests/basics/while_nest_exc.py | # test nested whiles within a try-except
while 1:
    print(1)
    try:
        print(2)
        while 1:
            print(3)
            break
    except:
        print(4)
    print(5)
    break
| mit | Python |
|
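For reference, tracing the control flow in the test above: the inner break leaves only the inner while, no exception is raised (so the except body never runs), and the outer break ends the single pass. The expected output is:

1
2
3
5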
6a541b8d5b7c2c742420bbbe758866daef804e90 | Add a unit test to verify controller objects do not persist across classes. (#483) | google/mobly | tests/mobly/test_suite_test.py | tests/mobly/test_suite_test.py | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import mock
import shutil
import tempfile

from future.tests.base import unittest

from mobly import base_test
from mobly import config_parser
from mobly import records
from mobly import test_runner

from tests.lib import mock_controller
from tests.lib import utils


class TestSuiteTest(unittest.TestCase):
    """Tests for use cases of creating Mobly test suites.

    Tests here target a combination of test_runner and base_test code.
    """

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.mock_test_cls_configs = config_parser.TestRunConfig()
        self.summary_file = os.path.join(self.tmp_dir, 'summary.yaml')
        self.mock_test_cls_configs.summary_writer = records.TestSummaryWriter(
            self.summary_file)
        self.mock_test_cls_configs.log_path = self.tmp_dir
        self.mock_test_cls_configs.user_params = {"some_param": "hahaha"}
        self.mock_test_cls_configs.reporter = mock.MagicMock()
        self.base_mock_test_config = config_parser.TestRunConfig()
        self.base_mock_test_config.test_bed_name = 'SampleTestBed'
        self.base_mock_test_config.controller_configs = {}
        self.base_mock_test_config.user_params = {
            'icecream': 42,
            'extra_param': 'haha'
        }
        self.base_mock_test_config.log_path = self.tmp_dir

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test_controller_object_not_persistent_across_classes_in_the_same_run(
            self):
        self.foo_test_controller_obj_id = None
        self.bar_test_controller_obj_id = None
        test_run_config = self.base_mock_test_config.copy()
        test_run_config.controller_configs = {'MagicDevice': [{'serial': 1}]}

        class FooTest(base_test.BaseTestClass):
            def setup_class(cls):
                cls.controller = cls.register_controller(mock_controller)[0]
                self.foo_test_controller_obj_id = id(cls.controller)

        class BarTest(base_test.BaseTestClass):
            def setup_class(cls):
                cls.controller = cls.register_controller(mock_controller)[0]
                self.bar_test_controller_obj_id = id(cls.controller)

        tr = test_runner.TestRunner(self.tmp_dir,
                                    test_run_config.test_bed_name)
        tr.add_test_class(test_run_config, FooTest)
        tr.add_test_class(test_run_config, BarTest)
        tr.run()
        self.assertNotEqual(self.foo_test_controller_obj_id,
                            self.bar_test_controller_obj_id)


if __name__ == "__main__":
    unittest.main()
| apache-2.0 | Python |
|
56cb3c6c046c403e186a5191e30b8b982900c57c | Add cli project tests | polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon | tests/test_cli/test_project.py | tests/test_cli/test_project.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from mock import patch

from tests.test_cli.utils import BaseCommandTestCase
from polyaxon_cli.cli.project import project


class TestProject(BaseCommandTestCase):

    @patch('polyaxon_client.api.project.ProjectApi.create_project')
    def test_create_project(self, create_project):
        self.runner.invoke(project, 'create')
        assert create_project.call_count == 0
        self.runner.invoke(project, ['create', '--name=foo'])
        assert create_project.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_projects')
    def test_list_projects(self, list_projects):
        self.runner.invoke(project, ['list'])
        assert list_projects.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.get_project')
    def test_get_project(self, get_project):
        self.runner.invoke(project, ['-p admin/foo', 'get'])
        assert get_project.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.update_project')
    def test_update_project(self, update_project):
        self.runner.invoke(project, ['update'])
        assert update_project.call_count == 0
        self.runner.invoke(project, ['-p admin/foo', 'update', '--description=foo'])
        assert update_project.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_experiment_groups')
    def test_project_groups(self, list_experiment_groups):
        self.runner.invoke(project, ['-p admin/foo', 'groups'])
        assert list_experiment_groups.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_jobs')
    def test_project_jobs(self, list_jobs):
        self.runner.invoke(project, ['-p admin/foo', 'jobs'])
        assert list_jobs.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_experiments')
    def test_project_experiment(self, list_experiments):
        self.runner.invoke(project, ['-p admin/foo', 'experiments'])
        assert list_experiments.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_builds')
    def test_project_builds(self, list_builds):
        self.runner.invoke(project, ['-p admin/foo', 'builds'])
        assert list_builds.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_tensorboards')
    def test_project_tensorboards(self, list_tensorboards):
        # renamed from a copy-pasted `test_project_builds`, which would have
        # silently shadowed the builds test above
        self.runner.invoke(project, ['-p admin/foo', 'tensorboards'])
        assert list_tensorboards.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.download_repo')
    def test_project_download_repo(self, download_repo):
        self.runner.invoke(project, ['-p admin/foo', 'download'])
        assert download_repo.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.bookmark')
    def test_project_bookmark(self, bookmark):
        self.runner.invoke(project, ['-p admin/foo', 'bookmark'])
        assert bookmark.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.unbookmark')
    def test_project_unbookmark(self, unbookmark):
        self.runner.invoke(project, ['-p admin/foo', 'unbookmark'])
        assert unbookmark.call_count == 1
| apache-2.0 | Python |
|
bf28aa8fbe9aa735d017f935aefb89c5ed48f836 | Add bubble sort implementation | ueg1990/aids | aids/sorting_and_searching/bubble_sort.py | aids/sorting_and_searching/bubble_sort.py | '''
In this module, we implement bubble sort
Time complexity: O(n ^ 2)
'''


def bubble_sort(arr):
    '''
    Sort array using bubble sort
    '''
    for index_x in xrange(len(arr)):
        for index_y in xrange(len(arr) - 1, index_x, -1):
            if arr[index_y] < arr[index_y - 1]:
                arr[index_y], arr[index_y - 1] = arr[index_y - 1], arr[index_y]
| mit | Python |
|
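A quick usage check for the function above; note it sorts the list in place and returns None (Python 2, matching the xrange usage):

arr = [5, 1, 4, 2, 8]
bubble_sort(arr)
assert arr == [1, 2, 4, 5, 8]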
73cab4c1e0a591504176011b53b9774c8782238e | test kalman | 207leftovers/cs207project,207leftovers/cs207project | tests/tsdb/test_tsdb_kalman.py | tests/tsdb/test_tsdb_kalman.py | from tsdb import TSDBClient, TSDB_REST_Client
import timeseries as ts
import numpy as np
import subprocess
import unittest
import asyncio
import asynctest
import time
class Test_TSDB_Kalman(asynctest.TestCase):
def setUp(self):
#############
### SETUP ###
#############
# We'll use a subprocess to run our server script, according to:
# http://stackoverflow.com/questions/3781851/run-a-python-script-from-another-python-script-passing-in-args
# We need this log file for some reason, it throws exceptions without it
self.server_log_file = open('.tsdb_server.log.test','w')
self.server_proc = subprocess.Popen(['python', 'go_server.py']
,stdout=self.server_log_file,stderr=subprocess.STDOUT)
time.sleep(1)
# This needs to be separate in case the test
# fails and then the server will never be shut down
def tearDown(self):
################
### SHUTDOWN ###
################
# Shuts down the server
self.server_proc.terminate()
self.server_log_file.close()
time.sleep(1)
async def test_simple_run(self):
client = TSDBClient()
await client.add_trigger('KalmanFilter', 'insert_ts', ['sig_epsilon_estimate', 'sig_eta_estimate'], None)
sigeta_para = 1
sigeps_para = 10
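# Synthetic local-level model: a random walk driven by eta noise, observed through eps measurement noise.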
sigeta = np.random.normal(0,sigeta_para,2000)
sigeps = np.random.normal(0,sigeps_para,2000)
mus = np.cumsum(sigeta)+20
y = mus + sigeps
ats = ts.TimeSeries(y,np.arange(2000))
await client.insert_ts(1,ats)
await client.upsert_meta(1, {'order': 1})
status, payload = await client.select({'order':{'==':1}}, ['sig_epsilon_estimate', 'sig_eta_estimate'], None)
assert(np.isclose(payload['1']['sig_epsilon_estimate'], sigeps_para, rtol=0.1))
assert(np.isclose(payload['1']['sig_eta_estimate'], sigeta_para, rtol=0.1))
| mit | Python |
|
faef1804e1781365fc027ecf08d61fbab56a4679 | Add migratioss I forgot to commit out of saltyness | Angoreher/xcero,Angoreher/xcero,Angoreher/xcero,Angoreher/xcero | magic/migrations/0003_auto_20170929_0229.py | magic/migrations/0003_auto_20170929_0229.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 05:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('magic', '0002_auto_20170929_0159'),
]
operations = [
migrations.AlterField(
model_name='card',
name='power',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='power'),
),
migrations.AlterField(
model_name='card',
name='toughness',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='toughness'),
),
]
| mit | Python |
|
e61dcb055fb4767e6e662648c89cbdfda4422c97 | Update expected error message in this test | chapel-lang/pychapel,russel/pychapel,chapel-lang/pychapel,russel/pychapel,russel/pychapel,chapel-lang/pychapel | docs/source/examples/test_no_depends_fails.py | docs/source/examples/test_no_depends_fails.py | from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum \'M1\'" in out
| from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module \'M1\'" in out
| apache-2.0 | Python |
2e7048d8feae5ed2c244e617077235b5b771f326 | Add deleted selenium runner | josthkko/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core | test/selenium/src/run_selenium.py | test/selenium/src/run_selenium.py | #!/usr/bin/env python2.7
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
""" Basic selenium test runner
This script is used for running all selenium tests against the server defined
in the configuration yaml file. The script will wait a defined time for the
server to start before running the test. If the server fails to start before
its grace time is up, the script will return with an error code of 3. Error
codes 1 and 2 are reserved by pytest and status 0 is returned only if all the
tests pass.
"""
import logging
import os
import sys
import time
import urllib
import pytest
from lib import constants
from lib import file_ops
from lib import log
from lib import environment
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
def wait_for_server():
""" Wait for the server to return a 200 response
"""
sys.stdout.write("Wating on server: ")
for _ in xrange(environment.SERVER_WAIT_TIME):
try:
if urllib.urlopen(environment.APP_URL).getcode() == 200:
print "[Done]"
return True
except IOError:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
print "[Failed]"
return False
if __name__ == "__main__":
if not wait_for_server():
sys.exit(3)
file_ops.create_directory(environment.LOG_PATH)
file_ops.delete_directory_contents(environment.LOG_PATH)
log.set_default_file_handler(
logger,
PROJECT_ROOT_PATH + constants.path.LOGS_DIR + constants.path.TEST_RUNNER
)
logger.setLevel(environment.LOGGING_LEVEL)
sys.exit(pytest.main())
| apache-2.0 | Python |
|
3bb65466d40c1b59faebe0db40eced260ff60010 | Create SampleFunction.py | lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples | src/Python/ImplicitFunctions/SampleFunction.py | src/Python/ImplicitFunctions/SampleFunction.py | #!/usr/bin/env python
import vtk
def main():
value = 2.0
colors = vtk.vtkNamedColors()
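# Superquadric implicit function; the phi/theta roundness parameters control how rounded or pinched the surface is.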
implicitFunction = vtk.vtkSuperquadric()
implicitFunction.SetPhiRoundness(2.5)
implicitFunction.SetThetaRoundness(.5)
# Sample the function.
sample = vtk.vtkSampleFunction()
sample.SetSampleDimensions(50,50,50)
sample.SetImplicitFunction(implicitFunction)
xmin, xmax, ymin, ymax, zmin, zmax = -value, value, -value, value, -value, value
sample.SetModelBounds(xmin, xmax, ymin, ymax, zmin, zmax)
# Create the 0 isosurface.
contours = vtk.vtkContourFilter()
contours.SetInputConnection(sample.GetOutputPort())
contours.GenerateValues(1, 2.0, 2.0)
# Map the contours to graphical primitives.
contourMapper = vtk.vtkPolyDataMapper()
contourMapper.SetInputConnection(contours.GetOutputPort())
contourMapper.SetScalarRange(0.0, 1.2)
# Create an actor for the contours.
contourActor = vtk.vtkActor()
contourActor.SetMapper(contourMapper)
# Create a box around the function to indicate the sampling volume.
#Create outline.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(sample.GetOutputPort())
# Map it to graphics primitives.
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
# Create an actor.
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
# Visualize.
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderer.AddActor(contourActor)
renderer.AddActor(outlineActor)
renderer.SetBackground(colors.GetColor3d("Tan"))
# Enable user interface interactor
renderWindow.Render()
interactor.Start()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
95a890b988435fb019083026572ddb7042dc1379 | Create generate.py | mmcgaley/CoreRopeMemory,mmcgaley/CoreRopeMemory,mmcgaley/CoreRopeMemory | PrettyBits/generate.py | PrettyBits/generate.py | # coding=UTF-8
#
# generate a list of all 8 bit binary strings,
# sort them by how "pretty" they will be in
# Core Rope Memory
#
# mmcgaley@gmail.com
# 2015.04.19
#
WORDLEN = 7
import math
def addLeadingZeros(binaryString) :
return ((WORDLEN - len(binaryString)) * "0") + binaryString
def maxZeros(binaryString) :
zeroCount = 0
maxZeroCount = 0
for bit in list(binaryString) :
if bit is "0" : zeroCount += 1
else :
if (zeroCount > maxZeroCount) :
maxZeroCount = zeroCount
zeroCount = 0
if (zeroCount > maxZeroCount) :
maxZeroCount = zeroCount
return maxZeroCount
def totalZeros(binaryString) :
return len([x for x in binaryString if x == '0'])
def zeroBalance(binaryString) :
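# Symmetry measure: the difference in zero counts between the two halves of the string.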
zeroCount = 0
symmetricalCentre = int(math.ceil(len(binaryString)/2.0))
firstHalf = binaryString[:len(binaryString)/2]
secondHalf = binaryString[symmetricalCentre:]
return abs(totalZeros(firstHalf) - totalZeros(secondHalf)) # absolute value
def charSortKey(tableEntry):
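# Sort key buckets characters: lowercase first, then uppercase, whitespace, digits, and finally non-ASCII characters.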
(c, _) = tableEntry
if c.islower():
return 0 + ord(c)
elif c.isupper():
return 1000 + ord(c)
elif c.isspace():
return 2000 + ord(c)
elif c.isdigit():
return 3000 + ord(c)
else:
# Cf. https://docs.python.org/2/library/stdtypes.html#str.decode
u = c.decode('utf-8')
return 4000 + ord(u)
## main :)
binaryStringsList = []
# start at 1 because the all-zeros string is reserved as a special character
for b in range(1, pow(2, WORDLEN)):
binaryStringsList.append(addLeadingZeros(bin(b)[2:])) # strip "0b" from beginning
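# Stable sorts applied from least to most significant key: zeroBalance dominates, then maxZeros, then totalZeros.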
binaryStringsList.sort(key=totalZeros)
binaryStringsList.sort(key=maxZeros)
binaryStringsList.sort(key=zeroBalance)
# character ordering based on frequency_table.py in this directory
characterList = ['e', 't', 'a', 'o', 'n', 'i', 's', 'r', 'h', 'l', 'd', 'c',
'u', 'm', 'f', 'p', 'g', 'y', 'w', 'b', 'v', 'k', ' ', 'T', 'S', 'A', 'M', 'C', 'I',
'N', 'B', 'R', 'P', 'E', 'D', 'H', 'x', 'W', 'L', 'O', 'F', 'Y', 'G', 'J', 'z',
'j', 'U', 'q', 'K', 'V', 'Q', 'X', 'Z', 'á', 'é', 'í', 'ó', 'ú', 'Á', 'É', 'Í',
'Ó', 'Ú', ',', '.', '0', '1', '5', '2', '"', '9', '-', "'", '4', '3', '8', '6',
'7', ':', ')', '(', '$', ';', '*', '?', '/', '&', '!', '%', '+', '>', '<', '=',
'#', '@']
characterBinaryTable = zip(characterList, binaryStringsList)
characterBinaryTable.sort(key=charSortKey)
for mapping in characterBinaryTable:
print "\'%s\' => \'%s\'," % mapping # for PHP array creation
| mit | Python |
|
524f47a4d4e0db5b76dfb7ebf9447b6199e48b6d | Add data utils tests. | christabor/flask_jsondash,christabor/flask_jsondash,christabor/flask_jsondash | tests/test_data_utils_filetree.py | tests/test_data_utils_filetree.py | from uuid import uuid1
import json
import pytest
from flask_jsondash.data_utils import filetree
def test_path_hierarchy(tmpdir):
uid = uuid1()
tmpfile = tmpdir.mkdir('{}'.format(uid))
data = filetree.path_hierarchy(tmpfile.strpath)
assert json.dumps(data)
for key in ['type', 'name', 'path']:
assert key in data
def test_path_hierarchy_invalid_path(tmpdir):
with pytest.raises(OSError):
filetree.path_hierarchy('invalid-path')
| mit | Python |
|
8d6959a6d950e243ebd2c930ae176a3debf4cc9c | Add package-updates-metric.py | cmattoon/sensu-community-plugins,himyouten/sensu-community-plugins,madAndroid/sensu-community-plugins,JonathanHuot/sensu-community-plugins,himyouten/sensu-community-plugins,lenfree/sensu-community-plugins,estately/sensu-community-plugins,ideais/sensu-community-plugins,mecavity/sensu-community-plugins,giorgiosironi/sensu-community-plugins,justanshulsharma/sensu-community-plugins,estately/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,zerOnepal/sensu-community-plugins,shnmorimoto/sensu-community-plugins,Squarespace/sensu-community-plugins,lenfree/sensu-community-plugins,cread/sensu-community-plugins,julienba/sensu-community-plugins,nagas/sensu-community-plugins,alertlogic/sensu-community-plugins,pkaeding/sensu-community-plugins,loveholidays/sensu-plugins,estately/sensu-community-plugins,aryeguy/sensu-community-plugins,warmfusion/sensu-community-plugins,khuongdp/sensu-community-plugins,plasticbrain/sensu-community-plugins,warmfusion/sensu-community-plugins,tuenti/sensu-community-plugins,gferguson-gd/sensu-community-plugins,new23d/sensu-community-plugins,zerOnepal/sensu-community-plugins,JonathanHuot/sensu-community-plugins,giorgiosironi/sensu-community-plugins,emillion/sensu-community-plugins,gferguson-gd/sensu-community-plugins,circleback/sensu-community-plugins,new23d/sensu-community-plugins,gferguson-gd/sensu-community-plugins,JonathanHuot/sensu-community-plugins,estately/sensu-community-plugins,himyouten/sensu-community-plugins,jbehrends/sensu-community-plugins,madAndroid/sensu-community-plugins,cmattoon/sensu-community-plugins,luisdalves/sensu-community-plugins,ideais/sensu-community-plugins,intoximeters/sensu-community-plugins,lenfree/sensu-community-plugins,plasticbrain/sensu-community-plugins,Seraf/sensu-community-plugins,loveholidays/sensu-plugins,gferguson-gd/sensu-community-plugins,warmfusion/sensu-community-plugins,tuenti/sensu-community-plugins,loveholidays/sensu-plugins,Squarespace/sensu-community-plugins,pkaeding/sensu-community-plugins,intoximeters/sensu-community-plugins,shnmorimoto/sensu-community-plugins,maoe/sensu-community-plugins,Squarespace/sensu-community-plugins,circleback/sensu-community-plugins,ideais/sensu-community-plugins,cotocisternas/sensu-community-plugins,julienba/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,petere/sensu-community-plugins,tuenti/sensu-community-plugins,tuenti/sensu-community-plugins,thehyve/sensu-community-plugins,Seraf/sensu-community-plugins,alexhjlee/sensu-community-plugins,emillion/sensu-community-plugins,alertlogic/sensu-community-plugins,luisdalves/sensu-community-plugins,warmfusion/sensu-community-plugins,giorgiosironi/sensu-community-plugins,intoximeters/sensu-community-plugins,plasticbrain/sensu-community-plugins,lfdesousa/sensu-community-plugins,nilroy/sensu-community-plugins,loveholidays/sensu-plugins,zerOnepal/sensu-community-plugins,cotocisternas/sensu-community-plugins,mecavity/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,circleback/sensu-community-plugins,nilroy/sensu-community-plugins,maoe/sensu-community-plugins,jbehrends/sensu-community-plugins,cmattoon/sensu-community-plugins,nagas/sensu-community-plugins,maoe/sensu-community-plugins,jennytoo/sensu-community-plugins,jennytoo/sensu-community-plugins,cmattoon/sensu-community-plugins,petere/sensu-community-plugins,cread/sensu-community-plugins,nagas/sensu-community-plugins,alertlogic/sensu-community-plugins,justanshulsharma/sensu-community-plugins,ju
lienba/sensu-community-plugins,new23d/sensu-community-plugins,thehyve/sensu-community-plugins,madAndroid/sensu-community-plugins,Squarespace/sensu-community-plugins,zerOnepal/sensu-community-plugins,petere/sensu-community-plugins,Seraf/sensu-community-plugins,lenfree/sensu-community-plugins,maoe/sensu-community-plugins,alexhjlee/sensu-community-plugins,lfdesousa/sensu-community-plugins,alexhjlee/sensu-community-plugins,giorgiosironi/sensu-community-plugins,khuongdp/sensu-community-plugins,Seraf/sensu-community-plugins,JonathanHuot/sensu-community-plugins,julienba/sensu-community-plugins,justanshulsharma/sensu-community-plugins,cread/sensu-community-plugins,alexhjlee/sensu-community-plugins,shnmorimoto/sensu-community-plugins,khuongdp/sensu-community-plugins,luisdalves/sensu-community-plugins,pkaeding/sensu-community-plugins,plasticbrain/sensu-community-plugins,jbehrends/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,lfdesousa/sensu-community-plugins,nilroy/sensu-community-plugins,alertlogic/sensu-community-plugins,thehyve/sensu-community-plugins,circleback/sensu-community-plugins,lfdesousa/sensu-community-plugins,justanshulsharma/sensu-community-plugins,mecavity/sensu-community-plugins,ideais/sensu-community-plugins,nilroy/sensu-community-plugins,madAndroid/sensu-community-plugins,petere/sensu-community-plugins,new23d/sensu-community-plugins,aryeguy/sensu-community-plugins,aryeguy/sensu-community-plugins,jennytoo/sensu-community-plugins,himyouten/sensu-community-plugins,luisdalves/sensu-community-plugins,emillion/sensu-community-plugins,khuongdp/sensu-community-plugins,pkaeding/sensu-community-plugins,shnmorimoto/sensu-community-plugins,tuenti/sensu-community-plugins,mecavity/sensu-community-plugins,cotocisternas/sensu-community-plugins,aryeguy/sensu-community-plugins,nagas/sensu-community-plugins,thehyve/sensu-community-plugins,jennytoo/sensu-community-plugins,cread/sensu-community-plugins,intoximeters/sensu-community-plugins,emillion/sensu-community-plugins,cotocisternas/sensu-community-plugins,jbehrends/sensu-community-plugins | plugins/system/package-updates-metric.py | plugins/system/package-updates-metric.py | #!/usr/bin/env python
#coding=utf-8
import apt
import apt_pkg
import json
import os
import subprocess
import sys
"""
security-updates-metric is used to check available package updates
for Debian or Ubuntu system.
The program is inspired by /usr/lib/update-notifier/apt_check.py
"""
SYNAPTIC_PINFILE = "/var/lib/synaptic/preferences"
DISTRO = subprocess.check_output(["lsb_release", "-c", "-s"],
universal_newlines=True).strip()
# The packages in BLACKLIST WON'T be checked.
BLACKLIST = ['linux-virtual', 'linux-image-virtual', 'linux-headers-virtual',]
def clean(cache,depcache):
""" unmark (clean) all changes from the given depcache """
# mvo: looping is too inefficient with the new auto-mark code
# for pkg in cache.Packages:
# depcache.MarkKeep(pkg)
depcache.init()
def saveDistUpgrade(cache,depcache):
""" this functions mimics a upgrade but will never remove anything """
depcache.upgrade(True)
if depcache.del_count > 0:
clean(cache,depcache)
depcache.upgrade()
def isSecurityUpgrade(ver):
""" check if the given version is a security update (or masks one) """
security_pockets = [("Ubuntu", "%s-security" % DISTRO),
("gNewSense", "%s-security" % DISTRO),
("Debian", "%s-updates" % DISTRO)]
for (file, index) in ver.file_list:
for origin, archive in security_pockets:
if (file.archive == archive and file.origin == origin):
return True
return False
def get_update_packages():
"""
Return a list of dicts describing package updates
"""
pkgs = []
apt_pkg.init()
# force apt to build its caches in memory for now to make sure
# that there is no race when the pkgcache file gets re-generated
apt_pkg.config.set("Dir::Cache::pkgcache","")
try:
cache = apt_pkg.Cache(apt.progress.base.OpProgress())
except SystemError as e:
sys.stderr.write("Error: Opening the cache (%s)" % e)
sys.exit(-1)
depcache = apt_pkg.DepCache(cache)
# read the pin files
depcache.read_pinfile()
# read the synaptic pins too
if os.path.exists(SYNAPTIC_PINFILE):
depcache.read_pinfile(SYNAPTIC_PINFILE)
# init the depcache
depcache.init()
try:
saveDistUpgrade(cache,depcache)
except SystemError as e:
sys.stderr.write("Error: Marking the upgrade (%s)" % e)
sys.exit(-1)
for pkg in cache.packages:
if not (depcache.marked_install(pkg) or depcache.marked_upgrade(pkg)):
continue
inst_ver = pkg.current_ver
cand_ver = depcache.get_candidate_ver(pkg)
if cand_ver == inst_ver:
# Package does not have available update
continue
if not inst_ver or not cand_ver:
# Some packages are not installed (e.g. linux-headers-3.2.0-77),
# skip these updates
continue
if pkg.name in BLACKLIST:
# skip the package in blacklist
continue
record = {"name": pkg.name,
"security": isSecurityUpgrade(cand_ver),
"current_version": inst_ver.ver_str,
"candidate_version": cand_ver.ver_str}
pkgs.append(record)
return pkgs
def package_check_metric():
"""
Return output and exit status in the format Sensu requires.
OK 0: no updates
WARNING 1: available normal updates
CRITICAL 2: available security updates
UNKNOWN 3: exceptions or errors
"""
try:
pkgs = get_update_packages()
security_pkgs = filter(lambda p: p.get('security'), pkgs)
except Exception as e:
# Catch all unknown exceptions
print str(e)
sys.exit(3)
if not pkgs:
# No available update
print json.dumps(pkgs)
sys.exit(0)
elif not security_pkgs:
# Has available updates
print json.dumps(pkgs)
sys.exit(1)
else:
# Has available security updates
print json.dumps(pkgs)
sys.exit(2)
if __name__ == '__main__':
package_check_metric()
| mit | Python |
|
37d0843c76b558d6d7a1892963a30e9a56d73f24 | Document typical Stylesheet attributes | leviroth/praw,gschizas/praw,praw-dev/praw,praw-dev/praw,leviroth/praw,gschizas/praw | praw/models/stylesheet.py | praw/models/stylesheet.py | """Provide the Stylesheet class."""
from .base import PRAWBase
class Stylesheet(PRAWBase):
"""Represent a stylesheet.
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
necessarily comprehensive.
======================= ===================================================
Attribute Description
======================= ===================================================
``images`` A ``list`` of images used by the stylesheet.
``stylesheet`` The contents of the stylesheet, as CSS.
======================= ===================================================
"""
| """Provide the Stylesheet class."""
from .base import PRAWBase
class Stylesheet(PRAWBase):
"""Represent a stylesheet."""
| bsd-2-clause | Python |
4cde1f2fcc21ff83daabdb5221c462f44991c73f | Create remove-boxes.py | jaredkoontz/leetcode,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,jaredkoontz/leetcode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode | Python/remove-boxes.py | Python/remove-boxes.py | # Time: O(n^3) ~ O(n^4)
# Space: O(n^3)
# Given several boxes with different colors represented by different positive numbers.
# You may experience several rounds to remove boxes until there is no box left.
# Each time you can choose some continuous boxes with the same color (composed of k boxes, k >= 1),
# remove them and get k*k points.
# Find the maximum points you can get.
#
# Example 1:
# Input:
#
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
# Output:
# 23
# Explanation:
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
# ----> [1, 3, 3, 4, 3, 1] (3*3=9 points)
# ----> [1, 3, 3, 3, 1] (1*1=1 points)
# ----> [1, 1] (3*3=9 points)
# ----> [] (2*2=4 points)
# Note: The number of boxes n would not exceed 100.
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
def dfs(boxes, l, r, k, lookup):
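# dfs(l, r, k) = best score for boxes[l..r] given k extra boxes equal to boxes[l] already merged on the left.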
if l > r: return 0
if lookup[l][r][k]: return lookup[l][r][k]
ll, kk = l, k
while l < r and boxes[l+1] == boxes[l]:
l += 1
k += 1
result = dfs(boxes, l+1, r, 0, lookup) + (k+1) ** 2
for i in xrange(l+1, r+1):
if boxes[i] == boxes[l]:
result = max(result, dfs(boxes, l+1, i-1, 0, lookup) + dfs(boxes, i, r, k+1, lookup))
lookup[ll][r][kk] = result
return result
lookup = [[[0]*len(boxes) for _ in xrange(len(boxes)) ] for _ in xrange(len(boxes)) ]
return dfs(boxes, 0, len(boxes)-1, 0, lookup)
| mit | Python |
|
9a5bfb7f5bf114bb4bcf2dd4c88ddd8924a97ed9 | add menu function to menu.py | marshki/pyWipe,marshki/pyWipe | menu.py | menu.py | #!/usr/bin/env python
# Text-based menu for use in pyWype.py
def menu():
""" Menu prompt for user to select program option """
while True:
print 'I'
print 'II'
print 'III'
print 'IV'
print 'V'
choice = raw_input('Select an option (I, II, III, IV, V): ')
if choice in ('I', 'II', 'III', 'IV', 'V'):
return choice
menu()
| mit | Python |
|
fb61398b6a0cdd4f40d16729ab2ff0ca47730526 | Add the main file | pahumadad/raspi-relay-api | relay_api/__main__.py | relay_api/__main__.py | from relay_api.api.server import server
from relay_api.conf.config import relays
import relay_api.api.server as api
@server.route("/relay-api/relays", methods=["GET"])
def get_relays():
return api.get_relays(relays)
@server.route("/relay-api/relays/<int:relay_id>", methods=["GET"])
def get_relay(relay_id):
return api.get_relay(relays, relay_id)
| mit | Python |
|
72a0d635e497f0f4c6c58d84f7001ec04063ea90 | Add mfnd.py that prints todays date | mes32/mfnd | mfnd.py | mfnd.py | #!/usr/bin/env python3
"""
MFND - A simple to-do list application
"""
import datetime
today = datetime.date.today()
print( today.strftime('MFND - %B %d, %Y') )
| mit | Python |
|
d3469fd3ab39eeee381457588931636bf0987ea9 | Create impossible_bet.py | shyadow/impossible-bet | impossible_bet.py | impossible_bet.py | import random
def play_bet(participants=100, times=1000, checks=50):
"""Simulate the bet x times with x participants."""
wins = 0
losses = 0
for time in range(times):
boxes = list(range(1, participants + 1))
random.shuffle(boxes)
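# Strategy: each participant first opens the box labelled with his own number, then follows the number found inside (cycle following).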
for participant in range(1, participants + 1):
found = False
count = 0
to_open = participant
while not found and count < checks:
if boxes[to_open - 1] == participant:
found = True
else:
to_open = boxes[to_open - 1]
count += 1
if not found:
losses += 1
break
elif found and participant == participants:
wins += 1
return (wins, losses)
def results(wins, losses):
total = wins + losses
win_percentage = (wins / total) * 100
lose_percentage = (losses / total) * 100
return win_percentage, lose_percentage
if __name__ == '__main__':
participants = int(input('participants: '))
times = int(input('times: '))
checks = int(input('checks: '))
print(results(*play_bet(participants=participants, times=times, checks=checks)))
| mit | Python |
|
aeabe6bb89a359e644c5adcb4c6456fd3428f6de | Stop using intersphinx | bcrochet/python-ironicclient,varunarya10/python-ironicclient,redhat-openstack/python-ironicclient,NaohiroTamura/python-ironicclient,openstack/python-ironicclient,openstack/python-ironicclient,JioCloud/python-ironicclient,rdo-management/python-ironicclient,NaohiroTamura/python-ironicclient | doc/source/conf.py | doc/source/conf.py | # -*- coding: utf-8 -*-
#
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'oslosphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-ironicclient'
copyright = u'OpenStack Foundation'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['ironicclient.']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'
#html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
(
'index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack LLC',
'manual'
),
]
| # -*- coding: utf-8 -*-
#
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'oslosphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-ironicclient'
copyright = u'OpenStack Foundation'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['ironicclient.']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'
#html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
(
'index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack LLC',
'manual'
),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| apache-2.0 | Python |
e8c2406cbcff96196d2404e9df167cc96f468779 | add sources api | fkmclane/MCP,fkmclane/MCP,fkmclane/MCP,fkmclane/MCP | mcp/interface/sources.py | mcp/interface/sources.py | import json
from mcp import sources
from mcp.interface import common
class SourcesHandler(common.AuthorizedHandler):
def forbidden(self):
return True
def do_get(self):
return 200, json.dumps(list(iter(sources.source_db)))
class SourceHandler(common.AuthorizedHandler):
def __init__(self, request, response, groups):
common.AuthorizedHandler.__init__(self, request, response, groups)
self.source = sources.get(self.groups[0])
class SourceInfoHandler(SourceHandler):
def do_get(self):
return 200, json.dumps({'name': self.source.source, 'url': self.source.url, 'revision': self.source.revision})
sources_base = '/sources/'
source_base = sources_base + '(' + sources.sources_allowed + ')'
routes = {sources_base: SourcesHandler, source_base: SourceInfoHandler}
| mit | Python |
|
5dabba3941f870f3f365e186fdf852e834649595 | Move config to docs | leoc/home-assistant,open-homeautomation/home-assistant,ct-23/home-assistant,LinuxChristian/home-assistant,hmronline/home-assistant,bdfoster/blumate,Zyell/home-assistant,jnewland/home-assistant,aronsky/home-assistant,hmronline/home-assistant,miniconfig/home-assistant,soldag/home-assistant,joopert/home-assistant,shaftoe/home-assistant,FreekingDean/home-assistant,HydrelioxGitHub/home-assistant,HydrelioxGitHub/home-assistant,betrisey/home-assistant,kyvinh/home-assistant,molobrakos/home-assistant,keerts/home-assistant,emilhetty/home-assistant,happyleavesaoc/home-assistant,emilhetty/home-assistant,home-assistant/home-assistant,aronsky/home-assistant,jabesq/home-assistant,kennedyshead/home-assistant,stefan-jonasson/home-assistant,morphis/home-assistant,Julian/home-assistant,tinloaf/home-assistant,Duoxilian/home-assistant,turbokongen/home-assistant,emilhetty/home-assistant,florianholzapfel/home-assistant,miniconfig/home-assistant,bdfoster/blumate,alexmogavero/home-assistant,tboyce1/home-assistant,aequitas/home-assistant,varunr047/homefile,pschmitt/home-assistant,stefan-jonasson/home-assistant,hmronline/home-assistant,MartinHjelmare/home-assistant,w1ll1am23/home-assistant,jamespcole/home-assistant,LinuxChristian/home-assistant,kyvinh/home-assistant,leppa/home-assistant,qedi-r/home-assistant,mikaelboman/home-assistant,devdelay/home-assistant,MungoRae/home-assistant,oandrew/home-assistant,dmeulen/home-assistant,keerts/home-assistant,mKeRix/home-assistant,DavidLP/home-assistant,eagleamon/home-assistant,justyns/home-assistant,nugget/home-assistant,instantchow/home-assistant,open-homeautomation/home-assistant,tboyce1/home-assistant,florianholzapfel/home-assistant,emilhetty/home-assistant,stefan-jonasson/home-assistant,nevercast/home-assistant,leoc/home-assistant,titilambert/home-assistant,mezz64/home-assistant,happyleavesaoc/home-assistant,balloob/home-assistant,jabesq/home-assistant,coteyr/home-assistant,robbiet480/home-assistant,coteyr/home-assistant,bdfoster/blumate,sfam/home-assistant,devdelay/home-assistant,keerts/home-assistant,HydrelioxGitHub/home-assistant,Smart-Torvy/torvy-home-assistant,ma314smith/home-assistant,postlund/home-assistant,happyleavesaoc/home-assistant,eagleamon/home-assistant,instantchow/home-assistant,Danielhiversen/home-assistant,philipbl/home-assistant,mKeRix/home-assistant,sander76/home-assistant,adrienbrault/home-assistant,emilhetty/home-assistant,Duoxilian/home-assistant,Julian/home-assistant,leoc/home-assistant,MungoRae/home-assistant,oandrew/home-assistant,sfam/home-assistant,Smart-Torvy/torvy-home-assistant,alexmogavero/home-assistant,Cinntax/home-assistant,GenericStudent/home-assistant,persandstrom/home-assistant,miniconfig/home-assistant,balloob/home-assistant,molobrakos/home-assistant,LinuxChristian/home-assistant,mezz64/home-assistant,morphis/home-assistant,MartinHjelmare/home-assistant,eagleamon/home-assistant,rohitranjan1991/home-assistant,lukas-hetzenecker/home-assistant,varunr047/homefile,kennedyshead/home-assistant,jnewland/home-assistant,deisi/home-assistant,betrisey/home-assistant,sfam/home-assistant,persandstrom/home-assistant,happyleavesaoc/home-assistant,Danielhiversen/home-assistant,robjohnson189/home-assistant,mikaelboman/home-assistant,rohitranjan1991/home-assistant,partofthething/home-assistant,jaharkes/home-assistant,nevercast/home-assistant,luxus/home-assistant,ct-23/home-assistant,pschmitt/home-assistant,PetePriority/home-assistant,Zyell/home-assistant,Zac-HD/home-assistant,philipbl/home
-assistant,varunr047/homefile,Teagan42/home-assistant,varunr047/homefile,jaharkes/home-assistant,betrisey/home-assistant,deisi/home-assistant,oandrew/home-assistant,sffjunkie/home-assistant,ewandor/home-assistant,jamespcole/home-assistant,alexmogavero/home-assistant,srcLurker/home-assistant,Zac-HD/home-assistant,deisi/home-assistant,LinuxChristian/home-assistant,tboyce1/home-assistant,GenericStudent/home-assistant,Julian/home-assistant,kyvinh/home-assistant,stefan-jonasson/home-assistant,hmronline/home-assistant,dmeulen/home-assistant,leoc/home-assistant,devdelay/home-assistant,home-assistant/home-assistant,PetePriority/home-assistant,titilambert/home-assistant,nugget/home-assistant,hexxter/home-assistant,DavidLP/home-assistant,xifle/home-assistant,Smart-Torvy/torvy-home-assistant,MartinHjelmare/home-assistant,jaharkes/home-assistant,ma314smith/home-assistant,robbiet480/home-assistant,Theb-1/home-assistant,tboyce021/home-assistant,Smart-Torvy/torvy-home-assistant,srcLurker/home-assistant,joopert/home-assistant,nevercast/home-assistant,oandrew/home-assistant,jamespcole/home-assistant,aequitas/home-assistant,bdfoster/blumate,morphis/home-assistant,bdfoster/blumate,sffjunkie/home-assistant,tinloaf/home-assistant,auduny/home-assistant,florianholzapfel/home-assistant,alexmogavero/home-assistant,xifle/home-assistant,xifle/home-assistant,ewandor/home-assistant,aoakeson/home-assistant,sdague/home-assistant,jnewland/home-assistant,instantchow/home-assistant,LinuxChristian/home-assistant,keerts/home-assistant,nnic/home-assistant,MungoRae/home-assistant,philipbl/home-assistant,jawilson/home-assistant,mikaelboman/home-assistant,soldag/home-assistant,fbradyirl/home-assistant,auduny/home-assistant,balloob/home-assistant,Theb-1/home-assistant,srcLurker/home-assistant,shaftoe/home-assistant,toddeye/home-assistant,Duoxilian/home-assistant,tinloaf/home-assistant,JshWright/home-assistant,mKeRix/home-assistant,persandstrom/home-assistant,sffjunkie/home-assistant,ct-23/home-assistant,Zac-HD/home-assistant,toddeye/home-assistant,philipbl/home-assistant,JshWright/home-assistant,aequitas/home-assistant,eagleamon/home-assistant,sdague/home-assistant,jaharkes/home-assistant,nnic/home-assistant,srcLurker/home-assistant,partofthething/home-assistant,MungoRae/home-assistant,jawilson/home-assistant,adrienbrault/home-assistant,open-homeautomation/home-assistant,caiuspb/home-assistant,varunr047/homefile,leppa/home-assistant,florianholzapfel/home-assistant,open-homeautomation/home-assistant,fbradyirl/home-assistant,ma314smith/home-assistant,postlund/home-assistant,nnic/home-assistant,luxus/home-assistant,deisi/home-assistant,ct-23/home-assistant,mikaelboman/home-assistant,robjohnson189/home-assistant,caiuspb/home-assistant,JshWright/home-assistant,shaftoe/home-assistant,aoakeson/home-assistant,morphis/home-assistant,hmronline/home-assistant,hexxter/home-assistant,Julian/home-assistant,betrisey/home-assistant,tboyce021/home-assistant,nkgilley/home-assistant,mikaelboman/home-assistant,turbokongen/home-assistant,robjohnson189/home-assistant,dmeulen/home-assistant,shaftoe/home-assistant,w1ll1am23/home-assistant,Zac-HD/home-assistant,ct-23/home-assistant,JshWright/home-assistant,hexxter/home-assistant,sffjunkie/home-assistant,Teagan42/home-assistant,coteyr/home-assistant,FreekingDean/home-assistant,ma314smith/home-assistant,rohitranjan1991/home-assistant,justyns/home-assistant,molobrakos/home-assistant,jabesq/home-assistant,luxus/home-assistant,devdelay/home-assistant,deisi/home-assistant,Duoxilian/home-assistant,nkgilley/home-
assistant,caiuspb/home-assistant,sffjunkie/home-assistant,mKeRix/home-assistant,ewandor/home-assistant,tchellomello/home-assistant,sander76/home-assistant,aoakeson/home-assistant,hexxter/home-assistant,miniconfig/home-assistant,robjohnson189/home-assistant,xifle/home-assistant,qedi-r/home-assistant,DavidLP/home-assistant,tchellomello/home-assistant,Cinntax/home-assistant,justyns/home-assistant,dmeulen/home-assistant,Zyell/home-assistant,nugget/home-assistant,PetePriority/home-assistant,Theb-1/home-assistant,tboyce1/home-assistant,lukas-hetzenecker/home-assistant,auduny/home-assistant,MungoRae/home-assistant,kyvinh/home-assistant,fbradyirl/home-assistant | homeassistant/components/sensor/eliqonline.py | homeassistant/components/sensor/eliqonline.py | """
homeassistant.components.sensor.eliqonline
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Monitors home energy use for the eliq online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.eliqonline/
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_UNKNOWN, CONF_ACCESS_TOKEN, CONF_NAME)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['eliqonline==1.0.11']
DEFAULT_NAME = "ELIQ Energy Usage"
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Set up the Eliq sensor. """
import eliqonline
access_token = config.get(CONF_ACCESS_TOKEN)
name = config.get(CONF_NAME, DEFAULT_NAME)
channel_id = config.get("channel_id")
if access_token is None:
_LOGGER.error(
"Configuration Error: "
"Please make sure you have configured your access token "
"that can be aquired from https://my.eliq.se/user/settings/api")
return False
api = eliqonline.API(access_token)
add_devices([EliqSensor(api, channel_id, name)])
class EliqSensor(Entity):
""" Implements a Eliq sensor. """
def __init__(self, api, channel_id, name):
self._name = name
self._unit_of_measurement = "W"
self._state = STATE_UNKNOWN
self.api = api
self.channel_id = channel_id
self.update()
@property
def name(self):
""" Returns the name. """
return self._name
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
@property
def state(self):
""" Returns the state of the device. """
return self._state
def update(self):
""" Gets the latest data. """
response = self.api.get_data_now(channelid=self.channel_id)
self._state = int(response.power)
| """
homeassistant.components.sensor.eliqonline
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
monitors home energy use for the eliq online service
api documentation:
https://my.eliq.se/knowledge/sv-SE/49-eliq-online/299-eliq-online-api
access to api access token:
https://my.eliq.se/user/settings/api
current energy use:
https://my.eliq.se/api/datanow?accesstoken=<token>
history:
https://my.eliq.se/api/data?startdate=2015-12-14&intervaltype=6min&accesstoken=<token>
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_UNKNOWN, CONF_ACCESS_TOKEN, CONF_NAME)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['eliqonline==1.0.11']
DEFAULT_NAME = "ELIQ Energy Usage"
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Set up the sensors """
import eliqonline
access_token = config.get(CONF_ACCESS_TOKEN)
name = config.get(CONF_NAME, DEFAULT_NAME)
channel_id = config.get("channel_id")
if access_token is None:
_LOGGER.error(
"Configuration Error: "
"Please make sure you have configured your access token "
"that can be aquired from https://my.eliq.se/user/settings/api")
return False
api = eliqonline.API(access_token)
add_devices([EliqSensor(api, channel_id, name)])
class EliqSensor(Entity):
""" Implements a Eliq sensor. """
def __init__(self, api, channel_id, name):
self._name = name
self._unit_of_measurement = "W"
self._state = STATE_UNKNOWN
self.api = api
self.channel_id = channel_id
self.update()
@property
def name(self):
""" Returns the name. """
return self._name
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
@property
def state(self):
""" Returns the state of the device. """
return self._state
def update(self):
""" Gets the latest data """
response = self.api.get_data_now(channelid=self.channel_id)
self._state = int(response.power)
| mit | Python |
b2aa91648fe3ae915381e68cac95e5c3f6e5a182 | add zhihu_login.py | dnxbjyj/python-basic,dnxbjyj/python-basic,dnxbjyj/python-basic | spider/login/zhihu_login.py | spider/login/zhihu_login.py | # coding:utf-8
# Simulate account login | mit | Python |
|
c505927ae756fe1740e8603aadf23dae0ad12ff5 | Create 01.CenturiesToMinutes.py | stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity | TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Lab/01.CenturiesToMinutes.py | TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Lab/01.CenturiesToMinutes.py | centuries = int(input())
years = centuries * 100
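# 365.2422 is the mean length of a year in days (mean tropical year).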
days = int(years * 365.2422)
hours = days * 24
minutes = hours * 60
print("%d centuries = %d years = %d days = %d hours %d = minutes" %
(centuries, years, days, hours, minutes))
| mit | Python |
|
dca52ab05af01b96610e5044c45bd7c0655c17ec | introduce an Exporter API | gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl | pynodegl-utils/pynodegl_utils/export.py | pynodegl-utils/pynodegl_utils/export.py | import os
import platform
import subprocess
import pynodegl as ngl
from PyQt5 import QtGui, QtCore
class _PipeThread(QtCore.QThread):
def __init__(self, fd, unused_fd, w, h, fps):
super(_PipeThread, self).__init__()
self.fd = fd
self.unused_fd = unused_fd
self.w, self.h, self.fps = w, h, fps
def run(self):
try:
self.run_with_except()
finally:
# very important in order to prevent deadlock if one thread fails
# one way or another
os.close(self.fd)
class _ReaderThread(_PipeThread):
def __init__(self, fd, unused_fd, w, h, fps, filename):
super(_ReaderThread, self).__init__(fd, unused_fd, w, h, fps)
self._filename = filename
def run_with_except(self):
cmd = ['ffmpeg', '-r', str(self.fps),
'-nostats', '-nostdin',
'-f', 'rawvideo',
'-video_size', '%dx%d' % (self.w, self.h),
'-pixel_format', 'rgba',
'-i', 'pipe:%d' % self.fd,
'-vf', 'vflip',
'-y', self._filename]
#print 'Executing: ' + ' '.join(cmd)
# Closing the unused file descriptor of the pipe is mandatory between
# the fork() and the exec() of ffmpeg in order to prevent deadlocks
return subprocess.call(cmd, preexec_fn=lambda: os.close(self.unused_fd))
class Exporter(QtCore.QObject):
progressed = QtCore.pyqtSignal(int)
def export(self, scene, filename, w, h, duration, fps):
fd_r, fd_w = os.pipe()
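# node.gl renders raw RGBA frames into the write end; the reader thread feeds the read end to ffmpeg for encoding.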
from pynodegl import Pipe, Scale
scene = Pipe(scene, fd_w, w, h)
reader = _ReaderThread(fd_r, fd_w, w, h, fps, filename)
reader.start()
# Surface Format
gl_format = QtGui.QSurfaceFormat()
gl_format.setVersion(3, 3)
gl_format.setProfile(QtGui.QSurfaceFormat.CoreProfile)
gl_format.setDepthBufferSize(24)
gl_format.setStencilBufferSize(8)
gl_format.setAlphaBufferSize(8)
# GL context
glctx = QtGui.QOpenGLContext()
glctx.setFormat(gl_format)
assert glctx.create() == True
assert glctx.isValid() == True
# Offscreen Surface
surface = QtGui.QOffscreenSurface()
surface.setFormat(gl_format)
surface.create()
assert surface.isValid() == True
glctx.makeCurrent(surface)
# Framebuffer
fbo = QtGui.QOpenGLFramebufferObject(w, h)
fbo.setAttachment(QtGui.QOpenGLFramebufferObject.CombinedDepthStencil)
assert fbo.isValid() == True
fbo.bind()
# node.gl context
ngl_viewer = ngl.Viewer()
ngl_viewer.set_scene(scene)
if platform.system() == 'Linux':
ngl_viewer.set_window(ngl.GLPLATFORM_GLX, ngl.GLAPI_OPENGL3)
elif platform.system() == 'Darwin':
ngl_viewer.set_window(ngl.GLPLATFORM_CGL, ngl.GLAPI_OPENGL3)
ngl_viewer.set_viewport(0, 0, w, h)
# Draw every frame
nb_frame = int(fps * duration)
for i in range(nb_frame):
time = i / float(fps)
ngl_viewer.draw(time)
self.progressed.emit(i*100 / nb_frame)
glctx.swapBuffers(surface)
self.progressed.emit(100)
os.close(fd_w)
fbo.release()
glctx.doneCurrent()
reader.wait()
def test_export():
import sys
def _get_scene(duration):
from examples import misc
class DummyCfg: pass
cfg = DummyCfg()
cfg.duration = duration
return misc.triangle(cfg)
def print_progress(progress):
sys.stdout.write('\r%d%%' % progress)
if progress == 100:
sys.stdout.write('\n')
if len(sys.argv) != 2:
print 'Usage: %s <outfile>' % sys.argv[0]
sys.exit(0)
filename = sys.argv[1]
duration = 5
scene = _get_scene(duration)
app = QtGui.QGuiApplication(sys.argv)
exporter = Exporter()
exporter.progressed.connect(print_progress)
exporter.export(scene, filename, 320, 240, duration, 60)
if __name__ == '__main__':
test_export()
| apache-2.0 | Python |
|
ab9800183b3ab229782016aa3f88e6825467d01b | Add forgot username tests | renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar | api/radar_api/tests/test_forgot_username.py | api/radar_api/tests/test_forgot_username.py | def test_forgot_username(app):
client = app.test_client()
response = client.post('/forgot-username', data={
'email': 'foo@example.org'
})
assert response.status_code == 200
def test_email_missing(app):
client = app.test_client()
response = client.post('/forgot-username', data={})
assert response.status_code == 422
def test_user_not_found(app):
client = app.test_client()
response = client.post('/forgot-username', data={
'email': '404@example.org'
})
assert response.status_code == 422
| agpl-3.0 | Python |
|
9b9fb4df30a8183c4de9f157200c5ff225d11d67 | Add the plot script | liveontologies/elk-justifications,liveontologies/elk-justifications,liveontologies/elk-justifications,liveontologies/elk-justifications | src/scripts/prepare_gnuplot.py | src/scripts/prepare_gnuplot.py | #!/usr/bin/python
import sys
import argparse
import csv
from string import Template
parser = argparse.ArgumentParser(description='Prepare gnuplot script from the supplied data files.')
parser.add_argument('files', nargs='+', help='The data files.')
MIN_Y_RANGE = 0.000001
GNUPLOT_SCRIPT_TEMPLATE = Template("""
reset
set terminal lua tikz latex
set output "plot.tex"
#set title "tau_m"
set style data lines
set key left top
set logscale y
#set tics axis
#shrink = 0.1
set xrange[0:100]
set yrange[${lower_y_range}:${upper_y_range}]
#set xtics shrink/2
#set ytics shrink/2
#set size square
set xlabel "\\\\% of queries"
set ylabel "time in seconds"
plot ${plot_cmd}
${data}
pause -1
""")
if __name__ == "__main__":
args = parser.parse_args()
plot_cmd = ""
data_string = ""
min_data = sys.float_info.max
max_data = -sys.float_info.max # sys.float_info.min is the smallest positive float, not the most negative
for data_file in args.files:
plot_cmd += """'-' title "%s", """ % data_file
with open(data_file) as fd:
reader = csv.reader(fd, delimiter=',', quotechar='"')
header = reader.next()
# print header
# time_index = header.index('time')
time_index = 0
for h in header:
if h.find('time') >= 0:
break
time_index += 1
data = []
for line in reader:
data.append(float(line[time_index])/1000)
data.sort()
if data[0] < min_data:
min_data = data[0]
if data[len(data) - 1] > max_data:
max_data = data[len(data) - 1]
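# Emit the empirical CDF: x is the percentage of queries answered within d seconds.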
step = 100.0/len(data)
x = step
for d in data:
data_string += "%f\t%f\n" % (x, d)
x += step
data_string += "e\n"
min_data = max(min_data, MIN_Y_RANGE)
print GNUPLOT_SCRIPT_TEMPLATE.substitute(plot_cmd=plot_cmd, data=data_string, lower_y_range=min_data, upper_y_range=max_data)
| apache-2.0 | Python |
|
6f1729a96e1fe0f4e8d00813b6c2829519cdc15f | Handle the case where we encounter a snap shot correctly. | n0ano/ganttclient | nova/tests/test_image_utils.py | nova/tests/test_image_utils.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova import utils
from nova.virt import images
class ImageUtilsTestCase(test.TestCase):
def test_qemu_info(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEquals('disk.config', image_info['image'])
self.assertEquals('raw', image_info['file format'])
self.assertEquals('64M (67108864 bytes)', image_info['virtual size'])
self.assertEquals('96K', image_info['disk size'])
self.assertEquals('bb', image_info['blah blah'])
self.assertEquals("65536", image_info['cluster_size'])
def test_qemu_info_snap(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEquals('disk.config', image_info['image'])
self.assertEquals('raw', image_info['file format'])
self.assertEquals('64M (67108864 bytes)', image_info['virtual size'])
self.assertEquals('96K', image_info['disk size'])
self.assertEquals("65536", image_info['cluster_size'])
# This would be triggered if the split encountered this section
self.assertNotIn('snapshot list', image_info)
bad_cap = '1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10'
self.assertNotIn(bad_cap, image_info)
| apache-2.0 | Python |
|
27a3ca8d746890c7404845d18b8031763ec6b6a7 | add netcat-nonblock.py | chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes | python/netcat-nonblock.py | python/netcat-nonblock.py | #!/usr/bin/python
import errno
import fcntl
import os
import select
import socket
import sys
def setNonBlocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def nonBlockingWrite(fd, data):
try:
nw = os.write(fd, data)
return nw
except OSError as e:
if e.errno == errno.EWOULDBLOCK:
return -1
def relay(sock):
socketEvents = select.POLLIN
poll = select.poll()
poll.register(sock, socketEvents)
poll.register(sys.stdin, select.POLLIN)
setNonBlocking(sock)
# setNonBlocking(sys.stdin)
# setNonBlocking(sys.stdout)
done = False
stdoutOutputBuffer = ''
socketOutputBuffer = ''
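# Pending bytes read from stdin that could not be written to the socket; while non-empty, stdin polling is suspended and POLLOUT is watched.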
while not done:
events = poll.poll(10000) # 10 seconds
for fileno, event in events:
if event & select.POLLIN:
if fileno == sock.fileno():
data = sock.recv(8192)
if data:
nw = sys.stdout.write(data) # stdout does support non-blocking write, though
else:
done = True
else:
assert fileno == sys.stdin.fileno()
data = os.read(fileno, 8192)
if data:
assert len(socketOutputBuffer) == 0
nw = nonBlockingWrite(sock.fileno(), data)
if nw < len(data):
if nw < 0:
nw = 0
socketOutputBuffer = data[nw:]
socketEvents |= select.POLLOUT
poll.register(sock, socketEvents)
poll.unregister(sys.stdin)
else:
sock.shutdown(socket.SHUT_WR)
poll.unregister(sys.stdin)
if event & select.POLLOUT:
if fileno == sock.fileno():
assert len(socketOutputBuffer) > 0
nw = nonBlockingWrite(sock.fileno(), socketOutputBuffer)
if nw < len(socketOutputBuffer):
assert nw > 0
socketOutputBuffer = socketOutputBuffer[nw:]
else:
socketOutputBuffer = ''
socketEvents &= ~select.POLLOUT
poll.register(sock, socketEvents)
poll.register(sys.stdin, select.POLLIN)
def main(argv):
if len(argv) < 3:
binary = argv[0]
print "Usage:\n %s -l port\n %s host port" % (binary, binary)
return
port = int(argv[2])
if argv[1] == "-l":
# server
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('', port))
server_socket.listen(5)
(client_socket, client_address) = server_socket.accept()
server_socket.close()
relay(client_socket)
else:
# client
sock = socket.create_connection((argv[1], port))
relay(sock)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | Python |
|
34fd215d73d87c017cdae299aebd6484e6541991 | Revert 176254 > Android: upgrade sandbox_linux_unitests to a stable test > > > BUG=166704 > NOTRY=true > > Review URL: https://chromiumcodereview.appspot.com/11783106 | Chilledheart/chromium,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,M4sse/chromium.src,hujiajie/pa-chromium,anirudhSK/chromium,dushu1203/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,anirudhSK/chromium,anirudhSK/chromium,anirudhSK/chromium,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,hujiajie/pa-chromium,dushu1203/chromium.src,jaruba/chromium.src,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk,jaruba/chromium.src,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,hujiajie/pa-chromium,pozdnyakov/chromium-crosswalk,mogoweb/chromium-crosswalk,dednal/chromium.src,nacl-webkit/chrome_deps,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,zcbenz/cefode-chromium,Just-D/chromium-1,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,nacl-webkit/chrome_deps,hujiajie/pa-chromium,dushu1203/chromium.src,jaruba/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,Just-D/chromium-1,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk,anirudhSK/chromium,anirudhSK/chromium,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,anirudhSK/chromium,nacl-webkit/chrome_deps,ondra-novak/chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,anirudhSK/chromium,nacl-webkit/chrome_deps,markYoungH/chromium.src,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,axinging/chromium-crosswalk,dushu1203/chromium.src,ChromiumWebApps/chromium,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,timopulkkinen/BubbleFish,mogoweb/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,Just-D/chromium-1,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,pozdnyakov/chromium-crosswalk,chuan9/chromium-crosswalk,ondra-novak/chromium.src,M4sse/chromium.src,dednal/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,jaruba/chromium.src,ltilve/chromium,patrickm/chromium.src,Chilledheart/chromium,hujiajie/pa-chromium,ChromiumWebApps/chromium,nacl-webkit/chrome_deps,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,Chilledheart/chromium,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,hujiajie/pa-chromium,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,Chilledheart/chromium,mogoweb/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,hujiajie/pa-chromium,dednal/chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,mogoweb/chromium-crosswalk,Just-D/chromium-1,mogoweb/chromium-crosswalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,hujiajie/pa-chromium,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,zcbenz/cefode-chromium,M4sse/chromium.src,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,ChromiumWebApps/chromium,timopulkkinen/BubbleFish,dushu1203/chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,mogoweb/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,littlstar/chromium.src,dednal/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,zcbenz/cefode-chromium,Just-D/chromium-1,Chilledheart/chromium,hgl888/chromium-crosswalk,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,dednal/chromium.src,timopulkkinen/BubbleFish,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,zcbenz/cefode-chromium,chuan9/chromium-crosswalk,Jonekee/chromium.src,zcbenz/cefode-chromium,zcbenz/cefode-chromium,pozdnyakov/chromium-crosswalk,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,hujiajie/pa-chromium,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,patrickm/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,patrickm/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,dednal/chromium.src,timopulkkinen/BubbleFish,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,Just-D/chromium-1,Just-D/chromium-1,timopulkkinen/BubbleFish,crosswalk-project/chromium-crosswalk-efl | build/android/pylib/gtest/gtest_config.py | build/android/pylib/gtest/gtest_config.py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
'sandbox_linux_unittests',
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'base_unittests',
'cc_unittests',
'content_unittests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'android_webview_unittests',
]
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'base_unittests',
'cc_unittests',
'content_unittests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sandbox_linux_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'android_webview_unittests',
]
| bsd-3-clause | Python |
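The two suite lists in gtest_config.py drive which native test binaries Android bots run: STABLE_TEST_SUITES is the default set for local testing and the commit queue, while EXPERIMENTAL_TEST_SUITES must be opted into. A minimal sketch of how a runner might consume the lists follows; the run_suite helper and the --experimental flag are hypothetical illustrations, not Chromium's actual harness.
import sys

EXPERIMENTAL_TEST_SUITES = ['sandbox_linux_unittests']
STABLE_TEST_SUITES = ['base_unittests', 'net_unittests', 'unit_tests']

def run_suite(name):
    # Placeholder: a real harness would push the gtest binary to a device
    # and collect its results; here we only report the suite name.
    print('running %s' % name)

suites = list(STABLE_TEST_SUITES)
if '--experimental' in sys.argv:
    suites += EXPERIMENTAL_TEST_SUITES
for suite in suites:
    run_suite(suite)
Keeping the experimental list separate lets a flaky suite be promoted or, as in this revert, demoted without touching the runner itself.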
4783f68cb2fc3eb25b611b4662d1ca3a2fddd7b3 | Fix issue #51: added throttled_ftpd.py script in the demo directory. | giampaolo/pyftpdlib | demo/throttled_ftpd.py | demo/throttled_ftpd.py | #!/usr/bin/env python
# throttled_ftpd.py
"""ftpd supporting bandwidth throttling capabilities for data transfer.
"""
import os
import time
import asyncore
from pyftpdlib import ftpserver
class ThrottledDTPHandler(ftpserver.DTPHandler):
"""A DTPHandler which wraps sending and receiving in a data counter and
sleep loop so that you burst to no more than x Kb/sec average.
"""
# maximum number of bytes to transmit in a second (0 == no limit)
read_limit = 0
write_limit = 0
    # the smaller the buffers, the less bursty and the smoother the throughput
ac_in_buffer_size = 2048
ac_out_buffer_size = 2048
def __init__(self, sock_obj, cmd_channel):
ftpserver.DTPHandler.__init__(self, sock_obj, cmd_channel)
self.timenext = 0
self.datacount = 0
self.sleep = None
# --- overridden asyncore methods
def readable(self):
return self.receive and not self.sleeping()
def writable(self):
return (self.producer_fifo or (not self.connected)) and not \
self.sleeping()
def recv(self, buffer_size):
chunk = asyncore.dispatcher.recv(self, buffer_size)
if self.read_limit:
self.throttle_bandwidth(len(chunk), self.read_limit)
return chunk
def send(self, data):
num_sent = asyncore.dispatcher.send(self, data)
if self.write_limit:
self.throttle_bandwidth(num_sent, self.write_limit)
return num_sent
# --- new methods
def sleeping(self):
"""Return True if the channel is temporary blocked."""
if self.sleep:
if time.time() >= self.sleep:
self.sleep = None
else:
return True
return False
def throttle_bandwidth(self, len_chunk, max_speed):
"""A method which counts data transmitted so that you burst to
no more than x Kb/sec average."""
self.datacount += len_chunk
if self.datacount >= max_speed:
self.datacount = 0
now = time.time()
sleepfor = self.timenext - now
if sleepfor > 0:
# we've passed bandwidth limits
self.sleep = now + (sleepfor * 2)
self.timenext = now + 1
if __name__ == '__main__':
authorizer = ftpserver.DummyAuthorizer()
authorizer.add_user('user', '12345', os.getcwd(), perm=('r', 'w'))
# use the modified DTPHandler class; set a speed
# limit for both sending and receiving
dtp_handler = ThrottledDTPHandler
    dtp_handler.read_limit = 30720  # 30 Kb/sec (30 * 1024)
    dtp_handler.write_limit = 30720  # 30 Kb/sec (30 * 1024)
ftp_handler = ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
# have the ftp handler use the different dtp handler
ftp_handler.dtp_handler = dtp_handler
ftpd = ftpserver.FTPServer(('127.0.0.1', 21), ftp_handler)
ftpd.serve_forever()
| mit | Python |
|
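A quick way to observe the throttling from the client side is to time a download against the demo server. The sketch below uses only the standard-library ftplib module; it assumes the server above is running locally, and 'somefile.bin' is a placeholder name for any file present in the served directory.
import time
from ftplib import FTP

ftp = FTP()
ftp.connect('127.0.0.1', 21)
ftp.login('user', '12345')
chunks = []
start = time.time()
# retrbinary invokes the callback once per received block of data
ftp.retrbinary('RETR somefile.bin', chunks.append)
elapsed = time.time() - start
total = sum(len(c) for c in chunks)
print('received %d bytes at %.1f KB/s' % (total, total / 1024.0 / elapsed))
ftp.quit()
With the demo's 30 Kb/sec write_limit in effect, the reported rate should settle close to that figure for any reasonably large file.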
732fd24d06f49570c24016b7adfb3ad511e2e6af | Add test for ValidationResultIdentifier.to_tuple() | great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations | tests/data_context/test_data_context_resource_identifiers.py | tests/data_context/test_data_context_resource_identifiers.py | from great_expectations.data_context.types.resource_identifiers import (
ValidationResultIdentifier
)
def test_ValidationResultIdentifier_to_tuple(expectation_suite_identifier):
validation_result_identifier = ValidationResultIdentifier(
expectation_suite_identifier,
"my_run_id",
"my_batch_identifier"
)
assert validation_result_identifier.to_tuple() == (
"my", "expectation", "suite", "name",
"my_run_id",
"my_batch_identifier"
)
assert validation_result_identifier.to_fixed_length_tuple() == (
"my.expectation.suite.name",
"my_run_id",
"my_batch_identifier"
)
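    # A run_id of None should serialize as the "__none__" placeholder: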
validation_result_identifier_no_run_id = ValidationResultIdentifier(
expectation_suite_identifier,
None,
"my_batch_identifier"
)
assert validation_result_identifier_no_run_id.to_tuple() == (
"my", "expectation", "suite", "name",
"__none__",
"my_batch_identifier"
)
assert validation_result_identifier_no_run_id.to_fixed_length_tuple() == (
"my.expectation.suite.name",
"__none__",
"my_batch_identifier"
)
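    # Likewise for a batch identifier of None: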
validation_result_identifier_no_batch_identifier = ValidationResultIdentifier(
expectation_suite_identifier,
"my_run_id",
None
)
assert validation_result_identifier_no_batch_identifier.to_tuple() == (
"my", "expectation", "suite", "name",
"my_run_id",
"__none__"
)
assert validation_result_identifier_no_batch_identifier.to_fixed_length_tuple() == (
"my.expectation.suite.name",
"my_run_id",
"__none__"
)
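    # With both optional parts missing, two placeholders appear: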
validation_result_identifier_no_run_id_no_batch_identifier = ValidationResultIdentifier(
expectation_suite_identifier,
None,
None
)
assert validation_result_identifier_no_run_id_no_batch_identifier.to_tuple() == (
"my", "expectation", "suite", "name",
"__none__",
"__none__"
)
assert validation_result_identifier_no_run_id_no_batch_identifier.to_fixed_length_tuple() == (
"my.expectation.suite.name",
"__none__",
"__none__"
) | apache-2.0 | Python |
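The convention this test pins down is worth spelling out: a dotted expectation suite name expands into one tuple element per segment, and absent run or batch identifiers serialize as the literal placeholder "__none__", so the tuple keeps a predictable shape for tuple-keyed stores. The sketch below is an illustration of that behavior under those assumptions, not great_expectations' actual implementation.
def to_tuple(suite_name, run_id, batch_identifier):
    # Split the dotted suite name into individual tuple elements and
    # substitute the placeholder for any missing optional part.
    parts = tuple(suite_name.split('.'))
    return parts + (run_id or '__none__', batch_identifier or '__none__')

def to_fixed_length_tuple(suite_name, run_id, batch_identifier):
    # Fixed-length variant: the suite name stays one dotted string, so the
    # result always has exactly three elements.
    return (suite_name, run_id or '__none__', batch_identifier or '__none__')

assert to_tuple('my.expectation.suite.name', None, 'my_batch_identifier') == (
    'my', 'expectation', 'suite', 'name', '__none__', 'my_batch_identifier')
assert to_fixed_length_tuple('my.expectation.suite.name', 'my_run_id', None) == (
    'my.expectation.suite.name', 'my_run_id', '__none__')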