Dataset schema (each field's type and observed range across the dataset):

  commit         string, length 40 to 40
  subject        string, length 4 to 1.73k
  repos          string, length 5 to 127k
  old_file       string, length 2 to 751
  new_file       string, length 2 to 751
  new_contents   string, length 1 to 8.98k
  old_contents   string, length 0 to 6.59k
  license        string, 13 distinct classes
  lang           string, 23 distinct classes

Records follow in field order: commit, subject, repos, old_file, new_file,
new_contents, old_contents (omitted when empty), license, lang.
8e1a3cc1a3d4e4d9bc63fb73a8787e5c627afb7d
add tests for service inspector
mylokin/servy
tests/test_service_inspector.py
tests/test_service_inspector.py
from __future__ import absolute_import

import unittest

import servy.server


class Dummy(object):
    def fn(self):
        pass


class Service(servy.server.Service):
    def __call__(self):
        pass


class ServiceDetection(unittest.TestCase):
    def test_lambda(self):
        self.assertTrue(servy.server.ServiceInspector.is_service(lambda x: x))

    def test_method(self):
        self.assertTrue(servy.server.ServiceInspector.is_service(Dummy().fn))

    def test_callable_class_service(self):
        self.assertTrue(servy.server.ServiceInspector.is_service(Service()))

    def test_type(self):
        self.assertFalse(servy.server.ServiceInspector.is_service(dict))

    def test_int(self):
        self.assertFalse(servy.server.ServiceInspector.is_service(1))

    def test_string(self):
        self.assertFalse(servy.server.ServiceInspector.is_service("1"))

    def test_dummy_class(self):
        self.assertFalse(servy.server.ServiceInspector.is_service(Dummy))


class ContainerDetection(unittest.TestCase):
    def test_dict(self):
        self.assertTrue(servy.server.ServiceInspector.is_container({}))

    def test_service_class(self):
        self.assertTrue(servy.server.ServiceInspector.is_container(Service))

    def test_service_class_instance(self):
        self.assertTrue(servy.server.ServiceInspector.is_container(Service()))

    def test_dummy_class(self):
        self.assertFalse(servy.server.ServiceInspector.is_container(Dummy))


class PublicMethodsDetection(unittest.TestCase):
    def test_double_underscores(self):
        items = {
            '__private': None,
        }
        self.assertEqual(
            servy.server.ServiceInspector.get_public(items.items()),
            {},
        )

    def test_single_underscores(self):
        items = {
            '_private': None,
        }
        self.assertEqual(
            servy.server.ServiceInspector.get_public(items.items()),
            {},
        )
mit
Python
43de875bcb2dcf4213b881ff1de8f9e715fb2d30
Add brute_force.py
emschorsch/lineup-optimizer
brute_force.py
brute_force.py
from battingorder import *
from itertools import permutations

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Brute force.')
    parser.add_argument("filename", nargs='?', default='braves.data',
                        help="file with necessary statistics")
    args = parser.parse_args()

    player_matrices = readdata(args.filename)
    run_matrix = createrunmatrix()

    start_order = range(9)
    samples = []
    for order in permutations(start_order):
        score = calculate(order, player_matrices, run_matrix)
        samples.append((score, order))

    samples.sort(reverse=True)
    best = samples[0]
    print("Final ordering: {}".format(best[1]))
    print("This lineup will score an average of {} runs per game.".format(best[0]))
mit
Python
b013f059a5d39acf05ba8e5ef9d6cb1d9e3f724c
add a script to exercise the example jsonrpc methods
pijyoi/jsonrpc,pijyoi/jsonrpc
tester.py
tester.py
import zmq


class JRPC:
    def __init__(self):
        self.id = 0

    def make_req(self, method, params):
        req = {"jsonrpc": "2.0", "method": method,
               "params": params, "id": self.id}
        self.id += 1
        return req


zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")

jrpc = JRPC()

req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
print zsock.recv()

req = jrpc.make_req("subtract", {"minuend": 10, "subtrahend": 5})
zsock.send_json(req)
print zsock.recv()

req = jrpc.make_req("subtract", [10, 5])
zsock.send_json(req)
print zsock.recv()

req_array = []
for k in range(10):
    req = jrpc.make_req("sum", range(1 + k))
    req_array.append(req)
zsock.send_json(req_array)
print zsock.recv()
mit
Python
061dcecdd7b691cefd34c8a254037a399b251378
add a new script to build a pypi 'simple' index from a dir containing wheels
antocuni/pypy-wheels,antocuni/pypy-wheels
build_index.py
build_index.py
import sys
import py

PACKAGES = [
    'netifaces',
]


class IndexBuilder(object):

    def __init__(self, wheeldir, outdir):
        self.wheeldir = py.path.local(wheeldir)
        self.outdir = py.path.local(outdir)
        self.packages = []

    def copy_wheels(self):
        for whl in self.wheeldir.visit('*.whl'):
            name, version = self.parse(whl)
            self.packages.append(name)
            d = self.outdir.join(name).ensure(dir=True)
            dst = d.join(whl.basename)
            if dst.check(file=False):
                whl.copy(d)

    def build_index(self):
        self._write_index(self.outdir, 'PyPy Wheel Index', self.packages)
        for pkg in self.packages:
            d = self.outdir.join(pkg)
            wheels = [whl.basename for whl in d.listdir('*.whl')]
            self._write_index(d, 'Links for %s' % pkg, wheels)

    def parse(self, f):
        name, version, _ = f.basename.split('-', 2)
        return name, version

    def _write_index(self, d, title, links):
        lines = [
            '<html><body><h1>{title}</h1>'.format(title=title)
        ]
        for name in links:
            line = '<a href="{name}">{name}</a>'.format(name=name)
            lines.append(line)
        lines.append('</body></html>')
        html = '\n'.join(lines)
        d.join('index.html').write(html)


def main():
    wheeldir = sys.argv[1]
    outdir = sys.argv[2]
    index = IndexBuilder(wheeldir, outdir)
    index.copy_wheels()
    index.build_index()


if __name__ == '__main__':
    main()
mit
Python
b22bf4e2431ac3598d9c8afee3f924d940e2297e
Create building_df.py
algoix/blog
building_df.py
building_df.py
"""Utility functions""" import os import pandas as pd def symbol_to_path(symbol, base_dir="data"): """Return CSV file path given ticker symbol.""" return os.path.join(base_dir, "{}.csv".format(str(symbol))) def get_data(symbols, dates): """Read stock data (adjusted close) for given symbols from CSV files.""" df = pd.DataFrame(index=dates) if 'SPY' not in symbols: # add SPY for reference, if absent symbols.insert(0, 'SPY') for symbol in symbols: #Read and join data for each symbol df.join(symbol,'inner') return df def test_run(): # Define a date range dates = pd.date_range('2010-01-22', '2010-01-26') # Choose stock symbols to read symbols = ['GOOG', 'IBM', 'GLD'] # Get stock data df = get_data(symbols, dates) print df if __name__ == "__main__": test_run()
mit
Python
7d84cf8c41105d9990b8cfdf176415f1bcb20e0f
Add tests for batch norm
explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc
thinc/tests/integration/test_batch_norm.py
thinc/tests/integration/test_batch_norm.py
import pytest
from mock import MagicMock
import numpy
import numpy.random
from numpy.testing import assert_allclose
from hypothesis import given, settings, strategies

from ...neural._classes.batchnorm import BatchNorm
from ...api import layerize, noop
from ...neural._classes.affine import Affine
from ..strategies import arrays_OI_O_BI
from ..util import get_model, get_shape


@pytest.fixture
def shape():
    return (10, 20)


@pytest.fixture
def layer(shape):
    dummy = layerize(noop())
    dummy.nO = shape[-1]
    return dummy


def test_batch_norm_init(layer):
    layer = BatchNorm(layer)


def test_batch_norm_weights_init_to_one(layer):
    layer = BatchNorm(layer)
    assert layer.G is not None
    assert all(weight == 1. for weight in layer.G.flatten())


def test_batch_norm_runs_child_hooks(layer):
    mock_hook = MagicMock()
    layer.on_data_hooks.append(mock_hook)
    layer = BatchNorm(layer)
    for hook in layer.on_data_hooks:
        hook(layer, None)
    mock_hook.assert_called()


def test_batch_norm_predict_maintains_shape(layer, shape):
    input_ = numpy.ones(shape)
    input1 = layer.predict(input_)
    assert_allclose(input1, input_)
    layer = BatchNorm(layer)
    output = layer.predict(input_)
    assert output.shape == input_.shape


@given(arrays_OI_O_BI(max_batch=8, max_out=8, max_in=8))
def test_begin_update_matches_predict(W_b_input):
    model = get_model(W_b_input)
    nr_batch, nr_out, nr_in = get_shape(W_b_input)
    W, b, input_ = W_b_input

    model = BatchNorm(model)

    fwd_via_begin_update, finish_update = model.begin_update(input_)
    fwd_via_predict_batch = model.predict(input_)
    assert_allclose(fwd_via_begin_update, fwd_via_predict_batch)


@given(arrays_OI_O_BI(max_batch=8, max_out=8, max_in=8))
def test_finish_update_calls_optimizer_with_weights(W_b_input):
    model = get_model(W_b_input)
    nr_batch, nr_out, nr_in = get_shape(W_b_input)
    W, b, input_ = W_b_input

    model = BatchNorm(model)

    output, finish_update = model.begin_update(input_)

    seen_keys = set()

    def sgd(data, gradient, key=None, **kwargs):
        seen_keys.add(key)
        assert data.shape == gradient.shape
        assert data.ndim == 1
        assert gradient.ndim == 1

    grad_BO = numpy.ones((nr_batch, nr_out))
    grad_BI = finish_update(grad_BO, sgd)

    assert seen_keys == {id(model._mem), id(model.child._mem)}
mit
Python
cca6eee8dbf4dda84c74dfedef1cf4bcb5264ca5
Add the first database revision
kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io
admin/migrations/versions/ff0417f4318f_.py
admin/migrations/versions/ff0417f4318f_.py
""" Initial schema Revision ID: ff0417f4318f Revises: None Create Date: 2016-06-25 13:07:11.132070 """ # revision identifiers, used by Alembic. revision = 'ff0417f4318f' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): op.create_table('domain', sa.Column('created_at', sa.Date(), nullable=False), sa.Column('updated_at', sa.Date(), nullable=True), sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('max_users', sa.Integer(), nullable=False), sa.Column('max_aliases', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('name') ) op.create_table('alias', sa.Column('created_at', sa.Date(), nullable=False), sa.Column('updated_at', sa.Date(), nullable=True), sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('localpart', sa.String(length=80), nullable=False), sa.Column('destination', sa.String(), nullable=False), sa.Column('domain_name', sa.String(length=80), nullable=False), sa.Column('email', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ), sa.PrimaryKeyConstraint('email') ) op.create_table('user', sa.Column('created_at', sa.Date(), nullable=False), sa.Column('updated_at', sa.Date(), nullable=True), sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('localpart', sa.String(length=80), nullable=False), sa.Column('password', sa.String(length=255), nullable=False), sa.Column('quota_bytes', sa.Integer(), nullable=False), sa.Column('global_admin', sa.Boolean(), nullable=False), sa.Column('enable_imap', sa.Boolean(), nullable=False), sa.Column('enable_pop', sa.Boolean(), nullable=False), sa.Column('forward_enabled', sa.Boolean(), nullable=False), sa.Column('forward_destination', sa.String(length=255), nullable=True), sa.Column('reply_enabled', sa.Boolean(), nullable=False), sa.Column('reply_subject', sa.String(length=255), nullable=True), sa.Column('reply_body', sa.Text(), nullable=True), sa.Column('displayed_name', sa.String(length=160), nullable=False), sa.Column('spam_enabled', sa.Boolean(), nullable=False), sa.Column('spam_threshold', sa.Numeric(), nullable=False), sa.Column('domain_name', sa.String(length=80), nullable=False), sa.Column('email', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ), sa.PrimaryKeyConstraint('email') ) op.create_table('fetch', sa.Column('created_at', sa.Date(), nullable=False), sa.Column('updated_at', sa.Date(), nullable=True), sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('user_email', sa.String(length=255), nullable=False), sa.Column('protocol', sa.Enum('imap', 'pop3'), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('port', sa.Integer(), nullable=False), sa.Column('tls', sa.Boolean(), nullable=False), sa.Column('username', sa.String(length=255), nullable=False), sa.Column('password', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['user_email'], ['user.email'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('manager', sa.Column('domain_name', sa.String(length=80), nullable=True), sa.Column('user_email', sa.String(length=255), nullable=True), sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ), sa.ForeignKeyConstraint(['user_email'], ['user.email'], ) ) def downgrade(): op.drop_table('manager') op.drop_table('fetch') op.drop_table('user') op.drop_table('alias') op.drop_table('domain')
mit
Python
7473384155edbf85304cc541325d0a94a75d2cf4
Add converting script
edosedgar/stm32f0_ARM,edosedgar/stm32f0_ARM,edosedgar/stm32f0_ARM,edosedgar/stm32f0_ARM
labs/12_i2c_oled_display/convert.py
labs/12_i2c_oled_display/convert.py
import imageio
import sys
import os
import numpy as np

if (len(sys.argv) != 2):
    print("Format: python convert.py grayscale_image_name")
    sys.exit(1)

try:
    data = imageio.imread(sys.argv[1])
except:
    print("Wrong image name!")
    sys.exit(1)

if (len(data.shape) != 2):
    print("Image must be grayscale!")
    sys.exit(1)

output = open(os.path.splitext(sys.argv[1])[0] + ".c", "w")
output.write("const unsigned char my_pic[] = {\n")

image = data.flatten(order='C')
fimage = np.array_split(image, image.shape[0]//16)
for chunk in fimage:
    fstr = ', '.join(['0x%02x' % x for x in chunk])
    output.write(" " + fstr)
    output.write(",\n")
output.write("}")
output.close()

print("Done! The array is stored in " +\
      os.path.splitext(sys.argv[1])[0] + ".c")
mit
Python
3db7c5502bcba0adbfbcf6649c0b4179b37cd74a
Create redis_board.py
streed/simpleRaft
simpleRaft/boards/redis_board.py
simpleRaft/boards/redis_board.py
import redis

from board import Board


class RedisBoard( Board ):
    """This will create a message board that is backed by Redis."""

    def __init__( self, *args, **kwargs ):
        """Creates the Redis connection."""
        self.redis = redis.Redis( *args, **kwargs )

    def set_owner( self, owner ):
        self.owner = owner

    def post_message( self, message ):
        """This will append the message to the list."""
        pass

    def get_message( self ):
        """This will pop a message off the list."""
        pass

    def _key( self ):
        if not self.key:
            self.key = "%s-queue" % self.owner
        return self.key
mit
Python
69fbab70f09f83e763f9af7ff02d028af62d8d89
Create weighted_4_node_probability_convergence.py
smellydog521/classicPlayParsing,smellydog521/classicPlayParsing
weighted_4_node_probability_convergence.py
weighted_4_node_probability_convergence.py
# statistics on convergence_weighted_4_node.txt
# output into a csv file

import re, sys, numpy as np, pandas as pd
from pandas import Series, DataFrame


def main(argv):
    author = ''
    play = ''
    sub = []
    play_subgraph = Series()
    l = ''
    subgraph = ''
    subgraphs = []
    pro = 0.0
    pros = []

    f = open('./convergence_weighted_4_node.txt', 'r')
    fi = open('./convergence_weighted_4_node.csv', 'w')

    # first to get the full index of subgraphs
    for line in f:
        if '*:' in line or '-:' in line:
            continue
        l = re.split(':', line.strip())
        subgraph = l[0]
        if subgraph not in sub:
            sub.append(subgraph)
    df = DataFrame(index=sub)

    f.seek(0)
    for line in f:
        if '*:' in line:
            author = line[10:12]
        elif '-:' in line:
            if play != '':
                play_subgraph = Series(pros, index=subgraphs)
                #play_subgraph=Series(sub_pro,index=sub,dtype=float)
                play_subgraph.name = author + ':' + play
                play_subgraph.index.name = 'probability'
                df[play_subgraph.name] = play_subgraph
                #if author=='Sh':
                #    print 'play_subgraph.name = '+play_subgraph.name
                #    print play_subgraph
                #    print 'df'
                #    print df[play_subgraph.name]
            play = re.split('-', line)[6]
            subgraphs = []
            pros = []
        else:
            l = re.split(':', line.strip())
            subgraph = l[0]
            pro = float(l[-1])
            subgraphs.append(subgraph)
            pros.append(pro)
            #sub_pro[subgraph] = pro

    print 'sub has ' + str(len(sub)) + ' lines.'
    #df.fillna(0)
    #print df
    df.to_csv(fi)
    #print sub


if __name__ == '__main__':
    main(sys.argv)
apache-2.0
Python
f414c122eea771da74efc5837b7bd650ec022445
normalise - adds new ffv1/mkv script
kieranjol/IFIscripts
normalise.py
normalise.py
#!/usr/bin/env python
'''
Performs normalisation to FFV1/Matroska.
This performs a basic normalisation and does not enforce any folder structure.
This supercedes makeffv1 within our workflows.
This is mostly because makeffv1 imposes a specific, outdated folder structure,
and it's best to let SIPCREATOR handle the folder structure and let
normalise.py handle the actual normalisation.
'''
import sys
import os
import subprocess
import ififuncs


def extract_provenance(filename):
    '''
    This will extract mediainfo and mediatrace XML
    '''
    parent_folder = os.path.dirname(filename)
    inputxml = "%s/%s_mediainfo.xml" % (parent_folder, os.path.basename(filename))
    inputtracexml = "%s/%s_mediatrace.xml" % (parent_folder, os.path.basename(filename))
    print(' - Generating mediainfo xml of input file and saving it in %s' % inputxml)
    ififuncs.make_mediainfo(inputxml, 'mediaxmlinput', filename)
    print ' - Generating mediatrace xml of input file and saving it in %s' % inputtracexml
    ififuncs.make_mediatrace(inputtracexml, 'mediatracexmlinput', filename)
    return parent_folder


def normalise_process(filename):
    '''
    Begins the actual normalisation process using FFmpeg
    '''
    output_uuid = ififuncs.create_uuid()
    print(' - The following UUID has been generated: %s' % output_uuid)
    parent_folder = os.path.dirname(filename)
    output = "%s/%s.mkv" % (
        parent_folder, output_uuid
    )
    print(' - The normalise file will have this filename: %s' % output)
    fmd5 = "%s/%s_source.framemd5" % (
        parent_folder, os.path.basename(filename)
    )
    print(' - Framemd5s for each frame of your input file will be stored in: %s' % fmd5)
    ffv1_logfile = os.path.join(parent_folder, '%s_normalise.log' % output_uuid)
    print(' - The FFmpeg logfile for the transcode will be stored in: %s' % ffv1_logfile)
    print(' - FFmpeg will begin normalisation now.')
    ffv1_env_dict = ififuncs.set_environment(ffv1_logfile)
    ffv1_command = [
        'ffmpeg',
        '-i', filename,
        '-c:v', 'ffv1',    # Use FFv1 codec
        '-g', '1',         # Use intra-frame only aka ALL-I aka GOP=1
        '-level', '3',     # Use Version 3 of FFv1
        '-c:a', 'copy',    # Copy and paste audio bitsream with no transcoding
        '-map', '0',
        '-dn',
        '-report',
        '-slicecrc', '1',
        '-slices', '16',
    ]
    if ififuncs.check_for_fcp(filename) is True:
        print(' - A 720/576 file with no Pixel Aspect Ratio and scan type metadata has been detected.')
        ffv1_command += [
            '-vf',
            'setfield=tff, setdar=4/3'
        ]
        print(' - -vf setfield=tff, setdar=4/3 will be added to the FFmpeg command.')
    ffv1_command += [
        output,
        '-f', 'framemd5', '-an',  # Create decoded md5 checksums for every frame of the input. -an ignores audio
        fmd5
    ]
    print(ffv1_command)
    subprocess.call(ffv1_command, env=ffv1_env_dict)
    return output, output_uuid, fmd5


def verify_losslessness(parent_folder, output, output_uuid, fmd5):
    '''
    Verify the losslessness of the process using framemd5.
    An additional metadata check should also occur.
    '''
    fmd5_logfile = os.path.join(parent_folder, '%s_framemd5.log' % output_uuid)
    fmd5ffv1 = "%s/%s.framemd5" % (parent_folder, output_uuid)
    print(' - Framemd5s for each frame of your output file will be stored in: %s' % fmd5ffv1)
    fmd5_env_dict = ififuncs.set_environment(fmd5_logfile)
    print(' - FFmpeg will attempt to verify the losslessness of the normalisation by using Framemd5s.')
    fmd5_command = [
        'ffmpeg',    # Create decoded md5 checksums for every frame
        '-i', output,
        '-report',
        '-f', 'framemd5', '-an',
        fmd5ffv1
    ]
    print fmd5_command
    subprocess.call(fmd5_command, env=fmd5_env_dict)
    checksum_mismatches = ififuncs.diff_framemd5s(fmd5, fmd5ffv1)
    if len(checksum_mismatches) > 0:
        print 'not lossless'
    else:
        print 'lossless'


def main():
    print('\n - Normalise.py started')
    source = sys.argv[1]
    file_list = ififuncs.get_video_files(source)
    for filename in file_list:
        print('\n - Processing: %s' % filename)
        parent_folder = extract_provenance(filename)
        output, output_uuid, fmd5 = normalise_process(filename)
        verify_losslessness(parent_folder, output, output_uuid, fmd5)


if __name__ == '__main__':
    main()
mit
Python
944ec176f4d6db70f9486dddab9a6cf901d6d575
Create MyUsefulExample.py
firiceguo/Recommendation-NLP,firiceguo/Recommendation-NLP
src/zhang/MyUsefulExample.py
src/zhang/MyUsefulExample.py
#JUST EXAMPLES
import pyspark.ml.recommendation

df = spark.createDataFrame(
...     [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
...     ["user", "item", "rating"])
als = ALS(rank=10, maxIter=5, seed=0)
model = als.fit(df)
model.rank
#10
model.userFactors.orderBy("id").collect()
#[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
test = spark.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
predictions[0]
#Row(user=0, item=2, prediction=-0.13807615637779236)
predictions[1]
#Row(user=1, item=0, prediction=2.6258413791656494)
predictions[2]
#Row(user=2, item=0, prediction=-1.5018409490585327)
als_path = temp_path + "/als"
als.save(als_path)
als2 = ALS.load(als_path)
als.getMaxIter()
#5
model_path = temp_path + "/als_model"
model.save(model_path)
model2 = ALSModel.load(model_path)
model.rank == model2.rank
#True
sorted(model.userFactors.collect()) == sorted(model2.userFactors.collect())
#True
sorted(model.itemFactors.collect()) == sorted(model2.itemFactors.collect())
#True

# ---------------------------------------

from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.sql import Row

lines = spark.read.text("../zhang/proj/sample_movielens_ratings.txt").rdd
parts = lines.map(lambda row: row.value.split("::"))
ratingsRDD = parts.map(lambda p: Row(userId=int(p[0]), movieId=int(p[1]),
                                     rating=float(p[2]), timestamp=long(p[3])))
ratings = spark.createDataFrame(ratingsRDD)
(training, test) = ratings.randomSplit([0.8, 0.2])

# Build the recommendation model using ALS on the training data
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId", ratingCol="rating")
model = als.fit(training)

# Evaluate the model by computing the RMSE on the test data
# prediction is a dataframe DataFrame[movieId: bigint, rating: double, timestamp: bigint, userId: bigint, prediction: float]
predictions = model.transform(test)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating", predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print("Root-mean-square error = " + str(rmse))
mit
Python
5ba36ca805b002af63c619e17dd00400650da14b
Add script to rewrite the agents used by scc.
mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju
agent_paths.py
agent_paths.py
#!/usr/bin/env python3
from argparse import ArgumentParser
import json
import os.path
import re
import sys

from generate_simplestreams import json_dump


def main():
    parser = ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('output')
    args = parser.parse_args()
    paths_hashes = {}
    with open(args.input) as input_file:
        stanzas = json.load(input_file)
    hashes = {}
    for stanza in stanzas:
        path = os.path.join('agent', os.path.basename(stanza['path']))
        path = re.sub('-win(2012(hv)?(r2)?|7|8|81)-', '-windows-', path)
        path_hash = stanza['sha256']
        paths_hashes.setdefault(path, stanza['sha256'])
        if paths_hashes[path] != path_hash:
            raise ValueError('Conflicting hash')
        stanza['path'] = path
        hashes[path] = path_hash
    ph_list = {}
    for path, path_hash in hashes.items():
        ph_list.setdefault(path_hash, set()).add(path)
    for path_hash, paths in ph_list.items():
        if len(paths) > 1:
            print(paths)
    json_dump(stanzas, args.output)


if __name__ == '__main__':
    sys.exit(main())
agpl-3.0
Python
5cc627d0c0cb18e236a055ce7fceb05b63b45385
Add flask backend file
tkiapril/wooglecalendar,tkiapril/wooglecalendar,tkiapril/wooglecalendar
woogle.py
woogle.py
""":mod:`woogle` --- Flask Backend for Woogle Calendar ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ from flask import Flask app = Flask(__name__) @app.route("/") def calendar(): return "Hello World!" if __name__ == "__main__": app.run()
agpl-3.0
Python
f82ef484f6440c2b5b10eb144af09b770fa413c9
Add python script for extracting server i18n msgs
beavyHQ/beavy,beavyHQ/beavy,beavyHQ/beavy,beavyHQ/beavy
.infrastructure/i18n/extract-server-msgs.py
.infrastructure/i18n/extract-server-msgs.py
import os

# Keys indicating the fn symbols that pybabel should search for
# when finding translations.
keys = '-k format -k format_time -k format_date -k format_datetime'

# Extraction
os.system("pybabel extract -F babel.cfg {} -o messages.pot .".format(keys))
os.system("pybabel init -i messages.pot -d . -o './beavy-server.po' -l en")
os.system("./node_modules/.bin/po2json beavy-server.po var/server-messages/beavy-server.json -F -f mf --fallback-to-msgid")

# Clean up
os.system("rm messages.pot")
os.system("rm beavy-server.po")
mpl-2.0
Python
5046ff8ba17899893a9aa30687a1ec58a6e95af2
Add solution for Square Detector.
changyuheng/hacker-cup-solutions
2014/qualification-round/square-detector.py
2014/qualification-round/square-detector.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys


class QuizzesParser:
    def __init__(self, src):
        self.src = src
        with open(src) as f:
            self.raw = f.read().splitlines()
        self.amount = int(self.raw[0])

    def quizpool(self):
        cur_line = 1
        for i in range(self.amount):
            offset = int(self.raw[cur_line])
            prev_line = cur_line
            cur_line = prev_line + offset + 1
            yield self.raw[prev_line:cur_line]


class QuizSolver:
    def __init__(self, quiz):
        self.quiz = quiz

    def solve(self):
        N = int(self.quiz[0])
        started = False
        start_line = -1
        mask = list()
        length = 0
        for i in range(N):
            line = self.quiz[i]
            if not started and '#' not in line:
                continue
            if not started:
                if line.count('#') > N - i:
                    return 'NO'
                for j in range(len(line)):
                    if len(line) > 2 and j > 0 and j < len(line) - 1 \
                            and line[j] != '#' and '#' in line[:j] \
                            and '#' in line[j:]:
                        return 'NO'
                    mask.append(1 if line[j] == '#' else 0)
                start_line = i
                length = line.count('#')
                started = True
                continue
            if i - start_line >= length:
                if '#' in line:
                    return 'NO'
                else:
                    continue
            mask_pair = list()
            for j in range(len(line)):
                mask_pair.append(1 if line[j] == '#' else 0)
            if any(map(lambda x, y: x ^ y, mask, mask_pair)):
                return 'NO'
        return 'YES'


def main():
    qsparser = QuizzesParser(sys.argv[1])
    with open(sys.argv[2], 'w') as f:
        for i, quiz in enumerate(qsparser.quizpool()):
            qsolver = QuizSolver(quiz)
            f.write('Case #{num}: {ans}\n'.format(num=i+1, ans=qsolver.solve()))


if __name__ == '__main__':
    main()
mit
Python
3df4cc086bf6c85eebc12094cc3ca459bd2bcd3d
Add unit test for programmatic application and approval
HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,hacklab-fi/asylum,jautero/asylum,rambo/asylum,jautero/asylum,hacklab-fi/asylum,rambo/asylum,HelsinkiHacklab/asylum,rambo/asylum,jautero/asylum,jautero/asylum,hacklab-fi/asylum,hacklab-fi/asylum,rambo/asylum,HelsinkiHacklab/asylum
project/members/tests/test_application.py
project/members/tests/test_application.py
# -*- coding: utf-8 -*-
import pytest

from members.tests.fixtures.memberlikes import MembershipApplicationFactory
from members.tests.fixtures.types import MemberTypeFactory
from members.models import Member


@pytest.mark.django_db
def test_application_approve():
    mtypes = [MemberTypeFactory(label='Normal member')]
    application = MembershipApplicationFactory()
    email = application.email
    application.approve(set_mtypes=mtypes)
    Member.objects.get(email=email)
mit
Python
b1b799c224418b1639850305a7136a3042c5e9b5
Add station_data.py
SPP1665DataAnalysisCourse/wetter
station_data.py
station_data.py
from ftplib import FTP
import csv
import os


def unicode_csv_reader(latin1_data, **kwargs):
    csv_reader = csv.reader(latin1_data, **kwargs)
    for row in csv_reader:
        yield [unicode(cell, "latin-1") for cell in row]


def get_station_data():
    filename = "/tmp/station_list.txt"

    # remove old filename
    try:
        os.remove(filename)
    except OSError:
        pass

    # write stations_list_soil.txt into filename
    with open(filename, 'wb') as file:
        ftp = FTP("ftp-cdc.dwd.de")
        ftp.login()
        ftp.retrbinary('RETR /pub/CDC/help/stations_list_soil.txt', file.write)
        ftp.quit()

    id_idx = 0
    name2id = {}
    id2meta = {}
    first = True

    # parse csv file
    with open(filename, 'r') as csvfile:
        spamreader = unicode_csv_reader(csvfile, delimiter=';')
        for row in spamreader:
            # first row contains header info
            if first:
                first = False
            else:
                name2id[row[4].strip()] = int(row[0].strip())
                id2meta[int(row[id_idx].strip())] = {}
                id2meta[int(row[id_idx].strip())]['id'] = int(row[id_idx].strip())
                id2meta[int(row[id_idx].strip())]['height'] = row[1].strip()
                id2meta[int(row[id_idx].strip())]['latitude'] = row[2].strip()
                id2meta[int(row[id_idx].strip())]['longitude'] = row[3].strip()
                id2meta[int(row[id_idx].strip())]['name'] = row[4].strip()
                id2meta[int(row[id_idx].strip())]['state'] = row[5].strip()

    return name2id, id2meta


def get_daily_recent():
    ftp = FTP("ftp-cdc.dwd.de")
    ftp.login()
    ftp.cwd("/pub/CDC/observations_germany/climate/daily/kl/recent/")
    ls = []
    ftp.retrlines('NLST', lambda l: ls.append(l))
    id2file = {}
    for l in ls:
        try:
            id2file[int(l.split("_")[2])] = "ftp-cdc.dwd.de/pub/CDC/observations_germany/climate/daily/kl/recent/" + l
        except:
            continue
    ftp.quit()
    return id2file


def get_daily_hist():
    ftp = FTP("ftp-cdc.dwd.de")
    ftp.login()
    ftp.cwd("/pub/CDC/observations_germany/climate/daily/kl/historical/")
    ls = []
    ftp.retrlines('NLST', lambda l: ls.append(l))
    id2file = {}
    for l in ls:
        try:
            id2file[int(l.split("_")[1])] = "ftp://ftp-cdc.dwd.de/pub/CDC/observations_germany/climate/daily/kl/historical/" + l
        except:
            continue
    ftp.quit()
    return id2file


def suggest_names(name, name2id):
    station_names = sorted(list(name2id.keys()))
    return [st for st in station_names if unicode(name, "utf8").lower() in st.lower()]


def get_name(name2id):
    while True:
        name = raw_input("Enter station name: ")
        ns = suggest_names(name, name2id)
        if len(ns) == 1:
            return ns[0]
        elif len(ns) == 0:
            print "Nothing found. Repeat!"
        else:
            print "Reduce selection: ",
            for n in ns:
                print "'"+n+"'",
            print


def cli():
    name2id, id2meta = get_station_data()
    id2recent = get_daily_recent()
    id2hist = get_daily_hist()
    station_name = get_name(name2id)
    station_id = name2id[station_name]
    print "Station name:", station_name
    print " - id:", station_id
    print " - height:", id2meta[station_id]['height']
    print " - latitude:", id2meta[station_id]['latitude']
    print " - longitude:", id2meta[station_id]['longitude']
    print " - federal state:", id2meta[station_id]['state']
    print " - Recent file:", id2recent[station_id]
    print " - History file:", id2hist[station_id]


if __name__ == '__main__':
    cli()
bsd-3-clause
Python
1a682405904dcc711d889881d6a216b3eff9e1dd
remove off method from status light
grvrulz/BookPlayer,nerk/BookPlayer
status_light.py
status_light.py
import time
import config
import RPi.GPIO as GPIO


class StatusLight(object):

    """available patterns for the status light"""
    patterns = {
        'on' : (.1, [True]),
        'off' : (.1, [False]),
        'blink_fast' : (.1, [False, True]),
        'blink' : (.1, [False, False, False, True, True, True, True, True, True, True, True, True, True]),
        'blink_pauze' : (.1, [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True]),
    }

    """placeholder for pattern to tenmporarily interrupt status light
    with different pattern"""
    interrupt_pattern = [0, []]

    """continue flashing, controlled by the stop"""
    cont = True

    pin_id = None

    def __init__(self, pin_id):
        self.pin_id = pin_id
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin_id, GPIO.OUT)
        self.action = 'on'

    def interrupt(self, action, repeat = 1):
        """Interupt the current status of the light with a names action
        parameters:
        action  the name of the action
        repeat: the number of times to repeatthe interruption"""
        self.interrupt_pattern[0] = self.patterns[action][0]
        for i in range(0, repeat):
            self.interrupt_pattern[1].extend(list(self.patterns[action][1][:]))

    def start(self):
        """Perform a status light action"""
        while True:
            for state in self.patterns[self.action][1]:
                # if the interrupt_pattern is not empty, prioritize it
                while len(self.interrupt_pattern[1]):
                    time.sleep(self.interrupt_pattern[0])
                    self.set_state(state = self.interrupt_pattern[1].pop(0))
                # peform the regular action when not interrupted
                time.sleep(self.patterns[self.action][0])
                self.set_state(state)

    def set_state(self, state):
        """Turn the light on or off"""
        GPIO.output(self.pin_id, state)

    def __del__(self):
        GPIO.cleanup()


if __name__ == '__main__':
    light = StatusLight(config.status_light_pin)
    light.interrupt('blink_fast', 3)
    light.start()
import time
import config
import RPi.GPIO as GPIO


class StatusLight(object):

    """available patterns for the status light"""
    patterns = {
        'on' : (.1, [True]),
        'off' : (.1, [False]),
        'blink_fast' : (.1, [False, True]),
        'blink' : (.1, [False, False, False, True, True, True, True, True, True, True, True, True, True]),
        'blink_pauze' : (.1, [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True]),
    }

    """placeholder for pattern to tenmporarily interrupt status light
    with different pattern"""
    interrupt_pattern = [0, []]

    """continue flashing, controlled by the stop"""
    cont = True

    pin_id = None

    def __init__(self, pin_id):
        self.pin_id = pin_id
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin_id, GPIO.OUT)
        self.action = 'on'

    def interrupt(self, action, repeat = 1):
        """Interupt the current status of the light with a names action
        parameters:
        action  the name of the action
        repeat: the number of times to repeatthe interruption"""
        self.interrupt_pattern[0] = self.patterns[action][0]
        for i in range(0, repeat):
            self.interrupt_pattern[1].extend(list(self.patterns[action][1][:]))

    def start(self):
        """Perform a status light action"""
        while True:
            for state in self.patterns[self.action][1]:
                # if the interrupt_pattern is not empty, prioritize it
                while len(self.interrupt_pattern[1]):
                    time.sleep(self.interrupt_pattern[0])
                    self.set_state(state = self.interrupt_pattern[1].pop(0))
                # peform the regular action when not interrupted
                time.sleep(self.patterns[self.action][0])
                self.set_state(state)

    def off(self, state):
        """Turn off status light"""
        self.cont = False
        self.set_state(state)

    def set_state(self, state):
        """Turn the light on or off"""
        GPIO.output(self.pin_id, state)

    def __del__(self):
        GPIO.cleanup()


if __name__ == '__main__':
    light = StatusLight(config.status_light_pin)
    light.interrupt('blink_fast', 3)
    light.start()
mit
Python
aa292c2f180ffcfdfc55114750f22b6c8790a69b
Add Jaro-Winkler distance based on code on RosettaCode
studiawan/pygraphc
pygraphc/similarity/RosettaJaroWinkler.py
pygraphc/similarity/RosettaJaroWinkler.py
from __future__ import division
from itertools import combinations
from time import time


def jaro(s, t):
    s_len = len(s)
    t_len = len(t)

    if s_len == 0 and t_len == 0:
        return 1

    match_distance = (max(s_len, t_len) // 2) - 1

    s_matches = [False] * s_len
    t_matches = [False] * t_len

    matches = 0
    transpositions = 0

    for i in range(s_len):
        start = max(0, i - match_distance)
        end = min(i + match_distance + 1, t_len)

        for j in range(start, end):
            if t_matches[j]:
                continue
            if s[i] != t[j]:
                continue
            s_matches[i] = True
            t_matches[j] = True
            matches += 1
            break

    if matches == 0:
        return 0

    k = 0
    for i in range(s_len):
        if not s_matches[i]:
            continue
        while not t_matches[k]:
            k += 1
        if s[i] != t[k]:
            transpositions += 1
        k += 1

    return ((matches / s_len) +
            (matches / t_len) +
            ((matches - transpositions / 2) / matches)) / 3


start = time()
log_file = '/home/hs32832011/Git/labeled-authlog/dataset/Hofstede2014/dataset1_perday/Dec 1.log'
with open(log_file, 'r') as f:
    lines = f.readlines()

log_length = len(lines)
for line1, line2 in combinations(xrange(log_length), 2):
    s = lines[line1]
    t = lines[line2]
    print("%.10f" % (jaro(s, t)))

# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
mit
Python
2674aa95c69c6e0fe0d8fd71d9116150cfab6507
add xdawn decoding example
mne-tools/mne-python,teonlamont/mne-python,wmvanvliet/mne-python,Teekuningas/mne-python,dimkal/mne-python,ARudiuk/mne-python,trachelr/mne-python,jaeilepp/mne-python,bloyl/mne-python,dimkal/mne-python,yousrabk/mne-python,Teekuningas/mne-python,antiface/mne-python,olafhauk/mne-python,larsoner/mne-python,mne-tools/mne-python,wronk/mne-python,yousrabk/mne-python,leggitta/mne-python,kingjr/mne-python,larsoner/mne-python,jaeilepp/mne-python,drammock/mne-python,trachelr/mne-python,andyh616/mne-python,antiface/mne-python,larsoner/mne-python,teonlamont/mne-python,pravsripad/mne-python,rkmaddox/mne-python,ARudiuk/mne-python,nicproulx/mne-python,drammock/mne-python,alexandrebarachant/mne-python,nicproulx/mne-python,bloyl/mne-python,lorenzo-desantis/mne-python,Eric89GXL/mne-python,jniediek/mne-python,pravsripad/mne-python,olafhauk/mne-python,leggitta/mne-python,mne-tools/mne-python,alexandrebarachant/mne-python,kingjr/mne-python,drammock/mne-python,Eric89GXL/mne-python,kambysese/mne-python,olafhauk/mne-python,jmontoyam/mne-python,pravsripad/mne-python,andyh616/mne-python,cmoutard/mne-python,cmoutard/mne-python,Teekuningas/mne-python,jmontoyam/mne-python,kingjr/mne-python,adykstra/mne-python,cjayb/mne-python,jniediek/mne-python,kambysese/mne-python,wmvanvliet/mne-python,cjayb/mne-python,wronk/mne-python,rkmaddox/mne-python,wmvanvliet/mne-python,adykstra/mne-python,lorenzo-desantis/mne-python
examples/decoding/plot_decoding_xdawn_meg.py
examples/decoding/plot_decoding_xdawn_meg.py
""" ============================= XDAWN Decoding From MEG data ============================= ERF decoding with Xdawn. For each event type, a set of spatial Xdawn filters are trained and apply on the signal. Channels are concatenated and rescaled to create features vectors that will be fed into a Logistic Regression. """ # Authors: Alexandre Barachant <alexandre.barachant@gmail.com> # # License: BSD (3-clause) import mne from mne import io from mne.datasets import sample from mne.preprocessing.xdawn import Xdawn from mne.decoding import ConcatenateChannels from sklearn.cross_validation import StratifiedKFold from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, confusion_matrix from sklearn.preprocessing import MinMaxScaler import numpy as np import matplotlib.pyplot as plt print(__doc__) data_path = sample.data_path() ############################################################################### # Set parameters and read data raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' tmin, tmax = -0.1, 0.3 event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4) # Setup for reading the raw data raw = io.Raw(raw_fname, preload=True) raw.filter(1, 20, method='iir') events = mne.read_events(event_fname) raw.info['bads'] = ['MEG 2443'] # set bad channels picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, exclude='bads') epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False, picks=picks, baseline=None, preload=True, add_eeg_ref=False, verbose=False) # Create classification pipeline clf = make_pipeline(Xdawn(3), ConcatenateChannels(), MinMaxScaler(), LogisticRegression(penalty='l1')) # Get the labels labels = epochs.events[:, -1] # Cross validator cv = StratifiedKFold(labels, 10, shuffle=True, random_state=42) # Do cross-validation preds = np.empty(len(labels)) for train, test in cv: clf.fit(epochs[train], labels[train]) preds[test] = clf.predict(epochs[test]) # Classification report target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r'] report = classification_report(labels, preds, target_names=target_names) print(report) # Normalized confusion matrix cm = confusion_matrix(labels, preds) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # Plot confusion matrix plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues) plt.title('Normalized Confusion matrix') plt.colorbar() tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show()
bsd-3-clause
Python
34986c7bfd1d4634861a5c4b54cf90ef18090ff4
test versions of required libs across different places
honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy
spacy/tests/test_requirements.py
spacy/tests/test_requirements.py
import re
from pathlib import Path


def test_build_dependencies(en_vocab):
    libs_ignore_requirements = ["pytest", "pytest-timeout", "mock", "flake8",
                                "jsonschema"]
    libs_ignore_setup = ["fugashi", "natto-py", "pythainlp"]

    # check requirements.txt
    root_dir = Path(__file__).parent.parent.parent
    req_file = root_dir / "requirements.txt"
    req_dict = {}
    with req_file.open() as f:
        lines = f.readlines()
        for line in lines:
            line = line.strip()
            if not line.startswith("#"):
                lib, v = _parse_req(line)
                if lib and lib not in libs_ignore_requirements:
                    req_dict[lib] = v

    # check setup.cfg and compare to requirements.txt
    # also fails when there are missing or additional libs
    setup_file = root_dir / "setup.cfg"
    with setup_file.open() as f:
        lines = f.readlines()
        setup_keys = set()
        for line in lines:
            line = line.strip()
            if not line.startswith("#"):
                lib, v = _parse_req(line)
                if lib and not lib.startswith("cupy") and lib not in libs_ignore_setup:
                    req_v = req_dict.get(lib, None)
                    assert req_v is not None  # if fail: setup.cfg contains a lib not in requirements.txt
                    assert (lib+v) == (lib+req_v)  # if fail: setup.cfg & requirements.txt have conflicting versions
                    setup_keys.add(lib)
        assert sorted(setup_keys) == sorted(req_dict.keys())  # if fail: requirements.txt contains a lib not in setup.cfg

    # check pyproject.toml and compare the versions of the libs to requirements.txt
    # does not fail when there are missing or additional libs
    toml_file = root_dir / "pyproject.toml"
    with toml_file.open() as f:
        lines = f.readlines()
        toml_keys = set()
        for line in lines:
            line = line.strip()
            line = line.strip(",")
            line = line.strip("\"")
            if not line.startswith("#"):
                lib, v = _parse_req(line)
                if lib:
                    req_v = req_dict.get(lib, None)
                    assert (lib+v) == (lib+req_v)  # if fail: pyproject.toml & requirements.txt have conflicting versions
                    toml_keys.add(lib)


def _parse_req(line):
    lib = re.match(r"^[a-z0-9\-]*", line).group(0)
    v = line.replace(lib, "").strip()
    if not re.match(r"^[<>=][<>=].*", v):
        return None, None
    return lib, v
mit
Python
08b4e97d3e3bcf07bdc8b0e0c02ce5d29fe5ee9e
Create battleship.py
mayoms/battleship
battleship.py
battleship.py
from random import randint
from random import randrange

ships = 0
board = []
BuiltShips = {}

board_size = int(input("How big would you like the board to be?"))
for x in range(board_size):
    board.append(["O"] * board_size)


def print_board(board):
    for row in board:
        print(" ".join(row))


class BattleShip(object):
    def __init__(self, id):
        self.id = id
        self.location = {
            "x": [],
            "y": []
        }
        self.hits = 0
        self.orientation = ""

    x = []  # Keep Track of all X Coordinates
    y = []  # Keep Track of all Y Coordinates
    sank = 0
    # All battleships start with these attributes, hit count, whether or not it is sank, location and orientation

    def ExcludeRand(self, exclude):
        # this will generate a random number, while excluding coordinates already assigned
        points = None
        while points in exclude or points is None:
            points = randrange(0, len(board)-1)
        return points

    # Battleship, Build Thyself!
    def build(self):
        if randint(0, 1) == 1:  # Flip a coin to determine orientation
            self.orientation = "vertical"
        else:
            self.orientation = "horizontal"
        # If there aren't any ships built yet, we can put it anywhere
        if self.orientation == "horizontal":  # If the coin flipped to horizontal, build it that way
            self.location["x"].append(int(self.ExcludeRand(self.x)))  # Assign Random X Coordinate
            self.x.append(self.location["x"][0])
            print ("X's:", self.x)
            print (self.location)
            self.location["y"].append(int(self.ExcludeRand(self.y)))  # Assign Random Y Coordinate
            self.y.append(self.location["y"][0])
            print (self.location)
            if self.location["x"] == len(board) - 1:
                self.location["x"][0].append(len(board) - 2)
                print (self.location)
            else:
                self.location["x"].append(self.location["x"][0] + 1)
                print (self.location)
            print (self.location)
        else:
            self.location["x"].append(int(self.ExcludeRand(self.x)))  # Random X
            self.x.append(self.location["x"][0])
            print (self.location)
            self.location["y"].append(int(self.ExcludeRand(self.y)))  # Random Y
            self.y.append(self.location["y"][0])
            print ("Y's:", self.y)
            print (self.location)
            if self.location["y"][0] == len(board) - 1:  # Y plus or minus 1
                self.location["y"].append(len(board) - 2)
                print (self.location)
            else:
                self.location["y"].append(self.location["y"][0] + 1)
                print (self.location)


def is_int(n):
    try:
        return int(n)
    except ValueError:
        is_int(input("Sorry, not a number. Try again:"))


ships = is_int(input("How many ships?"))
for each in range(ships):
    BuiltShips["ship" + str(each)] = BattleShip(each)
    BuiltShips["ship" + str(each)].build()


def Assault(x, y):
    for each in BuiltShips:
        if x in BuiltShips[each].location["x"] and y in BuiltShips[each].location["y"]:
            BuiltShips[each].hits += 1
            if BuiltShips[each].hits == 2:
                print ("You sank a ship!")
                BattleShip.sank += 1
            return True
        else:
            print (BuiltShips[each].location)
    return False


turns = 3 * ships
while BattleShip.sank < ships:
    if turns > 0:
        print ("%s turns left" % turns)
        print_board(board)
        guess_x = int(input("Guess Row:"))
        guess_y = int(input("Guess Column:"))
        if board[guess_x][guess_y] == "X" or board[guess_x][guess_y] == "!":
            print ("You already guessed there!")
            turns -= 1
        elif Assault(guess_x, guess_y) == True:
            print ("You got a hit!")
            board[guess_x][guess_y] = "!"
            print_board(board)
            print ("Ships sank:", BattleShip.sank)
        else:
            print ("Miss!")
            board[guess_x][guess_y] = "X"
            turns -= 1
    else:
        print ("Sorry, out of turns.")
        break
else:
    print ("You won.")
unlicense
Python
13959dbce03b44f15c4c05ff0715b7d26ff6c0fa
Add a widget.
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
python/tkinter/python3/animation_print.py
python/tkinter/python3/animation_print.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# See also: http://effbot.org/tkinterbook/widget.htm

import tkinter as tk


def main():
    """Main function"""

    root = tk.Tk()

    def increment_counter():
        # Do something...
        print("Hello")

        # Reschedule event in 3 seconds
        root.after(3000, increment_counter)

    # Schedule event in 3 seconds
    root.after(3000, increment_counter)

    root.mainloop()


if __name__ == '__main__':
    main()
mit
Python
49253451d65511713cd97a86c7fe54e64b3e80a9
Add a separate test of the runtest.py --qmtest option.
azatoth/scons,azatoth/scons,azatoth/scons,azatoth/scons,azatoth/scons
test/runtest/qmtest.py
test/runtest/qmtest.py
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Test that the --qmtest option invokes tests directly via QMTest,
not directly via Python.
"""

import os.path
import string

import TestRuntest

test = TestRuntest.TestRuntest()

test.subdir('test')

test_fail_py = os.path.join('test', 'fail.py')
test_no_result_py = os.path.join('test', 'no_result.py')
test_pass_py = os.path.join('test', 'pass.py')

workpath_fail_py = test.workpath(test_fail_py)
workpath_no_result_py = test.workpath(test_no_result_py)
workpath_pass_py = test.workpath(test_pass_py)

test.write_failing_test(test_fail_py)
test.write_no_result_test(test_no_result_py)
test.write_passing_test(test_pass_py)

# NOTE: the FAIL and PASS lines below have trailing spaces.

expect_stdout = """\
qmtest run --output results.qmr --format none --result-stream="scons_tdb.AegisChangeStream" test/fail.py test/no_result.py test/pass.py
--- TEST RESULTS -------------------------------------------------------------

  test/fail.py : FAIL

    FAILING TEST STDOUT

    FAILING TEST STDERR

  test/no_result.py : NO_RESULT

    NO RESULT TEST STDOUT

    NO RESULT TEST STDERR

  test/pass.py : PASS 

--- TESTS THAT DID NOT PASS --------------------------------------------------

  test/fail.py : FAIL 

  test/no_result.py : NO_RESULT

--- STATISTICS ---------------------------------------------------------------

       3        tests total

       1 ( 33%) tests PASS
       1 ( 33%) tests FAIL
       1 ( 33%) tests NO_RESULT
"""

testlist = [
    test_fail_py,
    test_no_result_py,
    test_pass_py,
]

test.run(arguments='--qmtest %s' % string.join(testlist),
         status=1,
         stdout=expect_stdout)

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
mit
Python
0c2fb46c977d8d8ee03d295fee8ddf37cee8cc06
Add script to calculate recalls of track zip files.
myfavouritekk/TPN
tools/stats/zip_track_recall.py
tools/stats/zip_track_recall.py
#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
import glob
import cPickle

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('vid_file')
    parser.add_argument('annot_file')
    parser.add_argument('track_dir')
    args = parser.parse_args()

    vid_proto = proto_load(args.vid_file)
    annot_proto = proto_load(args.annot_file)

    track_files = glob.glob(args.track_dir + "/*.pkl")
    tracks = []
    frames = []
    for track_file in track_files:
        track = cPickle.loads(open(track_file, 'rb').read())
        tracks.append(track['bbox'])
        frames.append(track['frame'])

    gt_count = 0
    recall_count = 0
    for frame in vid_proto['frames']:
        frame_id = frame['frame']
        # annot boxes
        annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
            for annot_track in annot_proto['annotations']]
        annot_boxes = [box for box in annot_boxes if box is not None]
        if len(annot_boxes) == 0:
            continue
        gt_count += len(annot_boxes)

        # track boxes
        track_boxes = [track[frame==frame_id,:].flatten() for track, frame \
            in zip(tracks, frames) if np.any(frame==frame_id)]
        if len(track_boxes) == 0:
            continue

        overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))
        max_overlaps = overlaps.max(axis=1)
        recall_count += np.count_nonzero(max_overlaps >= 0.5)

    print "{} {} {} {}".format(vid_proto['video'], gt_count,
        recall_count, float(recall_count) / gt_count)
mit
Python
3ee41b704e98e143d23eb0d714c6d79e8d6e6130
Write test for RequestTypeError
BakeCode/performance-testing,BakeCode/performance-testing
tests/web/test_request_type_error.py
tests/web/test_request_type_error.py
import unittest

from performance.web import RequestTypeError


class RequestTypeErrorTestCase(unittest.TestCase):
    def test_init(self):
        type = 'get'
        error = RequestTypeError(type)
        self.assertEqual(type, error.type)

    def test_to_string(self):
        type = 'get'
        error = RequestTypeError(type)
        self.assertEqual('Invalid request type "%s"' % type, error.__str__())
mit
Python
00e75bc59dfec20bd6b96ffac7d17da5760f584c
Add Slack integration
healthchecks/healthchecks,avoinsystems/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,avoinsystems/healthchecks,iphoting/healthchecks,avoinsystems/healthchecks,BetterWorks/healthchecks,BetterWorks/healthchecks,healthchecks/healthchecks,healthchecks/healthchecks,iphoting/healthchecks,iphoting/healthchecks,avoinsystems/healthchecks,BetterWorks/healthchecks,BetterWorks/healthchecks
hc/api/migrations/0012_auto_20150930_1922.py
hc/api/migrations/0012_auto_20150930_1922.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('api', '0011_notification'),
    ]

    operations = [
        migrations.AlterField(
            model_name='channel',
            name='kind',
            field=models.CharField(choices=[('email', 'Email'), ('webhook', 'Webhook'), ('slack', 'Slack'), ('pd', 'PagerDuty')], max_length=20),
        ),
    ]
bsd-3-clause
Python
f92d06346b3d28513c5f5b9833dbf5a4d48c3e46
Create rot_alpha.py
Matir/analysis-tools
rot_alpha.py
rot_alpha.py
#!/usr/bin/env python
from string import uppercase, lowercase, maketrans
import sys


class ROTAlpha():
    def rot_alpha(self, data, rot):
        upper = ''.join([uppercase[(i+rot)%26] for i in xrange(26)])
        lower = ''.join([lowercase[(i+rot)%26] for i in xrange(26)])
        table = maketrans(uppercase + lowercase, upper + lower)
        print(data.translate(table))


if __name__ == '__main__':
    try:
        data = sys.argv[1]
        rot = sys.argv[2]
        rot = int(rot, 0)
        table = ROTAlpha()
        table.rot_alpha(data, rot)
    except IndexError:
        print('Usage: rot_alpha.py <alpha numeric data> <int to rotate>')
        sys.exit(1)
mit
Python
d96acd58ecf5937da344942f387d845dc5b26871
Add db tests
thiderman/piper
test/test_db.py
test/test_db.py
from piper.db import DbCLI

import mock
import pytest


class DbCLIBase(object):
    def setup_method(self, method):
        self.cli = DbCLI()
        self.ns = mock.Mock()
        self.config = mock.Mock()


class TestDbCLIRun(DbCLIBase):
    def test_plain_run(self):
        self.cli.init = mock.Mock()
        ret = self.cli.run(self.ns, self.config)
        assert ret == 0
        self.cli.init.assert_called_once_with(self.ns, self.config)


class TestDbCLIInit(DbCLIBase):
    def test_no_db(self):
        self.config.db.host = None
        with pytest.raises(AssertionError):
            self.cli.init(self.ns, self.config)

    def test_calls(self):
        self.cli.handle_sqlite = mock.Mock()
        self.cli.create_tables = mock.Mock()
        self.cli.init(self.ns, self.config)

        self.cli.handle_sqlite.assert_called_once_with(self.config.db.host)
        self.cli.create_tables.assert_called_once_with(
            self.config.db.host,
            echo=self.ns.verbose,
        )


class TestDbCLIHandleSqlite(DbCLIBase):
    @mock.patch('piper.utils.mkdir')
    @mock.patch('os.path.dirname')
    @mock.patch('os.path.exists')
    def test_sqlite_handling_creates_dir(self, exists, dirname, mkdir):
        self.config.db.host = 'sqlite:///amaranthine.db'
        exists.return_value = False

        self.cli.handle_sqlite(self.ns.host)
        mkdir.assert_called_once_with(dirname.return_value)


class TestDbCLICreateTables(DbCLIBase):
    def setup_method(self, method):
        super(TestDbCLICreateTables, self).setup_method(method)
        self.cli.tables = (mock.Mock(), mock.Mock())
        for x, table in enumerate(self.cli.tables):
            table.__tablename__ = x

    @mock.patch('piper.db.Session')
    @mock.patch('piper.db.create_engine')
    def test_creation(self, ce, se):
        eng = ce.return_value
        host = self.config.host

        self.cli.create_tables(host)

        ce.assert_called_once_with(host, echo=False)
        se.configure.assert_called_once_with(bind=eng)

        for table in self.cli.tables:
            assert table.metadata.bind is eng
            table.metadata.create_all.assert_called_once_with()
mit
Python
83afa054e3bee18aba212394973978fd49429afa
Create test_ratings.py
rupendrab/py_unstr_parse
test_ratings.py
test_ratings.py
#!/usr/bin/env python3.5

import sys
import re
import os
import csv

from extract_toc import parseargs
from get_ratings import Ratings, Ratings2


def nvl(v1, v2):
    if v1:
        return v1
    else:
        return v2


def process_ratings_for_file(ratings, filename):
    ratings.process_file(filename)
    ratings.map_ratings()
    improvement = 0
    for k in ratings.all_available_ratings:
        v = ratings.ratings_mapped.get(k)
        if not v:
            v = [None] * 3
        v_current = ratings.current_ratings_alt.get(k)
        if v_current:
            if (not v[0] or v[0] != v_current):
                improvement += 1
        elif (not v_current):
            if (v[0]):
                improvement -= 1
        print("%-30s %-2s/%-2s %-2s %-2s" % (k, nvl(v[0], "_"), nvl(v_current, "_"), nvl(v[1], "_"), nvl(v[2], "_")))
    # print(ratings.current_ratings_alt)
    print("")
    print("Number of improvements using new methodology = %d" % (improvement))
    print("")


def main(args):
    argsmap = parseargs(args)
    files = argsmap.get('files')
    if (not files):
        sys.exit(0)
    ratings_mapper_file = argsmap.get("rmap")
    if ratings_mapper_file:
        ratings_mapper_file = ratings_mapper_file[0]
    if not ratings_mapper_file:
        print("Ratings Mapper File file name must be entered using the --rmap option...")
        sys.exit(1)
    ratings = Ratings(ratings_mapper_file)
    for filename in files:
        print("Processing file: " + filename)
        print("============================")
        process_ratings_for_file(ratings, filename)


if __name__ == '__main__':
    args = sys.argv[1:]
    main(args)
mit
Python
f804300765f036f375768e57e081b070a549a800
Add test script with only a few packages
hdashnow/python-dependencies
test-extract-dependencies.py
test-extract-dependencies.py
from dependencies import extract_package
import xmlrpc.client as xmlrpclib
import random

client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')

packages = ['gala', 'scikit-learn', 'scipy', 'scikit-image', 'Flask']
random.shuffle(packages)

for i, package in enumerate(packages):
    extract_package(package, to='test-pypi-deps.txt', client=client, n=i)
mit
Python
7de55b168a276b3d5cdea4d718680ede46edf4d8
Create file to test thinc.extra.search
explosion/thinc,explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,spacy-io/thinc
thinc/tests/unit/test_beam_search.py
thinc/tests/unit/test_beam_search.py
from ...extra.search import MaxViolation


def test_init_violn():
    v = MaxViolation()
mit
Python
38b839405f9976df2d63c08d3c16441af6cdebd1
Add test
kr41/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core
test/selenium/src/tests/test_risk_threats_page.py
test/selenium/src/tests/test_risk_threats_page.py
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com

"""All smoke tests relevant to risks/threats page"""

import pytest   # pylint: disable=import-error

from lib import base
from lib.constants import url


class TestRiskThreatPage(base.Test):
    """Tests the threat/risk page, a part of smoke tests, section 8."""

    @pytest.mark.smoke_tests
    def test_app_redirects_to_new_risk_page(self, new_risk):
        """Tests if after saving and closing the lhn_modal the app redirects
        to the object page.

        Generally we start at a random url. Here we verify that after saving
        and closing the lhn_modal we're redirected to an url that contains an
        object id.
        """
        # pylint: disable=no-self-use
        # pylint: disable=invalid-name
        assert url.RISKS + "/" + new_risk.object_id in \
            new_risk.url
apache-2.0
Python
3d6f78447175d7f34e2eaedc2b0df82acb1e0e0e
Add a simple script I used to grep all SVN sources for control statements.
YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion
tools/dev/find-control-statements.py
tools/dev/find-control-statements.py
#!/usr/bin/python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# Find places in our code that are part of control statements
# i.e. "for", "if" and "while".  That output is then easily
# searched for various interesting / complex pattern.
#
#
# USAGE: find-control-statements.py FILE1 FILE2 ...
#

import sys

header_shown = False
last_line_num = None

def print_line(fname, line_num, line):
    """ Print LINE of number LINE_NUM in file FNAME.
        Show FNAME only once per file and LINE_NUM only for
        non-consecutive lines.
    """
    global header_shown
    global last_line_num
    if not header_shown:
        print('')
        print(fname)
        header_shown = True
    if last_line_num and (last_line_num + 1 == line_num):
        print(" %s" % line),
    else:
        print('%5d:%s' % (line_num, line)),
    last_line_num = line_num

def is_control(line, index, word):
    """ Return whether LINE[INDEX] is actual the start position of
        control statement WORD.  It must be followed by an opening
        parantheses and only whitespace in between WORD and the '('.
    """
    if index > 0:
        if not (line[index-1] in [' ', '\t', ';']):
            return False

    index = index + len(word)
    parantheses_index = line.find('(', index)
    if parantheses_index == -1:
        return False

    while index < parantheses_index:
        if not (line[index] in [' ', '\t',]):
            return False
        index += 1

    return True

def find_specific_control(line, control):
    """ Return the first offset of the control statement CONTROL
        in LINE, or -1 if it can't be found.
    """
    current = 0
    while current != -1:
        index = line.find(control, current)
        if index == -1:
            break

        if is_control(line, index, control):
            return index

        current = index + len(control)

    return -1

def find_control(line):
    """ Return the offset of the first control in LINE or -1
        if there is none.
    """
    current = 0
    for_index = find_specific_control(line, "for")
    if_index = find_specific_control(line, "if")
    while_index = find_specific_control(line, "while")

    first = len(line)
    if for_index >= 0 and first > for_index:
        first = for_index
    if if_index >= 0 and first > if_index:
        first = if_index
    if while_index >= 0 and first > while_index:
        first = while_index

    if first == len(line):
        return -1
    return first

def parantheses_delta(line):
    """ Return the number of opening minus the number of closing
        parantheses in LINE.  Don't count those inside strings or chars.
    """
    escaped = False
    in_squote = False
    in_dquote = False

    delta = 0
    for c in line:
        if escaped:
            escaped = False
        elif in_dquote:
            if c == '\\':
                escaped = True
            elif c == '"':
                in_dquote = False
        elif in_squote:
            if c == '\\':
                escaped = True
            elif c == "'":
                in_squote = False
        elif c == '(':
            delta += 1
        elif c == ')':
            delta -= 1
        elif c == '"':
            in_dquote = True
        elif c == "'":
            in_squote = True

    return delta

def scan_file(fname):
    lines = open(fname).readlines()

    line_num = 1
    parantheses_level = 0

    for line in lines:
        if parantheses_level > 0:
            index = 0
        else:
            index = find_control(line)

        if index >= 0:
            print_line(fname, line_num, line)
            parantheses_level += parantheses_delta(line[index:])

        line_num += 1

if __name__ == '__main__':
    for fname in sys.argv[1:]:
        header_shown = False
        last_line_num = None
        scan_file(fname)
apache-2.0
Python
f00c22f79d8f1cd210830957e6c79d75638c7c5b
add test for role
fiaas/k8s
tests/k8s/test_role.py
tests/k8s/test_role.py
#!/usr/bin/env python
# -*- coding: utf-8

# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
import pytest

from k8s.client import NotFound
from k8s.models.common import ObjectMeta
from k8s.models.role import Role, PolicyRule

NAME = "my-role"
NAMESPACE = "my-namespace"


@pytest.mark.usefixtures("k8s_config")
class TestRole(object):
    def test_created_if_not_exists(self, post, api_get):
        api_get.side_effect = NotFound()
        role = _create_default_role()
        call_params = role.as_dict()
        post.return_value.json.return_value = call_params

        assert role._new
        role.save()
        assert not role._new

        pytest.helpers.assert_any_call(post, _uri(NAMESPACE), call_params)

    def test_updated_if_exists(self, get, put):
        mock_response = _create_mock_response()
        get.return_value = mock_response
        role = _create_default_role()

        from_api = Role.get_or_create(
            metadata=role.metadata,
            rules=role.rules,
        )
        assert not from_api._new
        assert from_api.rules == role.rules

    def test_deleted(self, delete):
        Role.delete(NAME, namespace=NAMESPACE)
        pytest.helpers.assert_any_call(delete, _uri(NAMESPACE, NAME))


def _create_mock_response():
    mock_response = mock.Mock()
    mock_response.json.return_value = {
        "apiVersion": "rbac.authorization.k8s.io/v1",
        "kind": "Role",
        "metadata": {
            "creationTimestamp": "2017-09-08T13:37:00Z",
            "generation": 1,
            "labels": {
                "test": "true"
            },
            "name": NAME,
            "namespace": NAMESPACE,
            "resourceVersion": "42",
            "selfLink": _uri(NAMESPACE, NAME),
            "uid": "d8f1ba26-b182-11e6-a364-fa163ea2a9c4"
        },
        "rules": [
            {
                "apiGroups": ["fiaas.schibsted.io"],
                "resources": ["applications", "application-statuses"],
                "verbs": ["get", "list", "watch"],
            },
        ],
    }
    return mock_response


def _create_default_role():
    object_meta = ObjectMeta(name=NAME, namespace=NAMESPACE, labels={"test": "true"})
    policy_rules = [
        PolicyRule(
            apiGroups=[],
            resources=[],
            verbs=[],
            resourceNames=[],
            nonResourceURLs=[],
        )
    ]
    return Role(metadata=object_meta, rules=policy_rules)


def _uri(namespace, name=""):
    uri = "/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}"
    return uri.format(name=name, namespace=namespace)
apache-2.0
Python
51030039f68d0dc4243b6ba125fb9b7aca44638d
Add Pipeline tests
pytorch/text,pytorch/text,pytorch/text,pytorch/text
test/data/test_pipeline.py
test/data/test_pipeline.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import torchtext.data as data

from ..common.torchtext_test_case import TorchtextTestCase


class TestPipeline(TorchtextTestCase):
    @staticmethod
    def repeat_n(x, n=3):
        """
        Given a sequence, repeat it n times.
        """
        return x * n

    def test_pipeline(self):
        pipeline = data.Pipeline(str.lower)
        assert pipeline("Test STring") == "test string"
        assert pipeline("ᑌᑎIᑕOᗪᕮ_Tᕮ᙭T") == "ᑌᑎiᑕoᗪᕮ_tᕮ᙭t"
        assert pipeline(["1241", "Some String"]) == ["1241", "some string"]

        args_pipeline = data.Pipeline(TestPipeline.repeat_n)
        assert args_pipeline("test", 5) == "testtesttesttesttest"
        assert args_pipeline(["ele1", "ele2"], 2) == ["ele1ele1", "ele2ele2"]

    def test_composition(self):
        pipeline = data.Pipeline(TestPipeline.repeat_n)
        pipeline.add_before(str.lower)
        pipeline.add_after(str.capitalize)

        other_pipeline = data.Pipeline(str.swapcase)
        other_pipeline.add_before(pipeline)

        # Assert pipeline gives proper results after composition
        # (test that we aren't modifying pipes member)
        assert pipeline("teST") == "Testtesttest"
        assert pipeline(["ElE1", "eLe2"]) == ["Ele1ele1ele1", "Ele2ele2ele2"]

        # Assert pipeline that we added to gives proper results
        assert other_pipeline("teST") == "tESTTESTTEST"
        assert other_pipeline(["ElE1", "eLe2"]) == ["eLE1ELE1ELE1", "eLE2ELE2ELE2"]

    def test_exceptions(self):
        with self.assertRaises(ValueError):
            data.Pipeline("Not Callable")
bsd-3-clause
Python
ca4f6e72c152f975c8bf01b920bcbdb3b611876b
add script to save_segment to disk
williballenthin/idawilli
scripts/save_segment.py
scripts/save_segment.py
'''
IDAPython script that saves the content of a segment to a file.

Prompts the user for:
  - segment name
  - file path

Useful for extracting data from memory dumps.

Author: Willi Ballenthin <william.ballenthin@fireeye.com>
Licence: Apache 2.0
'''
import sys
import logging
from collections import namedtuple

import idaapi
import ida_bytes
import ida_segment


logger = logging.getLogger(__name__)


class BadInputError(Exception):
    pass


Segment = namedtuple('SegmentBuffer', ['path', 'name'])


def prompt_for_segment():
    ''' :returns: a Segment instance, or raises BadInputError '''
    class MyForm(idaapi.Form):
        def __init__(self):
            idaapi.Form.__init__(self, """STARTITEM 0
add segment by buffer

<##segment name:{name}>
<##output path:{path}>
""", {
                'path': idaapi.Form.FileInput(save=True),
                'name': idaapi.Form.StringInput(),
            })

        def OnFormChange(self, fid):
            return 1

    f = MyForm()
    f.Compile()
    f.path.value = ""
    f.name.value = ""
    ok = f.Execute()
    if ok != 1:
        raise BadInputError('user cancelled')

    path = f.path.value
    if path == "" or path is None:
        raise BadInputError('bad path provided')

    name = f.name.value
    if name == "" or name is None:
        raise BadInputError('bad name provided')

    f.Free()
    return Segment(path, name)


def main(argv=None):
    if argv is None:
        argv = sys.argv[:]

    try:
        seg_spec = prompt_for_segment()
    except BadInputError:
        logger.error('bad input, exiting...')
        return -1

    seg = ida_segment.get_segm_by_name(seg_spec.name)
    if not seg:
        logger.error("bad segment, exiting...")

    buf = ida_bytes.get_bytes(seg.start_ea, seg.end_ea - seg.start_ea)

    with open(seg_spec.path, "wb") as f:
        f.write(buf)

    logger.info("wrote %x bytes", len(buf))


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    main()
apache-2.0
Python
369eed75c8a2fdc916885344fabb14e116bb60f9
add datatype test
jplusplus/statscraper
tests/test_datatype.py
tests/test_datatype.py
# encoding: utf-8
from unittest import TestCase

from statscraper import Datatype, NoSuchDatatype


class TestDatatype(TestCase):

    def test_datatype(self):
        dt = Datatype("str")
        self.assertTrue(str(dt) == "str")

    def test_datatype_with_values(self):
        dt = Datatype("region")
        self.assertTrue(len(dt.allowed_values))

    def test_none_existing_datatype(self):
        with self.assertRaises(NoSuchDatatype):
            Datatype("donkey_power")

    def test_allowed_values(self):
        dt = Datatype("region")
        self.assertTrue(u"Växjö kommun" in dt.allowed_values)
        self.assertEqual(str(dt.allowed_values["eu"]), "eu")
mit
Python
9f7a8e01f7897e8979997b8845a9ace3f64d5412
Add more tests
WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln
tests/test_generate.py
tests/test_generate.py
import pytest

from nlppln.generate import to_bool


def test_to_bool_correct():
    assert to_bool('y') == True
    assert to_bool('n') == False


def test_to_bool_error():
    with pytest.raises(ValueError):
        to_bool('foo')
apache-2.0
Python
2cd1ab91ca48b8a8d34eabcc2a01b4014a97bcf6
add unit tests
valgur/ncompress,valgur/ncompress
test/test_ncompress.py
test/test_ncompress.py
import shutil
import subprocess
from io import BytesIO

import pytest

from ncompress import compress, decompress


@pytest.fixture
def sample_data():
    chars = []
    for i in range(15):
        chars += [i * 16] * (i + 1)
    chars += [0, 0, 0]
    return bytes(chars)


@pytest.fixture
def sample_compressed(sample_data):
    compress_cmd = shutil.which("compress")
    if compress_cmd:
        return subprocess.check_output(compress_cmd, input=sample_data)
    return compress(sample_data)


def test_string_string(sample_data, sample_compressed):
    assert compress(sample_data) == sample_compressed
    assert decompress(sample_compressed) == sample_data


def test_string_stream(sample_data, sample_compressed):
    out = BytesIO()
    compress(sample_data, out)
    out.seek(0)
    assert out.read() == sample_compressed

    out = BytesIO()
    decompress(sample_compressed, out)
    out.seek(0)
    assert out.read() == sample_data


def test_stream_stream(sample_data, sample_compressed):
    out = BytesIO()
    compress(BytesIO(sample_data), out)
    out.seek(0)
    assert out.read() == sample_compressed

    out = BytesIO()
    decompress(BytesIO(sample_compressed), out)
    out.seek(0)
    assert out.read() == sample_data


def test_stream_string(sample_data, sample_compressed):
    assert compress(BytesIO(sample_data)) == sample_compressed
    assert decompress(BytesIO(sample_compressed)) == sample_data


def test_empty_input(sample_data):
    assert decompress(compress(b"")) == b""
    with pytest.raises(ValueError):
        decompress(b"")
    with pytest.raises(TypeError):
        compress()
    with pytest.raises(TypeError):
        decompress()


def test_corrupted_input(sample_compressed):
    sample = sample_compressed
    for x in [
        b"123",
        sample[1:],
        sample[:1],
        b"\0" * 3 + sample[:3],
        sample * 2,
        b"\0" + sample
    ]:
        with pytest.raises(ValueError) as ex:
            decompress(x)
        assert ("not in LZW-compressed format" in str(ex.value)
                or "corrupt input - " in str(ex.value))


def test_str(sample_data, sample_compressed):
    with pytest.raises(TypeError):
        compress(sample_data.decode("latin1", errors="replace"))
    with pytest.raises(TypeError):
        decompress(sample_compressed.decode("latin1", errors="replace"))


def test_closed_input(sample_data, sample_compressed):
    expected = "I/O operation on closed file."
    with pytest.raises(ValueError) as ex:
        stream = BytesIO(sample_data)
        stream.close()
        compress(stream)
    assert expected in str(ex.value)

    with pytest.raises(ValueError) as ex:
        stream = BytesIO(sample_compressed)
        stream.close()
        decompress(stream)
    assert expected in str(ex.value)


def test_file_input():
    with open(__file__, "rb") as f:
        expected = f.read()
        f.seek(0)
        assert decompress(compress(f)) == expected
unlicense
Python
c297de3964c53beffdf33922c0bffd022b376ae6
Create __init__.py
kkokey/crawler,kkokey/crawler
crawl/__init__.py
crawl/__init__.py
apache-2.0
Python
405385e1c840cd8a98d6021358c603964fa8d0d3
Create simulator.py
shauryashahi/final-year-project,shauryashahi/final-year-project
simulator.py
simulator.py
import numpy as np
import random

from naive_selector import NaiveSelector
from bayesian_selector import BayesianSelector
from multiarm_selector import MultiarmSelector

NUM_SIM = 30
NUM_USERS = 1000


def coin_flip(prob_true):
    if random.random() < prob_true:
        return True
    else:
        return False


def simulate(prob_click_a, prob_click_b, num_users):
    naive_selector = NaiveSelector()
    bayesian_selector = BayesianSelector()
    multiarm_selector = MultiarmSelector()

    user_clicks = [
        {
            "A": coin_flip(prob_click_a),
            "B": coin_flip(prob_click_b)
        }
        for i in range(num_users)
    ]

    for user_click in user_clicks:
        naive_selector.handle_response_from_new_user(user_click)
        bayesian_selector.handle_response_from_new_user(user_click)
        multiarm_selector.handle_response_from_new_user(user_click)

    return naive_selector, bayesian_selector, multiarm_selector


def main():
    f = open("./data/simulation_results.csv", "w", 0)
    f.write(
        "prob_click_a, prob_click_b,"
        " num_click_naive, num_click_bayesian, num_click_multiarm,"
        " prob_correct_naive, prob_correct_bayesian, prob_correct_multiarm\n"
    )

    prob_click_as = [0.1, 0.3, 0.6]
    for prob_click_a in prob_click_as:
        prob_click_bs = np.arange(prob_click_a + 0.01, prob_click_a + 0.2, 0.01)
        for prob_click_b in prob_click_bs:
            print "working on", prob_click_a, prob_click_b

            num_click_naive = np.zeros(NUM_SIM)
            num_click_bayesian = np.zeros(NUM_SIM)
            num_click_multiarm = np.zeros(NUM_SIM)
            is_correct_naive = np.zeros(NUM_SIM)
            is_correct_bayesian = np.zeros(NUM_SIM)
            is_correct_multiarm = np.zeros(NUM_SIM)

            # do 1000 simulations for each run
            for i in range(NUM_SIM):
                naive_selector, bayesian_selector, multiarm_selector = simulate(
                    prob_click_a=prob_click_a,
                    prob_click_b=prob_click_b,
                    num_users=NUM_USERS
                )
                num_click_naive[i], _ = naive_selector.prepare_report()
                num_click_bayesian[i], _ = bayesian_selector.prepare_report()
                num_click_multiarm[i], _ = multiarm_selector.prepare_report()
                is_correct_naive[i] = naive_selector.did_give_correct_answer()
                is_correct_bayesian[i] = bayesian_selector.did_give_correct_answer()
                is_correct_multiarm[i] = multiarm_selector.did_give_correct_answer()

            f.write(
                "{}, {}, {}, {}, {}, {}, {}, {}\n".format(
                    prob_click_a, prob_click_b,
                    np.mean(num_click_naive),
                    np.mean(num_click_bayesian),
                    np.mean(num_click_multiarm),
                    np.mean(is_correct_naive),
                    np.mean(is_correct_bayesian),
                    np.mean(is_correct_multiarm)
                )
            )
    f.close()


if __name__ == "__main__":
    main()
apache-2.0
Python
ccf21faf0110c9c5a4c28a843c36c53183d71550
add missing file
chfw/pyexcel-xls,chfw/pyexcel-xls
pyexcel_xls/__init__.py
pyexcel_xls/__init__.py
""" pyexcel_xls ~~~~~~~~~~~~~~~~~~~ The lower level xls/xlsm file format handler using xlrd/xlwt :copyright: (c) 2015-2016 by Onni Software Ltd :license: New BSD License """ from pyexcel_io.io import get_data as read_data, isstream, store_data as write_data def get_data(afile, file_type=None, **keywords): if isstream(afile) and file_type is None: file_type = 'xls' return read_data(afile, file_type=file_type, **keywords) def save_data(afile, data, file_type=None, **keywords): if isstream(afile) and file_type is None: file_type = 'xls' write_data(afile, data, file_type=file_type, **keywords)
bsd-3-clause
Python
5c4ed354d1bfd5c4443cc031a29e6535b2063178
add test-env
sikuli/sikuli,bx5974/sikuli,bx5974/sikuli,bx5974/sikuli,sikuli/sikuli,sikuli/sikuli,sikuli/sikuli,sikuli/sikuli,bx5974/sikuli,bx5974/sikuli,bx5974/sikuli,sikuli/sikuli,bx5974/sikuli,sikuli/sikuli
sikuli-script/src/test/python/test-env.py
sikuli-script/src/test/python/test-env.py
from __future__ import with_statement
from sikuli.Sikuli import *

print Env.getOS(), Env.getOSVersion()
print "MAC?", Env.getOS() == OS.MAC
print Env.getMouseLocation()
mit
Python
1828f7bb8cb735e755dbcb3a894724dec28748cc
add sort file
Daetalus/Algorithms
sort/sort.py
sort/sort.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-

from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
unlicense
Python
fbd6db138ce65825e56a8d39bf30ed8525b88503
Add exception handler for db not found errors.
sketchturnerr/WaifuSim-backend,sketchturnerr/WaifuSim-backend
resources/middlewares/db_not_found_handler.py
resources/middlewares/db_not_found_handler.py
import falcon


def handler(ex, req, resp, params):
    raise falcon.HTTPNotFound()
cc0-1.0
Python
a1eaf66efa2041849e906010b7a4fb9412a9b781
Add instance method unit tests
FriendCode/funky
tests/test_instancemethod.py
tests/test_instancemethod.py
# Imports
import random
import unittest

from funky import memoize, timed_memoize


class Dummy(object):
    @memoize
    def a(self):
        return random.random()


class TestInstanceMethod(unittest.TestCase):
    def test_dummy(self):
        dummy = Dummy()
        v1 = dummy.a()
        v2 = dummy.a()
        dummy.a.clear()
        v3 = dummy.a()
        self.assertEqual(v1, v2)
        self.assertNotEqual(v1, v3)


if __name__ == '__main__':
    unittest.main()
apache-2.0
Python
c3221d70f829dc2968ebfb1a47efd9538a1ef59f
test gaussian + derivatives
svenpeter42/fastfilters,svenpeter42/fastfilters,svenpeter42/fastfilters,svenpeter42/fastfilters,svenpeter42/fastfilters
tests/vigra_compare.py
tests/vigra_compare.py
import fastfilters as ff
import numpy as np
import sys

try:
    import vigra
except ImportError:
    print("WARNING: vigra not available - skipping tests.")
    with open(sys.argv[1], 'w') as f:
        f.write('')
    exit()

a = np.random.randn(1000000).reshape(1000,1000)

for order in [0,1,2]:
    for sigma in [1.0, 5.0, 10.0]:
        res_ff = ff.gaussian2d(a, order, sigma)
        res_vigra = vigra.filters.gaussianDerivative(a, sigma, [order,order])

        if not np.allclose(res_ff, res_vigra, atol=1e-6):
            print(order, sigma, np.max(np.abs(res_ff - res_vigra)))
            raise Exception()

np.unique(ff.hog2d(a, 1.0))
mit
Python
8ec1d35fe79554729e52aec4e0aabd1d9f64a9c7
Put main.py display functions in its own module so they can be used in other parts of the package
fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop
fire_rs/display.py
fire_rs/display.py
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource
from matplotlib.ticker import FuncFormatter
from matplotlib import cm


def get_default_figure_and_axis():
    fire_fig = plt.figure()
    fire_ax = fire_fig.gca(aspect='equal', xlabel="X position [m]",
                           ylabel="Y position [m]")

    ax_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    fire_ax.yaxis.set_major_formatter(ax_formatter)
    fire_ax.xaxis.set_major_formatter(ax_formatter)
    return fire_fig, fire_ax


def plot_firefront_contour(ax, x, y, firefront, nfronts=20):
    fronts = ax.contour(x, y, firefront, nfronts, cmap=cm.Set1)
    labels = ax.clabel(fronts, inline=True, fontsize='smaller', inline_spacing=1,
                       linewidth=2, fmt='%.0f')
    return fronts, labels


def plot_elevation_contour(ax, x, y, z):
    contour = ax.contour(x, y, z, 15, cmap=cm.gist_earth)
    labels = plt.clabel(contour, inline=1, fontsize=10)
    return contour, labels


def plot_elevation_shade(ax, x, y, z, dx=25, dy=25):
    cbar_lim = (z.min(), z.max())
    image_scale = (x[0][0], x[0][x.shape[0] - 1], y[0][0], y[y.shape[0] - 1][0])
    ls = LightSource(azdeg=315, altdeg=45)
    ax.imshow(ls.hillshade(z, vert_exag=5, dx=dx, dy=dy), extent=image_scale,
              cmap='gray')
    return ax.imshow(ls.shade(z, cmap=cm.terrain, blend_mode='overlay',
                              vert_exag=1, dx=dx, dy=dy,
                              vmin=cbar_lim[0], vmax=cbar_lim[1]),
                     extent=image_scale, vmin=cbar_lim[0], vmax=cbar_lim[1],
                     cmap=cm.terrain)


def plot_wind_flow(ax, x, y, wx, wy, wvel):
    return ax.streamplot(x, y, wx, wy, density=1, linewidth=1, color='dimgrey')


def plot_wind_arrows(ax, x, y, wx, wy):
    return ax.quiver(x, y, wx, wy, pivot='middle', color='dimgrey')


def plot3d_elevation_shade(ax, x, y, z, dx=25, dy=25):
    ls = LightSource(azdeg=120, altdeg=45)
    rgb = ls.shade(z, cmap=cm.terrain, vert_exag=0.1, blend_mode='overlay')
    return ax.plot_surface(x, y, z, facecolors=rgb, rstride=5, cstride=5,
                           linewidth=0, antialiased=True, shade=True)


def plot3d_wind_arrows(ax, x, y, z, wx, wy, wz):
    return ax.quiver(x, y, z, wx, wy, wz, pivot='middle', cmap=cm.viridis)
bsd-2-clause
Python
b17252a0b1becfda77e4244cf48c2fb9f868c03b
add method to pregenerate html reports
armijnhemel/binaryanalysis
src/bat/generateuniquehtml.py
src/bat/generateuniquehtml.py
#!/usr/bin/python

## Binary Analysis Tool
## Copyright 2012 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details

'''
This is a plugin for the Binary Analysis Tool. It takes the output of hexdump -Cv
and writes it to a file with gzip compression. The output is later used in the
(upcoming) graphical user interface.

This should be run as a postrun scan
'''

import os, os.path, sys, gzip, cgi

def generateHTML(filename, unpackreport, leafscans, scantempdir, toplevelscandir, envvars={}):
    if not unpackreport.has_key('sha256'):
        return
    scanenv = os.environ.copy()
    if envvars != None:
        for en in envvars.split(':'):
            try:
                (envname, envvalue) = en.split('=')
                scanenv[envname] = envvalue
            except Exception, e:
                pass

    ## TODO: check if BAT_REPORTDIR exists
    reportdir = scanenv.get('BAT_REPORTDIR', '.')

    for i in leafscans:
        if i.keys()[0] == 'ranking':
            if len(i['ranking']['reports']) != 0:
                htmllinks = []
                for j in i['ranking']['reports']:
                    if len(j[2]) != 0:
                        ## here we should either do a database lookup to get the checksum,
                        ## or check if they are already in the report
                        htmllinks.append((j[1], j[2]))
                if htmllinks != []:
                    uniquehtml = "<html><body><h1>Unique matches per package</h1><p><ul>"
                    ## first generate a header
                    for h in htmllinks:
                        uniquehtml = uniquehtml + "<li><a href=\"#%s\">%s</a>" % (h[0], h[0])
                    uniquehtml = uniquehtml + "</ul></p>"
                    for h in htmllinks:
                        uniquehtml = uniquehtml + "<hr><h2><a name=\"%s\" href=\"#%s\">Matches for: %s (%d)</a></h2>" % (h[0], h[0], h[0], len(h[1]))
                        for k in h[1]:
                            ## we have a list of tuples, per unique string we have a list of sha256sums and meta info
                            if len(k) > 1:
                                uniquehtml = uniquehtml + "<h5>%s</h5><p><table><td><b>Filename</b></td><td><b>Version</b></td><td><b>Line number</b></td><td><b>SHA256</b></td></tr>" % cgi.escape(k[0])
                                uniqtablerows = map(lambda x: "<tr><td>%s</td><td><a href=\"unique:/%s#%d\">%s</a></td><td>%d</td><td>%s</td></tr>" % (x[3], x[0], x[2], x[1], x[2], x[0]), k[1])
                                uniquehtml = uniquehtml + reduce(lambda x, y: x + y, uniqtablerows) + "</table></p>\n"
                            else:
                                uniquehtml = uniquehtml + "<h5>%s</h5>" % cgi.escape(k[0])
                    uniquehtml = uniquehtml + "</body></html>"
                    uniquehtmlfile = open("%s/%s-unique.html" % (reportdir, unpackreport['sha256']), 'w')
                    uniquehtmlfile.write(uniquehtml)
                    uniquehtmlfile.close()
apache-2.0
Python
78f730b405c6e67988cdc9efab1aa5316c16849f
Add initial test for web response
elastic-coders/aiohttp,jashandeep-sohi/aiohttp,mind1master/aiohttp,pfreixes/aiohttp,panda73111/aiohttp,AlexLisovoy/aiohttp,arthurdarcet/aiohttp,z2v/aiohttp,saghul/aiohttp,noplay/aiohttp,alexsdutton/aiohttp,Eyepea/aiohttp,jettify/aiohttp,AlexLisovoy/aiohttp,noplay/aiohttp,juliatem/aiohttp,jashandeep-sohi/aiohttp,arthurdarcet/aiohttp,pathcl/aiohttp,panda73111/aiohttp,rutsky/aiohttp,jashandeep-sohi/aiohttp,saghul/aiohttp,Insoleet/aiohttp,vedun/aiohttp,mind1master/aiohttp,vaskalas/aiohttp,moden-py/aiohttp,jojurajan/aiohttp,moden-py/aiohttp,singulared/aiohttp,KeepSafe/aiohttp,Srogozins/aiohttp,esaezgil/aiohttp,mind1master/aiohttp,z2v/aiohttp,juliatem/aiohttp,elastic-coders/aiohttp,decentfox/aiohttp,esaezgil/aiohttp,iksteen/aiohttp,z2v/aiohttp,danielnelson/aiohttp,jettify/aiohttp,KeepSafe/aiohttp,pfreixes/aiohttp,elastic-coders/aiohttp,vaskalas/aiohttp,noodle-learns-programming/aiohttp,esaezgil/aiohttp,singulared/aiohttp,vaskalas/aiohttp,morgan-del/aiohttp,alex-eri/aiohttp-1,hellysmile/aiohttp,playpauseandstop/aiohttp,iksteen/aiohttp,AraHaanOrg/aiohttp,singulared/aiohttp,jettify/aiohttp,vasylbo/aiohttp,arthurdarcet/aiohttp,jojurajan/aiohttp,hellysmile/aiohttp,alex-eri/aiohttp-1,rutsky/aiohttp,rutsky/aiohttp,moden-py/aiohttp,decentfox/aiohttp,sterwill/aiohttp,alunduil/aiohttp,flying-sheep/aiohttp,alexsdutton/aiohttp,decentfox/aiohttp,KeepSafe/aiohttp,panda73111/aiohttp,avanov/aiohttp,alex-eri/aiohttp-1,andyaguiar/aiohttp,AraHaanOrg/aiohttp
tests/test_web_response.py
tests/test_web_response.py
import unittest
from unittest import mock

from aiohttp.web import Request, StreamResponse
from aiohttp.protocol import Request as RequestImpl


class TestStreamResponse(unittest.TestCase):

    def make_request(self, method, path, headers=()):
        self.app = mock.Mock()
        self.transport = mock.Mock()
        message = RequestImpl(self.transport, method, path)
        message.headers.extend(headers)
        self.payload = mock.Mock()
        self.protocol = mock.Mock()
        req = Request(self.app, message, self.payload, self.protocol)
        return req

    def test_ctor(self):
        req = self.make_request('GET', '/')
        resp = StreamResponse(req)
        self.assertEqual(req, resp._request)
        self.assertIsNone(req._response)
        self.assertEqual(200, resp.status_code)
        self.assertTrue(resp.keep_alive)
apache-2.0
Python
644a678d3829513361fdc099d759ca964100f2e6
Add script to replace text
jleung51/scripts,jleung51/scripts,jleung51/scripts
text-files/replace-text.py
text-files/replace-text.py
#!/usr/bin/env python3

# This Python 3 script replaces text in a file, in-place.

# For Windows, use:
#!python

import fileinput
import os
import sys


def isValidFile(filename):
    return (filename.lower().endswith('.m3u') or
            filename.lower().endswith('.m3u8'))


def processFile(filename):
    '''Makes custom text modifications to a single file.

    Returns true if modified, false if not modified.
    '''
    modified = False
    with fileinput.input(filename, inplace=True) as f:
        for line in f:
            # Check any condition
            if '\\' in line:
                modified = True
            # Make the modifications
            newline = line.replace('\\', '/')
            sys.stdout.write(newline)
    return modified


if __name__ == '__main__':
    for filename in os.listdir(os.getcwd()):
        if not isValidFile(filename):
            continue
        modified = processFile(filename)
        if modified:
            print(filename)
    # Wait for user input to finish
    input()
mit
Python
8d8f6b99357912fa9a29098b0744712eeb1d4c70
Add coder/decoder skeleton
ademinn/AdaptiveIPFilter,ademinn/AdaptiveIPFilter
src/coder.py
src/coder.py
from bitarray import bitarray
from datetime import datetime, timedelta


def decode():
    with open() as f:
        timestamps = []
        start = [0, 0, 0]
        end = [1, 1, 1]
        delta = timedelta(seconds=1)
        for line in f:
            ts = line.split(" ", 1)[0]
            ts = datetime.strptime(ts, '%H:%M:%S.%f')
            timestamps.append(ts)
        bits = [int(t2 - t1 > delta) for t2, t1 in zip(timestamps[1:], timestamps[:-1])]
        bits = extract_message(bits, start, end)
        print get_message(bits)


def find_index(list, sublist):
    print('Find {} in {}'.format(sublist, list))
    for i in range(len(list) - len(sublist) + 1):
        if list[i:i+len(sublist)] == sublist:
            return i
    return None


def extract_message(bits, start, end):
    start_index = find_index(bits, start) + len(start)
    end_index = find_index(bits[start_index:], end)
    return bits[start_index:start_index + end_index]


def get_message(bits):
    return bitarray(bits).tostring()


def get_bits(msg):
    ba = bitarray()
    ba.fromstring(msg)
    return ba.tolist()
bsd-2-clause
Python
3c18ace928b0339b0edf4763f4132d327936cbe8
add utils
jvpoulos/Attention-OCR
src/utils.py
src/utils.py
def set_trace():
    from IPython.core.debugger import Pdb
    import sys
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)


def plot_ROC(actual, predictions):
    # plot the FPR vs TPR and AUC for a two class problem (0,1)
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc
    false_positive_rate, true_positive_rate, thresholds = roc_curve(actual, predictions)
    roc_auc = auc(false_positive_rate, true_positive_rate)
    plt.title('Receiver Operating Characteristic')
    plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.2f'% roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0,1],[0,1],'r--')
    plt.xlim([-0.1,1.2])
    plt.ylim([-0.1,1.2])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
mit
Python
0bca09339bb49e4540c5be8162e11ea3e8106200
Create a PySide GUI window.
mattdeckard/wherewithal
budget.py
budget.py
#!/usr/bin/env python
import sys
from PySide import QtGui

app = QtGui.QApplication(sys.argv)

wid = QtGui.QWidget()
wid.resize(250, 150)
wid.setWindowTitle('Simple')
wid.show()

sys.exit(app.exec_())
apache-2.0
Python
c02b8011b20e952460a84b7edf1b44fcb0d07319
add re07.py
devlights/try-python
trypython/stdlib/re_/re07.py
trypython/stdlib/re_/re07.py
""" 正規表現のサンプルです 部分正規表現(「(」と「)」)のグルーピングについて REFERENCES:: http://bit.ly/2TVtVNY http://bit.ly/2TVRy8Z http://bit.ly/2TWQQs4 """ import re from trypython.common.commoncls import SampleBase from trypython.common.commonfunc import pr from trypython.stdlib.re_ import util class Sample(SampleBase): def exec(self): # --------------------------------------------------- # 部分正規表現のグルーピング # ----------------------------- # 「(」と「)」に囲まれた正規表現をひとまとまりにして # 部分正規表現を定義することができる. # # 「(」と「)」を入れ子にすることも可能. # # 例えば (abc)+ という正規表現は # 「abcという文字列が一回以上連続した文字列」を表す. # # また、通常の四則計算と同様に優先度を変更するためにも利用する. # # 例えば、「田中|佐藤 太郎」という正規表現は # 「田中、または佐藤 太郎を意味する。これを # 「(田中|佐藤) 太郎」とすると、田中 太郎、または佐藤 太郎 # という意味となる。 # # グルーピングには、もう一つ大きな役目があり # 「マッチした範囲をキャプチャ(記憶)する」という事もできる. # キャプチャした内容は後から特別な記法により取り出す事が可能. # 置換などを行う際に重宝する. # --------------------------------------------------- s = '田中 太郎' p = r'田中|佐藤 太郎' r = re.compile(p) m = r.match(s) if m: # 田中 太郎でマッチせずに 「田中」のみがマッチする util.print_match_object(m) m = r.fullmatch(s) if not m: # fullmatch 指定した場合はマッチしないと判定される # fullmatch メソッドは python 3.4 で追加された pr(f'({p}).fullmatch({s})', 'マッチせず') p = r'(田中|佐藤) 太郎' r = re.compile(p) m = r.match(s) if m: # グルーピング指定しているので「田中 太郎」でマッチする # かつ、グルーピングにより「田中」の部分がキャプチャされる util.print_match_object(m) m = r.fullmatch(s) if m: # グルーピング指定しているので「田中 太郎」でフルマッチする # かつ、グルーピングにより「田中」の部分がキャプチャされる util.print_match_object(m) def go(): obj = Sample() obj.exec() if __name__ == '__main__': go()
mit
Python
bc28f6ab7ba5bb5e82bf38c544a4d091d89973ea
Use servoblaster to control servo
emilv/candycrush,emilv/candycrush
candycrush.py
candycrush.py
#!/usr/bin/env python

import os.path
import subprocess
import time


def scaler(OldMin, OldMax, NewMin, NewMax):
    def fn(OldValue):
        return (((OldValue - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin
    return fn


def setup_servod():
    if not os.path.exists("/dev/servoblaster"):
        subprocess.call(["servod"])


def set_servo(physical_pin, degrees):
    servodegrees = scaler(0, 180, 53, 240)
    with open("/dev/servoblaster", "w") as f:
        servovalue = int(servodegrees(degrees))
        f.write("P1-{}={}".format(physical_pin, servovalue))


def main():
    set_servo(11, 0)
    time.sleep(2)
    set_servo(11, 180)
    time.sleep(2)
    set_servo(11, 90)
    time.sleep(2)
    set_servo(11, 45)
    time.sleep(2)
    set_servo(11, 30)


if __name__ == '__main__':
    main()
agpl-3.0
Python
26595ad3dd7dcd9dfd16ae551345db9b7e58412a
Add updater
BamX/taggy-api,BamX/taggy-api,BamX/taggy-api,BamX/taggy-api
updater/openexchangerates.py
updater/openexchangerates.py
#!env/bin/python

import urllib2
import simplejson
import datetime

APP_ID = "40639356d56148f1ae26348d670e889f"
TARGET_URL = "http://taggy-api.bx23.net/api/v1/currency/"


def main():
    print 'Getting rates...'
    request = urllib2.Request("http://openexchangerates.org/api/latest.json?app_id=%s" % (APP_ID))
    opener = urllib2.build_opener()
    f = opener.open(request)
    result = simplejson.load(f)
    rates = result['rates']
    date = datetime.datetime.fromtimestamp(int(result['timestamp']))
    print 'Rates [%s] size: %s' % (date, len(rates))

    print 'Sending to API...'
    update_j = {"currency" : [], "timestamp" : result['timestamp']}
    for name, value in rates.iteritems():
        update_j["currency"].append({"name" : name, "value" : value})

    request = urllib2.Request(TARGET_URL, simplejson.dumps(update_j), {'Content-Type': 'application/json'})
    f = urllib2.urlopen(request)
    response = f.read()
    f.close()
    print ' API: %s' % (response)


if __name__ == '__main__':
    main()
mit
Python
3ef4fdcc98a12111aee6f0d214af98ef68315773
add reboot module
melmothx/jsonbot,melmothx/jsonbot,melmothx/jsonbot
gozerlib/reboot.py
gozerlib/reboot.py
# gozerbot/utils/reboot.py
#
#

""" reboot code. """

## gozerlib imports

from gozerlib.fleet import fleet
from gozerlib.config import cfg as config

## basic imports

from simplejson import dump

import os
import sys
import pickle
import tempfile

def reboot():
    """ reboot the bot.

        .. literalinclude:: ../../gozerbot/reboot.py
            :pyobject: reboot

    """
    os.execl(sys.argv[0], *sys.argv)

def reboot_stateful(bot, ievent, fleet, partyline):
    """ reboot the bot, but keep the connections.

        :param bot: bot on which the reboot command is given
        :type bot: gozerbot.botbase.BotBase
        :param ievent: event that triggered the reboot
        :type ievent: gozerbot.eventbase. EventBase
        :param fleet: the fleet of the bot
        :type fleet: gozerbot.fleet.Fleet
        :param partyline: partyline of the bot
        :type partyline: gozerbot.partyline.PartyLine

        .. literalinclude:: ../../gozerbot/reboot.py
            :pyobject: reboot_stateful

    """
    config.reload()
    session = {'bots': {}, 'name': bot.name, 'channel': ievent.channel, 'partyline': []}

    for i in fleet.bots:
        session['bots'].update(i._resumedata())

    session['partyline'] = partyline._resumedata()
    sessionfile = tempfile.mkstemp('-session', 'gozerbot-')[1]
    dump(session, open(sessionfile, 'w'))
    fleet.save()
    fleet.exit(jabber=True)
    os.execl(sys.argv[0], sys.argv[0], '-r', sessionfile)
mit
Python
edec2fc1f57c31e15793fd56b0f24bb58ba345d9
Create evalFunctionsLib.py
emilydolson/forestcat,emilydolson/forestcat,emilydolson/forestcat,emilydolson/forestcat,emilydolson/forestcat,emilydolson/forestcat,emilydolson/forestcat
evalFunctionsLib.py
evalFunctionsLib.py
# figure/plot come from matplotlib's pylab interface; the original module
# used them without an explicit import, so one is added here
from pylab import *


def plot_stream(stream, data, r):
    """
    Plots the values of a specific stream over time.

    Inputs: stream - an int indicating the index of the desired stream
            data - An array of all sensor data
            r = a RAVQ object

    Returns: The number of -1s (indicating invalid data) in this stream
    """
    # note: relies on a module-level `states` list of state indices
    values = []
    negs = 0
    for i in range(len(states)):
        if states[i] == -1:
            negs += 1
        else:
            values.append(r.models[states[i]].vector[stream])
    plot(data[1][negs:len(states)], values, 'r-')
    return negs


def plotColorStatesNoNumber(states):
    """
    Makes a color-bar plot of states over time.

    Inputs: states - a list indicating the state at each time step.
    """
    fig = figure(figsize=(9.0,6))
    for i in range(len(states)):
        if states[i] == -1:
            plot(i, 0, "ko", hold=True)
        elif states[i] == 0:
            plot(i, 1, "|", color="LawnGreen")
        elif states[i] == 1:
            plot(i, 1, "|", color="LimeGreen")
        elif states[i] == 2:
            plot(i, 1, "|", color="teal")
        elif states[i] == 7:
            plot(i, 1, "|", color="DarkGreen")
        elif states[i] == 4:
            plot(i, 1, "b|")
        elif states[i] == 5:
            plot(i, 1, "|", color="DarkBlue")
        elif states[i] == 6:
            plot(i, 1, "|", color="purple")
        elif states[i] == 3:
            plot(i, 1, "|", color="green")
        elif states[i] == 8:
            plot(i, 1, "|", color="yellow")
        elif states[i] == 9:
            plot(i, 1, "|", color="navy")
        elif states[i] == 11:
            plot(i, 1, "|", color="GreenYellow")
        elif states[i] == 10:
            plot(i, 1, "|", color="orange")
        elif states[i] == 12:
            plot(i, 1, "|", color="red")
        else:
            plot(i, 1, "-")
            print i


def plotColorStates(states):
    """
    Makes a plot in which state is on the y axis and time is on the x axis
    and points are colored by state.

    Input: states - a list of ints representing the state at each time step.
    """
    fig = figure(figsize=(9.0,6))
    for i in range(len(states)):
        if states[i] == -1:
            plot(i, 0, "ko", hold=True)
        elif states[i] == 0:
            plot(i, 9, "|", color="LawnGreen")
        elif states[i] == 1:
            plot(i, 8, "|", color="LimeGreen")
        elif states[i] == 2:
            plot(i, 5, "|", color="teal")
        elif states[i] == 7:
            plot(i, 6, "|", color="DarkGreen")
        elif states[i] == 4:
            plot(i, 4, "b|")
        elif states[i] == 5:
            plot(i, 2, "|", color="DarkBlue")
        elif states[i] == 6:
            plot(i, 1, "|", color="purple")
        elif states[i] == 3:
            plot(i, 7, "|", color="green")
        elif states[i] == 8:
            plot(i, 11, "|", color="yellow")
        elif states[i] == 9:
            plot(i, 3, "|", color="navy")
        elif states[i] == 11:
            plot(i, 10, "|", color="GreenYellow")
        elif states[i] == 10:
            plot(i, 12, "|", color="orange")
        elif states[i] == 12:
            plot(i, 13, "|", color="red")
        else:
            plot(i, 1, "-")
            print i


def printTransitions(states):
    for i in range(1,len(states)):
        if states[i-1] != states[i]:
            print data[0][i], ":", states[i-1], "->", states[i]
agpl-3.0
Python
3c37f63f65a9d85c605dde55ae19c8d5d62ad777
add missing file
fedora-conary/rmake-2,fedora-conary/rmake-2,fedora-conary/rmake-2,fedora-conary/rmake-2
rmake/plugins/plugin.py
rmake/plugins/plugin.py
#
# Copyright (c) 2006 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Definition of plugins available for rmake plugins.

Plugin writers should derive from one of these classes.

The plugin will be called with the hooks described here, if the
correct program is being run.  For example, when running rmake-server,
the server hooks will be run.
"""
from rmake.lib.pluginlib import Plugin

TYPE_CLIENT = 0
TYPE_SERVER = 1
TYPE_SUBSCRIBER = 2

class ClientPlugin(Plugin):

    types = [TYPE_CLIENT]

    def client_preInit(self, main):
        """
            Called right after plugins have been loaded.
        """
        pass

    def client_preCommand(self, main, client):
        """
            Called after the command-line client has instantiated,
            but before the command has been executed.
        """
        pass

class ServerPlugin(Plugin):

    types = [TYPE_SERVER]

    def server_preConfig(self, main):
        """
            Called before the configuration file has been read in.
        """
        pass

    def server_preInit(self, main, argv):
        """
            Called before the server has been instantiated.
        """
        pass

    def server_postInit(self, server):
        """
            Called after the server has been instantiated but before
            serving is done.
        """
        pass

    def server_pidDied(self, pid, status):
        """
            Called when the server collects a child process that has died.
        """
        pass

    def server_loop(self, server):
        """
            Called once per server loop, between requests.
        """
        pass

    def server_builderInit(self, server, builder):
        """
            Called when the server instantiates a builder for a job.
        """
        pass

    def server_shutDown(self, server):
        """
            Called when the server is halting.
        """
        pass

class SubscriberPlugin(Plugin):

    types = [TYPE_SUBSCRIBER]
    protocol = None

    def subscriber_get(self, uri, name):
        """
            Should return a child of the StatusSubscirber class.
        """
        pass
apache-2.0
Python
2100eb3e0a72395f23571c6be2bada9939739869
add ex
yuzheng/python-ex
checkDigit.py
checkDigit.py
#-*-coding:UTF-8 -*-
#
# Check whether the input is an integer (int)

input_string = input('Please input n:')

#while input_string.isdigit() == False:
while not input_string.isdigit():
    print("Error, %s is not digit!" % input_string)
    input_string = input('Please input n:')

print("%s is digit!" % input_string)
mit
Python
09592b081a68f912bf9bb73c5269af8398c36f64
Add unit test for treating Ordering as a collection
madman-bob/python-order-maintenance
tests/test_collection.py
tests/test_collection.py
from unittest import TestCase

from ordering import Ordering


class TestOrderingAsCollection(TestCase):
    def setUp(self) -> None:
        self.ordering = Ordering[int]()

        self.ordering.insert_start(0)
        for n in range(10):
            self.ordering.insert_after(n, n + 1)

    def test_length(self) -> None:
        self.assertEqual(len(self.ordering), 11)

    def test_iterates_over_correct_elements(self) -> None:
        self.assertListEqual(
            list(self.ordering),
            list(range(11))
        )

    def test_contains_correct_elements(self) -> None:
        for n in range(11):
            self.assertIn(n, self.ordering)

        for n in range(11, 20):
            self.assertNotIn(n, self.ordering)

        for n in range(-10, 0):
            self.assertNotIn(n, self.ordering)
mit
Python
6f1ed2fcdd43a5237d0211b426a216fd25930734
add test preprocess
ratnania/pyccel,ratnania/pyccel
tests/test_preprocess.py
tests/test_preprocess.py
# coding: utf-8

code = '''
n = 10
for i in range(0,n):
    x = 2 * i
    y = x / 3

    # a comment
    if y > 1:
        print(y)

    for j in range(0, 3):
        x = x * y
        y = x + 1
        if x > 1:
            print(x)
'''

code = '''
#$ header legendre(int)
def legendre(p):
    k = p + 1
    x = zeros(k, double)
    w = zeros(k, double)
    if p == 1:
        x[0] = -0.577350269189625765
        x[1] = 0.577350269189625765
        w[0] = 1.0
        w[1] = 1.0
    elif p == 2:
        x[0] = -0.774596669241483377
        x[1] = 0.0
        x[2] = 0.774596669241483377
        w[0] = 0.55555555555555556
        w[1] = 0.888888888888888889
        w[2] = 0.55555555555555556
    elif p == 3:
        x[0] = -0.861136311594052575
        x[1] = -0.339981043584856265
        x[2] = 0.339981043584856265
        x[3] = 0.861136311594052575
        w[0] = 0.347854845137453853
        w[1] = 0.65214515486254615
        w[2] = 0.65214515486254614
        w[3] = 0.34785484513745386
    return x,w

#$ comment
if x > 1:
    print(x)
'''

from pyccel.codegen import preprocess_as_str

txt = preprocess_as_str(code)
print txt
mit
Python
175b36b0eb1e84378e350ddc31da3ef7fcae32c2
Add test.
douban/PyCharlockHolmes,douban/PyCharlockHolmes,douban/PyCharlockHolmes,douban/PyCharlockHolmes,douban/PyCharlockHolmes
test/test.py
test/test.py
#!/usr/bin/env python
#
# Test PyCharlockHolmes
#

from charlockholmes import detect

TEST_FILES = {
    "py": [
        "file/test.py",
        {'confidence': 34, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'}
    ],
    "txt": [
        "file/test.txt",
        {'confidence': 16, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'}
    ],
    "c": [
        "file/test.c",
        {'confidence': 50, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'}
    ],
    "sh": [
        "file/test.sh",
        {'confidence': 21, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'}
    ],
    "elf": [
        "file/test",
        {'confidence': 100, 'type': 'text'}
    ],
    "bz2": [
        "file/test.tar.bz2",
        {'confidence': 100, 'type': 'text'}
    ],
    "gz": [
        "file/test.tar.gz",
        {'confidence': 100, 'type': 'text'}
    ],
}

for test in TEST_FILES:
    file_path = TEST_FILES[test][0]
    file_result = TEST_FILES[test][1]
    content = open(file_path).read()
    test_result = detect(content)
    if test_result == file_result:
        print file_path + ": OK"
    else:
        print file_path + ": ERROR"
bsd-3-clause
Python
829defd825d5e311ad187569ba61381ecb40dd08
Add q1 2019
matthewelse/british-informatics-olympiad,matthewelse/british-informatics-olympiad,matthewelse/british-informatics-olympiad
2019/q1.py
2019/q1.py
""" BIO 2019 Q1: Palindromes This ended up being surprisingly difficult, for whatever reason I found it surprisingly difficult to reason about. I found it easier to think about how, given a palindrome, I would calculate the following palindrome. There are ~2 cases: Odd number of digits: [left][middle][right = reversed(right)] Even number of digits: [left][right = reversed(right)] In the first case, we can (hopefully) obviously generate the next palindrome by adding one to the middle digit, and carrying the one into the left hand side as if you were doing regular addition, and then reflecting the new value to produce a new palindrome. In the second case, we can basically do the same thing, but without the middle digit. And then if we are still 'carrying' anything by the time we get to the end, this becomes a new left-most digit, and the right most digit becomes the new middle digit. """ class Palindrome: def __init__(self, left, middle): assert middle is None or middle < 10 and middle >= 0 self.left = list(int(x) for x in str(left)) self.middle = middle def add_one_left(self, carry): for i in range(len(self.left)): ix = -(i + 1) if self.left[ix] == 9: self.left[ix] = 0 carry = True else: self.left[ix] += 1 carry = False break if carry and self.middle is None: self.middle = self.left[-1] self.left = [1] + self.left[:-1] elif carry and self.middle is not None: self.left = [1] + self.left self.middle = None def next_palindrome(self): if self.middle is not None: if self.middle == 9: self.middle = 0 self.add_one_left(carry = True) else: self.middle += 1 else: self.add_one_left(carry = False) def as_int(self): if self.middle is None: l = self.left + list(reversed(self.left)) else: l = self.left + [self.middle] + list(reversed(self.left)) return int("".join(str(x) for x in l)) @staticmethod def of_int(i): s = str(i) if len(s) % 2 == 0: left = [int(x) for x in s[:len(s) //2]] middle = None else: left = [int(x) for x in s[:len(s) //2]] middle = int(s[len(left)]) return Palindrome("".join(str(x) for x in left), middle) def __str__(self): return str(self.as_int()) i = input() in_int = int(i) p = Palindrome.of_int(i) p_int = p.as_int() if p_int > in_int: print(p_int) else: p.next_palindrome() print(p)
mit
Python
77cb3a711170d0f37a0d5d5cf9744ffc8f7242fd
Add test for comparing performance on virtual machines
henrist/aqmt,henrist/aqmt,henrist/aqmt,henrist/aqmt
henrste/test-vm.py
henrste/test-vm.py
#!/usr/bin/env python3

from framework.test_framework import Testbed, TestEnv, require_on_aqm_node
from framework.test_utils import *
import time

def test_compose():
    udp_rate = 0

    def branch_udp_rate(rate_list, title='UDP-rate: %d Mb/s'):
        def branch(testdef):
            nonlocal udp_rate
            for rate in rate_list:
                udp_rate = rate
                yield {
                    'tag': 'udp-rate-%d' % rate,
                    'title': title % rate,
                    'titlelabel': 'UDP Rate [Mb/s]',
                }
        return branch

    def branch_titles(titles):
        def branch(testdef):
            for tag, title in titles:
                yield {
                    'tag': 'title-%s' % tag,
                    'title': title,
                    'titlelabel': '',
                }
        return branch

    def branch_cc(testdef):
        pass

    def my_test(testcase):
        for i in range(15):
            testcase.run_greedy(node='a', tag='node-a')
            testcase.run_greedy(node='b', tag='node-b')

        if udp_rate > 0:
            time.sleep(1)
            testcase.run_udp(node='a', bitrate=udp_rate * MBIT, ect='nonect', tag='udp-rate')

    testbed = Testbed()

    testbed.ta_samples = 30
    testbed.ta_idle = 5
    testbed.ta_delay = 500

    testbed.cc('a', 'cubic', testbed.ECN_ALLOW)
    testbed.cc('b', 'dctcp-drop', testbed.ECN_INITIATE)

    run_test(
        folder='results/vm-test-1',
        title='Testing VM',
        subtitle='Using 15 flows of CUBIC, 15 flows of DCTCP (with ECN) and 1 flow UDP',
        testenv=TestEnv(testbed, retest=False),
        steps=(
            branch_titles([
                ('dqa', 'dqa'),
                ('dqa1', 'dqa1'),
                ('dqa2', 'dqa2'),
                ('dqa3', 'dqa3'),
                ('dqa4', 'dqa4'),
                ('dqa5', 'dqa5'),
                ('x250', 'x250'),
            ]),
            plot_swap(),
            branch_sched([
                ('pi2',
                    'PI2: dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t\\\\_shift 30ms l\\\\_drop 100',
                    lambda testbed: testbed.aqm_pi2(params='dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t_shift 30ms l_drop 100')),
                ('pie', 'PIE', lambda testbed: testbed.aqm_pie('ecn target 15ms tupdate 15ms alpha 1 beta 10 ecndrop 25')),
                #('pfifo', 'pfifo', lambda testbed: testbed.aqm_pfifo()),
            ]),
            plot_swap(),
            branch_rtt([10]),
            plot_swap(),
            branch_bitrate([100,250,500]),
            plot_swap(),
            branch_udp_rate([50]),
            plot_swap(),
            branch_runif([
                #('config-3', lambda testenv: False, '8 GiB / 6 vCPU'),
                #('config-6144-1', lambda testenv: False, '6 GiB / 1 vCPU'),
                #('config-512-6', lambda testenv: False, '512 MiB / 6 vCPU'),
                #('config-4', lambda testenv: False, '512 MiB / 1 vCPU'),
                #('config-3072-2', lambda testenv: False, '3 GiB / 2 vCPU'),
                ('config-3072-2', lambda testenv: False, '-'),
                #('config-1', lambda testenv: False, '2 GiB / 1 vCPU'),
                #('config-2', lambda testenv: False, '1 GiB / 1 vCPU'),
            ]),
            #branch_repeat(2),
            branch_repeat(10),
            my_test,
        ),
    )

if __name__ == '__main__':
    require_on_aqm_node()
    test_compose()
mit
Python
5a1518bc2bd8b509bc5c00850ba1da59989147f8
Add basic tests
Leibniz137/python-jproperties3
test_main.py
test_main.py
#!/usr/bin/env python

import sys
from io import StringIO

from jproperties import Properties


def _test_deserialize(*data):
    for s, items in data:
        props = Properties()
        props.load(StringIO(s))
        assert list(props.items()) == items


def test_eq_separator():
    _test_deserialize(
        ("a=b", [("a", "b")]),
        ("a= b", [("a", "b")]),
        ("a = b", [("a", "b")]),
        ("a =b", [("a", "b")]),
    )


def test_colon_separator():
    _test_deserialize(
        ("a:b", [("a", "b")]),
        ("a: b", [("a", "b")]),
        ("a : b", [("a", "b")]),
        ("a :b", [("a", "b")]),
    )


def test_space_separator():
    _test_deserialize(
        ("a b", [("a", "b")]),
        ("a b", [("a", "b")]),
        ("a b", [("a", "b")]),
    )


def test_space_in_key():
    _test_deserialize(
        ("key\ with\ spaces = b", [("key with spaces", "b")]),
        ("key\ with\ spaces b", [("key with spaces", "b")]),
        ("key\ with\ spaces : b", [("key with spaces", "b")]),
        ("key\ with\ spaces\ : b", [("key with spaces ", "b")]),
    )


def main():
    for name, f in globals().items():
        if name.startswith("test_") and callable(f):
            f()


if __name__ == "__main__":
    main()
mit
Python
4249c6456ca21ad6bbec0eccdf66aef629deb511
Add basic tag testing script
wikimedia/labs-tools-wikibugs2,wikimedia/labs-tools-wikibugs2
test_tags.py
test_tags.py
import sys

import requests

from wikibugs import Wikibugs2
from channelfilter import ChannelFilter
import configfetcher

conf = configfetcher.ConfigFetcher()
w = Wikibugs2(conf)
c = ChannelFilter()

print("\n\n\n\n\n\n\n\n")
page = requests.get(sys.argv[1]).text
tags = w.get_tags(page)

for tag in tags:
    print(tag, c.channels_for([tag]))
mit
Python
377f44ea05d8fc550be5916a1ca6c085df8f8cdc
add mysql database backup script
vanzhiganov/backup
backupmysql.py
backupmysql.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Author: Andrew McDonald andrew@mcdee.com.au http://mcdee.com.au

# Example: config file
#[client]
#host = localhost
#user = root
#password = root-pass

from datetime import datetime
import sys, os, subprocess, tarfile
import zipfile, glob, logging

date_format = [ "%A %d.%m.%Y", "%Y%m%d" ]
default_date_format = 1


def print_usage(script):
    print 'Usage:', script, '--cnf <config file>', '--todir <directory>'
    sys.exit(1)


def usage(args):
    if not len(args) == 5:
        print_usage(args[0])
    else:
        req_args = ['--cnf', '--todir']
        for a in req_args:
            if not a in args:
                print_usage(args[0])
            if not os.path.exists(args[args.index(a)+1]):
                print 'Error: Path not found:', args[args.index(a)+1]
                print_usage(args[0])
        cnf = args[args.index('--cnf')+1]
        dir = args[args.index('--todir')+1]
        return cnf, dir


def mysql_dblist(cnf):
    no_backup = ['Database', 'information_schema', 'performance_schema', 'test']
    cmd = ['mysql', '--defaults-extra-file='+cnf, '-e', 'show databases']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode > 0:
        print 'MySQL Error:'
        print stderr
        sys.exit(1)
    dblist = stdout.strip().split('\n')
    for item in no_backup:
        try:
            dblist.remove(item)
        except ValueError:
            continue
    if len(dblist) == 1:
        print "Doesn't appear to be any user databases found"
    return dblist


def mysql_backup(dblist, dir, cnf):
    for db in dblist:
        bdate = datetime.now().strftime('%Y%m%d%H%M')
        bfile = db+'_'+bdate+'.sql'
        dumpfile = open(os.path.join(dir, bfile), 'w')
        if db == 'mysql':
            cmd = ['mysqldump', '--defaults-extra-file='+cnf, '--events', db]
        else:
            cmd = ['mysqldump', '--defaults-extra-file='+cnf, db]
        p = subprocess.Popen(cmd, stdout=dumpfile)
        retcode = p.wait()
        dumpfile.close()
        if retcode > 0:
            print 'Error:', db, 'backup error'
        backup_compress(dir, bfile)


def backup_compress(dir, bfile):
    tar = tarfile.open(os.path.join(dir, bfile)+'.tar.gz', 'w:gz')
    tar.add(os.path.join(dir, bfile), arcname=bfile)
    tar.close()
    os.remove(os.path.join(dir, bfile))


def main():
    cnf, dir = usage(sys.argv)
    dblist = mysql_dblist(cnf)
    mysql_backup(dblist, dir, cnf)


if __name__ == '__main__':
    main()
unlicense
Python
a3a2f645d3154334e8ae6af93fe56a3f2368c4c7
Add multiprocessing pool example
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
multiprocessing_pool.py
multiprocessing_pool.py
from multiprocessing.pool import ThreadPool as Pool
from multiprocessing import Queue as PQueue
import Queue

my_dict = {
    'url1': 'url2',
    'url3': 'url4',
}

my_q = PQueue()


def test_p(uq):
    q, url = uq[0], uq[1]
    q.put(url, False)


def main():
    global my_dict
    global my_q

    print "Going to process (%d)" % len(my_dict.keys() + my_dict.values())
    p = Pool(processes=8)
    print p.map(test_p, [(my_q, url) for url in my_dict.keys() + my_dict.values()])

    its = []
    while True:
        # If we go more than 30 seconds without something, die
        try:
            print "Waiting for item from queue for up to 5 seconds"
            i = my_q.get(True, 5)
            print "found %s from the queue !!" % i
            its.append(i)
        except Queue.Empty:
            print "Caught queue empty exception, done"
            break

    print "processed %d items, completion successful" % len(its)

    p.close()
    p.join()


if __name__ == '__main__':
    main()
mit
Python
b117fbc82de4fb6acd8a044651c95e2425d9e71c
Create preprocess_MS_dataset_utils_test.py
googleinterns/smart-content-summary,googleinterns/smart-content-summary,googleinterns/smart-content-summary
preprocess_MS_dataset_utils_test.py
preprocess_MS_dataset_utils_test.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing for preprocess_MS_dataset_utils.py."""

import unittest
from unittest import TestCase

from preprocess_MS_dataset_utils import process_row


class PreprocessMSDatasetUtilsTest(TestCase):
    def test_process_row_without_excluded_sample(self):
        row = ["PlaceHolder ||| PlaceHolder ||| OriginalSentence ||| "
               "Summary1 ||| 6 ||| 6 ||| 6 ||| Most important meaning Flawless language "
               "||| Summary2 ||| 7 ||| 7 ||| 7 ||| Most important meaning Minor errors"]

        output_original_sentence, output_shortened_sentences_list, \
            output_shortened_ratings_list, count_excluded = process_row(row)

        self.assertEqual(output_original_sentence, 'OriginalSentence')
        self.assertEqual(output_shortened_sentences_list, ['Summary1', 'Summary2'])
        self.assertEqual(output_shortened_ratings_list, [['6'], ['7']])
        self.assertEqual(count_excluded, 0)

    def test_process_row_with_excluded_sample(self):
        row = ["PlaceHolder ||| PlaceHolder ||| OriginalSentence ||| "
               "Summary1 ||| 7 ||| 7 ||| 7 ||| Most important meaning Minor errors "
               "||| Summary2 ||| 9 ||| 9 ||| 9 ||| Most important meaning Disfluent or incomprehensible"]

        output_original_sentence, output_shortened_sentences_list, \
            output_shortened_ratings_list, count_excluded = process_row(row)

        self.assertEqual(output_original_sentence, 'OriginalSentence')
        self.assertEqual(output_shortened_sentences_list, ['Summary1'])
        self.assertEqual(output_shortened_ratings_list, [['7']])
        self.assertEqual(count_excluded, 1)


if __name__ == '__main__':
    unittest.main()
apache-2.0
Python
6bf43087967dee2bfb9f31a5de61c91ed0664586
update get ids and columns in pecanstreet, much faster
dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy
proto/pylearn2/create_ev_dataset.py
proto/pylearn2/create_ev_dataset.py
import sys
import os.path
sys.path.append(os.path.join(os.pardir, os.pardir))
import disaggregator as da
import disaggregator.PecanStreetDatasetAdapter as psda

db_url = "postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres"
psda.set_url(db_url)

schema = 'shared'
table_names = psda.get_table_names(schema)

all_ids = []
all_columns = []
for table_name in table_names:
    ids, columns = psda.get_table_dataids_and_column_names(schema, table_name)
    all_ids.append(ids)
    all_columns.append(columns)

print all_ids
print all_columns
mit
Python
10524dd2c42ef499d36b3f64e31150885d45e51b
Add slot_usage command for checking cluster balance
codywilbourn/streamparse,Parsely/streamparse,Parsely/streamparse,codywilbourn/streamparse
streamparse/cli/slot_usage.py
streamparse/cli/slot_usage.py
""" Display slots used by every topology on the cluster """ from __future__ import absolute_import, print_function from collections import Counter, defaultdict from pkg_resources import parse_version from prettytable import PrettyTable from six import iteritems from .common import add_environment from ..util import get_ui_json, storm_lib_version def subparser_hook(subparsers): """ Hook to add subparser for this command. """ subparser = subparsers.add_parser('slot_usage', description=__doc__, help=main.__doc__) subparser.set_defaults(func=main) add_environment(subparser) def display_slot_usage(env_name): print('Querying Storm UI REST service for slot usage stats (this can take a while)...') topology_summary = '/api/v1/topology/summary' topology_detail = '/api/v1/topology/{topology}' component = '/api/v1/topology/{topology}/component/{component}' topo_summary_json = get_ui_json(env_name, topology_summary) topology_ids = [x['id'] for x in topo_summary_json['topologies']] # Keep track of the number of workers used by each topology on each machine topology_worker_ports = defaultdict(lambda: defaultdict(set)) topology_executor_counts = defaultdict(Counter) topology_names = set() for topology in topology_ids: topology_detail_json = get_ui_json(env_name, topology_detail.format(topology=topology)) spouts = [x['spoutId'] for x in topology_detail_json['spouts']] bolts = [x['boltId'] for x in topology_detail_json['bolts']] for comp in spouts + bolts: comp_detail = get_ui_json(env_name, component.format(topology=topology, component=comp)) for worker in comp_detail['executorStats']: topology_worker_ports[worker['host']][topology_detail_json['name']].add(worker['port']) topology_executor_counts[worker['host']][topology_detail_json['name']] += 1 topology_names.add(topology_detail_json['name']) print("# Slot (and Executor) Counts by Topology") topology_names = sorted(topology_names) table = PrettyTable(["Host"] + topology_names) table.align = 'l' for host, host_dict in sorted(iteritems(topology_worker_ports)): row = [host] + ['{} ({})'.format(len(host_dict.get(topology, set())), topology_executor_counts[host][topology]) for topology in topology_names] table.add_row(row) print(table) print() def main(args): """ Display uptime for Storm workers. """ storm_version = storm_lib_version() if storm_version >= parse_version('0.9.2-incubating'): display_slot_usage(args.environment) else: print("ERROR: Storm {0} does not support this command." .format(storm_version))
apache-2.0
Python
b69cc15467456a070333ff00f886f27ca391b85b
Add script for appending entries to .gitignore.
mwgoldsmith/ilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,ShiftMediaProject/libilbc,mwgoldsmith/libilbc,mwgoldsmith/ilbc,TimothyGu/libilbc,TimothyGu/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,ShiftMediaProject/libilbc,ShiftMediaProject/libilbc,mwgoldsmith/ilbc,mwgoldsmith/libilbc,TimothyGu/libilbc,mwgoldsmith/ilbc,mwgoldsmith/libilbc,mwgoldsmith/libilbc
webrtc/build/extra_gitignore.py
webrtc/build/extra_gitignore.py
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

""" Adds extra patterns to the root .gitignore file.

Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be
additional ignoring patterns, or negating patterns to override existing
entries (man gitignore for more details).
"""

import os
import sys

MODIFY_STRING = '# The following added by %s\n'


def main(argv):
    if not argv[1]:
        # Special case; do nothing.
        return 0

    modify_string = (MODIFY_STRING % argv[0])
    gitignore_file = os.path.dirname(argv[0]) + '/../.gitignore'
    lines = open(gitignore_file, 'r').readlines()
    for i, line in enumerate(lines):
        if line == modify_string:
            lines = lines[:i]
            break

    lines.append(modify_string)
    f = open(gitignore_file, 'w')
    f.write(''.join(lines))
    f.write(open(argv[1], 'r').read())
    f.close()


if __name__ == '__main__':
    sys.exit(main(sys.argv))
bsd-3-clause
Python
5e07a21cce64e1845832641b6de1951182d41ea0
add back module changed mixin
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
core/mixins.py
core/mixins.py
""" core.mixins - Mixins available to use with models """ from django.db.models.signals import post_save def on_changed(sender, **kwargs): """ Calls the `model_changed` method and then resets the state. """ instance = kwargs.get("instance") is_new = kwargs.get("created") dirty_fields = instance.get_dirty_fields() instance.model_changed(instance.original_state, dirty_fields, is_new) instance.original_state = instance.to_dict() class ModelChangedMixin(object): """ Mixin for detecting changes to a model """ def __init__(self, *args, **kwargs): super(ModelChangedMixin, self).__init__(*args, **kwargs) self.original_state = self.to_dict() identifier = "{0}_model_changed".format(self.__class__.__name__) post_save.connect( on_changed, sender=self.__class__, dispatch_uid=identifier) def to_dict(self): """ Returns the model as a dict """ # Get all the field names that are not relations keys = (f.name for f in self._meta.local_fields if not f.rel) return {field: getattr(self, field) for field in keys} def get_dirty_fields(self): """ Returns the fields dirty on the model """ dirty_fields = {} current_state = self.to_dict() for key, value in current_state.items(): if self.original_state[key] != value: dirty_fields[key] = value return dirty_fields def is_dirty(self): """ Return whether the model is dirty An unsaved model is dirty when it has no primary key or has at least one dirty field. """ if not self.pk: return True return {} != self.get_dirty_fields() def model_changed(self, old_fields, new_fields, is_new): """ Post-hook for all fields that have been changed. """ raise NotImplementedError("Missing method `model_changed`")
apache-2.0
Python
87413a50fa61761f8e669eda641635a0ab7bede3
Create migration for message
mbalamat/ting,dionyziz/ting,gtklocker/ting,mbalamat/ting,dionyziz/ting,dionyziz/ting,gtklocker/ting,mbalamat/ting,dionyziz/ting,gtklocker/ting,gtklocker/ting,mbalamat/ting
API/chat/migrations/0005_auto_20160511_1921.py
API/chat/migrations/0005_auto_20160511_1921.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('chat', '0004_auto_20150905_1700'),
    ]

    operations = [
        migrations.RenameField(
            model_name='message',
            old_name='text',
            new_name='message_content',
        ),
        migrations.AddField(
            model_name='message',
            name='message_type',
            field=models.CharField(default=b'text', max_length=10,
                                   choices=[(b'text', b'text'), (b'image', b'image')]),
        ),
    ]
mit
Python
b38527cccf970e069f55c531a4490cdb6eb7042b
Add a widget.
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
python/pyqt/pyqt5/hello_as_class.py
python/pyqt/pyqt5/hello_as_class.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import sys
from PyQt5.QtWidgets import QApplication, QMainWindow


class Window(QMainWindow):

    def __init__(self):
        super().__init__()
        self.resize(250, 150)
        self.setWindowTitle('Hello')
        self.show()


app = QApplication(sys.argv)

window = Window()

# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python
# keyword. And thus, exec_() was used instead.
exit_code = app.exec_()

# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
mit
Python
b41b2edde5ac7c786b5ce23adec116fe8311d5d7
Add tests for createaccount command
mociepka/saleor,mociepka/saleor,mociepka/saleor
tests/test_account_service_account.py
tests/test_account_service_account.py
from unittest.mock import ANY, Mock

import requests
from django.core.management import call_command

from saleor.account.models import ServiceAccount
from saleor.core.permissions import get_permissions


def test_createaccount_command_creates_service_account():
    name = "SA name"
    permissions = ["account.manage_users", "order.manage_orders"]
    call_command("createserviceaccount", name, permission=permissions)

    sa_accounts = ServiceAccount.objects.filter(name=name)
    assert len(sa_accounts) == 1

    sa_account = sa_accounts[0]
    tokens = sa_account.tokens.all()
    assert len(tokens) == 1


def test_createaccount_command_service_account_has_all_required_permissions():
    name = "SA name"
    permission_list = ["account.manage_users", "order.manage_orders"]
    expected_permission = get_permissions(permission_list)
    call_command("createserviceaccount", name, permission=permission_list)

    sa_accounts = ServiceAccount.objects.filter(name=name)
    assert len(sa_accounts) == 1
    sa_account = sa_accounts[0]
    assert set(sa_account.permissions.all()) == set(expected_permission)


def test_createaccount_command_sends_data_to_target_url(monkeypatch):
    mocked_response = Mock()
    mocked_response.status_code = 200
    mocked_post = Mock(return_value=mocked_response)
    monkeypatch.setattr(requests, "post", mocked_post)

    name = "SA name"
    target_url = "https://ss.shop.com/register"
    permissions = ["account.manage_users"]
    call_command(
        "createserviceaccount", name, permission=permissions, target_url=target_url
    )

    service_account = ServiceAccount.objects.filter(name=name)[0]
    token = service_account.tokens.all()[0].auth_token
    mocked_post.assert_called_once_with(
        target_url,
        headers={"x-saleor-domain": "mirumee.com"},
        json={
            "auth_token": token,
            "name": "SA name",
            "permissions": ["account.manage_users"],
        },
        timeout=ANY,
    )
bsd-3-clause
Python
1390de93f8f9703416dc465fc546a8883e96bada
add a header generator
hikui/EMControllerManager
EMControllerManagerHeaderGenerator.py
EMControllerManagerHeaderGenerator.py
#!/usr/bin/env python
# coding:utf8

import getopt
import json
import sys


def generate_definition(input_file, output_path, prefix):
    with open(input_file, 'r') as json_file:
        json_string = json_file.read()
    config_dict = json.loads(json_string)
    if not isinstance(config_dict, dict):
        sys.stderr.write('configuration file is not valid')
        exit(-1)
    with open(output_path, 'w') as o:
        o.write('/* Generated by EMControllerManagerHeaderGenerator, do not edit it manually. */\n\n\n')
        for controller_name in config_dict:
            if prefix is None:
                def_name = controller_name
            else:
                def_name = "%s_%s" % (prefix, controller_name)
            o.write('#define %s @"%s"\n' % (def_name, controller_name))


def main():
    try:
        options, args = getopt.getopt(sys.argv[1:], 'i:o:p:')
    except Exception, e:
        print str(e)
        raise e

    input_file = None
    output_path = None
    prefix = None

    for o, a in options:
        if o == '-i':
            input_file = a
        elif o == '-o':
            output_path = a
        elif o == '-p':
            prefix = a

    if input_file is None or output_path is None:
        print "input error"
        exit(-1)

    generate_definition(input_file, output_path, prefix)


if __name__ == '__main__':
    main()
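# Example invocation (file names are illustrative): reads a JSON dictionary of
# controller names from -i, writes one #define per entry to -o, optionally
# prefixing each macro via -p.
#
#     python EMControllerManagerHeaderGenerator.py -i controllers.json -o ControllerNames.h -p EM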
mit
Python
10b8043463b6bcc89d4ce559548fa113f3d26190
drop tables no longer needed by application
praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem
gem/migrations/0044_remove_deprecated_tables.py
gem/migrations/0044_remove_deprecated_tables.py
# Generated by Django 2.2.15 on 2020-08-14 11:23

from django.db import migrations

TABLES = [
    'surveys_articletagrule',
    'surveys_combinationrule',
    'surveys_groupmembershiprule',
    'surveys_molosurveyformfield',
    'surveys_molosurveypage',
    'surveys_molosurveypage_translated_pages',
    'surveys_molosurveypageview',
    'surveys_molosurveysubmission',
    'surveys_personalisablesurvey',
    'surveys_personalisablesurveyformfield',
    'surveys_segmentusergroup',
    'surveys_segmentusergroup_users',
    'surveys_surveyresponserule',
    'surveys_surveysindexpage',
    'surveys_surveysubmissiondatarule',
    'surveys_surveytermsconditions',
    'surveys_termsandconditionsindexpage',
    'surveys_termsandconditionsindexpage_translated_pages',
    'yourwords_termsandconditions',
    'yourwords_thankyou',
    'yourwords_yourwordscompetition',
    'yourwords_yourwordscompetition_translated_pages',
    'yourwords_yourwordscompetitionentry',
    'yourwords_yourwordscompetitionindexpage',
    'polls_choice',
    'polls_choice_choice_votes',
    'polls_choice_translated_pages',
    'polls_choicevote',
    'polls_choicevote_choice',
    'polls_freetextquestion',
    'polls_freetextvote',
    'polls_pollsindexpage',
    'polls_question',
    'polls_question_translated_pages',
]


def remove_tables(apps, schema_editor):
    # Execute the DROP directly; instantiating migrations.RunSQL inside a
    # RunPython callable would be a no-op.
    schema_editor.execute(
        'DROP TABLE IF EXISTS {} CASCADE;'.format(','.join(TABLES)))


class Migration(migrations.Migration):

    dependencies = [
        ('gem', '0043_invite_site'),
    ]

    operations = [
        migrations.RunPython(remove_tables)
    ]
bsd-2-clause
Python
9a678f5e856a5fcba82a1a9017dfbc841a660686
Create ompotdar.py
vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,vansjyo/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17
Python/ompotdar.py
Python/ompotdar.py
print("Hello World!")
mit
Python
0f23004da949b974a071a788ff084c2cb685b95d
use a similar `repair_wheel.py` script as cmake
scikit-build/ninja-python-distributions
scripts/repair_wheel.py
scripts/repair_wheel.py
import argparse
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path

from convert_to_generic_platform_wheel import convert_to_generic_platform_wheel


def main():
    if sys.platform.startswith("linux"):
        os_ = "linux"
    elif sys.platform == "darwin":
        os_ = "macos"
    elif sys.platform == "win32":
        os_ = "windows"
    else:
        raise NotImplementedError(f"sys.platform '{sys.platform}' is not supported yet.")

    p = argparse.ArgumentParser(description="Convert wheel to be independent of python implementation and ABI")
    p.set_defaults(prog=Path(sys.argv[0]).name)
    p.add_argument("WHEEL_FILE", help="Path to wheel file.")
    p.add_argument(
        "-w",
        "--wheel-dir",
        dest="WHEEL_DIR",
        help=('Directory to store delocated wheels (default: "wheelhouse/")'),
        default="wheelhouse/",
    )

    args = p.parse_args()

    file = Path(args.WHEEL_FILE).resolve(strict=True)
    wheelhouse = Path(args.WHEEL_DIR).resolve()
    wheelhouse.mkdir(parents=True, exist_ok=True)

    with tempfile.TemporaryDirectory() as tmpdir_:
        tmpdir = Path(tmpdir_)
        # use the platform specific repair tool first
        if os_ == "linux":
            subprocess.run(["auditwheel", "repair", "-w", str(tmpdir), str(file)],
                           check=True, stdout=subprocess.PIPE)
        elif os_ == "macos":
            subprocess.run(
                ["delocate-wheel", "--require-archs", "x86_64", "-w", str(tmpdir), str(file)],
                check=True,
                stdout=subprocess.PIPE,
            )
        elif os_ == "windows":
            # no specific tool, just copy
            shutil.copyfile(file, tmpdir / file.name)

        files = list(tmpdir.glob("*.whl"))
        assert len(files) == 1, files
        file = files[0]

        # make this a py2.py3 wheel
        convert_to_generic_platform_wheel(
            str(file),
            out_dir=str(wheelhouse),
            py2_py3=True,
        )


if __name__ == "__main__":
    main()
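# Example invocation (the wheel file name is illustrative; -w defaults to
# "wheelhouse/"):
#
#     python scripts/repair_wheel.py dist/some_pkg-1.0-cp39-cp39-linux_x86_64.whl -w wheelhouse/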
apache-2.0
Python
cafb83befb2cee459d44a1332e5fc7e57edf81a6
Add script to update cvsanaly databases
benmishkanian/ASF-JIRA-mine,benmishkanian/ASF-JIRA-mine,benmishkanian/ASF-JIRA-mine
updateGit.py
updateGit.py
from jiradb import *

if __name__ == "__main__":
    log.setLevel(logging.DEBUG)
    # Add console log handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(ch)
    # Add file log handler
    fh = logging.FileHandler('updateGit.log')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter('[%(levelname)s @ %(asctime)s]: %(message)s'))
    log.addHandler(fh)
    # Add error file log handler
    efh = logging.FileHandler('updateGitErrors.log')
    efh.setLevel(logging.ERROR)
    efh.setFormatter(logging.Formatter('[%(levelname)s @ %(asctime)s]: %(message)s'))
    log.addHandler(efh)

    args = getArguments()
    jiradb = JIRADB(**args)
    projectList = args['projects']
    for project in projectList:
        jiradb.getGitDB(project).update()
mit
Python
23c65cc59f1cdf595090a7f25e80c03828aaba68
add `examples/references`
scott-maddox/openbandparams
src/openbandparams/examples/references.py
src/openbandparams/examples/references.py
#
#   Copyright (c) 2013-2015, Scott J Maddox
#
#   This file is part of openbandparams.
#
#   openbandparams is free software: you can redistribute it and/or modify
#   it under the terms of the GNU Affero General Public License as published
#   by the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   openbandparams is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#   GNU Affero General Public License for more details.
#
#   You should have received a copy of the GNU Affero General Public License
#   along with openbandparams. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################

# Make sure we import the local openbandparams version
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '../..')))
from openbandparams import *

# Print all references used to calculate a parameter
parameter = InAsSb.Eg
for ref in parameter.get_references():
    print ref
agpl-3.0
Python
574659044cb501a2ac61006ddc1c389622172207
add script to calculate cv from energy intervals
js850/nested_sampling,js850/nested_sampling
compute_cv.py
compute_cv.py
import argparse
import numpy as np


def compute_Z(energies, T, K):
    beta = 1. / T
    N = len(energies)
    Z = 0.
    U = 0.
    U2 = 0.
    Cv = 0.
    Emin = energies[-1]
    Ediff = energies - Emin
    for n in xrange(1, len(energies) - 2):
        # Z += (np.exp(-float(n-1) / K) - np.exp(-float(n+1) / K)) * np.exp(-beta * energies[n])
        E = Ediff[n]
        Zpref = np.exp(-float(n - 1) / K - beta * E) * (1. - np.exp(-2. / K))
        Z += Zpref
        U += Zpref * (E + Emin)
        U2 += Zpref * (E + Emin)**2
    U /= Z
    U2 /= Z
    Cv = (U2 - U**2) * beta**2
    return Z, Cv, U, U2


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="load energy intervals and compute cv")
    # parser.add_argument("--db", type=str, nargs=1, help="database filename",
    #                     default="otp.db")
    parser.add_argument("K", type=int, help="number of replicas")
    parser.add_argument("fname", type=str, help="filenames with energies")
    args = parser.parse_args()
    print args.fname

    energies = np.genfromtxt(args.fname)

    Tmin = .02
    Tmax = .5
    nT = 100
    dT = (Tmax - Tmin) / nT
    T = np.array([Tmin + dT * i for i in range(nT)])
    Z, Cv, U, U2 = compute_Z(energies, T, args.K)
    print Z, Cv

    with open("cv", "w") as fout:
        fout.write("#T Cv <E> <E**2>\n")
        for vals in zip(T, Cv, U, U2):
            fout.write("%g %g %g %g\n" % vals)
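# Example invocation (file name illustrative): K is the number of nested
# sampling replicas and the file holds one energy per line; T, Cv, <E> and
# <E**2> are written to a file named "cv".
#
#     python compute_cv.py 100 energies.dat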
bsd-2-clause
Python
df6aa6962dd9e265786e7337de69964f2fadfb1d
Create professor.py
carthage-college/django-djspace,carthage-college/django-djspace,carthagecollege/django-djspace,carthage-college/django-djspace,carthagecollege/django-djspace,carthage-college/django-djspace,carthagecollege/django-djspace
djspace/registration/form_models/professor.py
djspace/registration/form_models/professor.py
# -*- coding: utf-8 -*-

from django.conf import settings
from django.db import models, connection
from django.contrib.auth.models import User

from djspace.registration.base_models import *
from djtools.fields import BINARY_CHOICES, SALUTATION_TITLES, STATE_CHOICES
from djtools.fields import GENDER_CHOICES

EMPLOYER = (
    (':\(', 'OH NO, WE NEED TO FILL THESE OUT'),
)

INTEREST = (
    ('aeronauticalaerospace', 'Aeronautical/Aerospace'),
    ('agricultural', 'Agricultural'),
    ('anthropology', 'Anthropology'),
    ('architectural', 'Architectural'),
    ('architecture', 'Architecture'),
    ('art', 'Art'),
    ('astronomy', 'Astronomy'),
    ('astrophysics', 'Astrophysics'),
    ('atmosphericscience', 'Atmospheric Science'),
    ('biochemistry', 'Biochemistry'),
    ('bioengineering', 'Bioengineering'),
    ('biology', 'Biology'),
    ('botany', 'Botany'),
    ('chemical', 'Chemistry'),
    ('civil', 'Civil'),
    ('climatologymeteorology', 'Climatology/Meteorology'),
    ('computer', 'Computer'),
    ('computerscience', 'Computer Science'),
    ('earthsciences', 'Earth Sciences'),
    ('economics', 'Economics'),
    ('educationelementaryschool', 'Elementary Education School'),
    ('educationhighschool', 'Education High School'),
    ('educationk12', 'Education K12'),
    ('educationk12administration', 'Education K12 Administration'),
    ('educationmiddleschool', 'Education Middle School'),
    ('electricalelectronic', 'Electrical/Electronic'),
    ('engineering', 'Engineering'),
    ('engineeringmechanics', 'Engineering Mechanics'),
    ('engineeringscience', 'Engineering Science'),
    ('environmental', 'Environmental'),
    ('environmentalscience', 'Environmental Science'),
    ('environmentalscienceandglobalclimatechange', 'Environmental and Global Climate Change'),
    ('generalpublic', 'General Public'),
    ('geography', 'Geography'),
    ('geology', 'Geology'),
    ('geophysics', 'Geophysics'),
    ('healthsciencenutrition', 'Health Science/Nutrition'),
    ('history', 'History'),
    ('industrial', 'Industrial'),
    ('lifesciences', 'Life Sciences'),
    ('materialsscienceengineering', 'Materials Science'),
    ('mathematics', 'Mathematics'),
    ('mechanical', 'Mechanical'),
    ('medicinemedicalresearch', 'Medicine/Medical Research'),
    ('miningpetroleumnaturalgas', 'Mining/Petroleum and Natural Gas'),
    ('molecularcellbiology', 'Molecular/Cell Biology'),
    ('nuclear', 'Nuclear'),
    ('oceanography', 'Oceanography'),
    ('other', 'Other'),
    ('philosophy', 'Philosophy'),
    ('physicalscience', 'Physical Science'),
    ('physics', 'Physics'),
    ('planetarygeosciences', 'Planetary GeoSciences'),
    ('planetarysciences', 'Planetary Sciences'),
    ('politicalsciencepublicpolicy', 'Political Science/Public Policy'),
    ('psychology', 'Psychology'),
    ('socialsciences', 'Social Sciences'),
    ('sociology', 'Sociology'),
    ('zoology', 'Zoology'),
)

RACE = (
    ('americanindianalaskanative', 'American Indian/Alaska Native'),
    ('asian', 'Asian'),
    ('blackafricanamerican', 'Black/African American'),
    ('caucasian', 'Caucasian'),
    ('hispanic', 'Hispanic'),
    ('nativehawaiianotherpacificislander', 'Native Hawaiian/Other Pacific Islander'),
    ('otherrace', 'Other race'),
)


class ProfessorInformation(BasePersonalInformation, BaseEmployerInformation):

    first = models.CharField("First name", max_length=20)
    middle = models.CharField("Middle name", max_length=20)
    last = models.CharField("Last name", max_length=20)
    citizen = models.BooleanField("US Citizen")
    rocket_comp = models.BooleanField("Tribal or AISES Rocket Competition")
    maiden = models.CharField("Maiden name", max_length=20)
    additional = models.CharField("Additional name", max_length=20)
    title_department = models.CharField("Title or Department", max_length=20)
    webpage = models.CharField("Web page", max_length=20)
    street = models.CharField("Street", max_length=20)
    city = models.CharField("City", max_length=20)
    state = models.CharField("State", max_length=2, choices=STATE_CHOICES)
    ZIP = models.CharField("Zip code", max_length=9)
    phone = models.CharField("Phone number", max_length=16)
    primary = models.CharField("Primary interest", max_length=35, choices=INTEREST)
    primary_other = models.CharField("Other", max_length=35)
    secondary = models.CharField("Secondary interest", max_length=35, choices=INTEREST)
    secondary_other = models.CharField("Other", max_length=35)
    birthdate = models.DateField("Birthdate", auto_now=False)
    gender = models.CharField("Gender", max_length=8, choices=GENDER_CHOICES)
    disability = models.BooleanField("Disability")
    race = models.CharField("Race", max_length=25, choices=RACE)
    tribe = models.CharField("Tribe", max_length=20)
mit
Python
998acbd4b490ef3807d79c245c27700d3e44d5da
Add a dummy pavement file.
scipy/scipy-svn,lesserwhirls/scipy-cwt,jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,lesserwhirls/scipy-cwt,scipy/scipy-svn,jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,jasonmccampbell/scipy-refactor,scipy/scipy-svn,scipy/scipy-svn,jasonmccampbell/scipy-refactor
tools/win32/build_scripts/pavement.py
tools/win32/build_scripts/pavement.py
# Paver environment (task, options, Bunch); this import is needed for the
# file to run standalone.
from paver.easy import Bunch, options, task

options(
    setup=Bunch(
        name="scipy-superpack",
    )
)


@task
def setup():
    print "Setting up package %s" % options.name
bsd-3-clause
Python
4f6b1a4dae7701cc79a523e96fe812efaa54745b
Add optimizers tests
fmacias64/keras,hhaoyan/keras,xiaoda99/keras,ogrisel/keras,navyjeff/keras,tencrance/keras,stephenbalaban/keras,amy12xx/keras,kod3r/keras,gamer13/keras,vseledkin/keras,brainwater/keras,iamtrask/keras,dribnet/keras,chenych11/keras,jbolinge/keras,llcao/keras,daviddiazvico/keras,MagicSen/keras,jasonyaw/keras,DeepGnosis/keras,cvfish/keras,gavinmh/keras,nehz/keras,zxytim/keras,kuza55/keras,kemaswill/keras,Cadene/keras,OlafLee/keras,dhruvparamhans/keras,untom/keras,bottler/keras,ashhher3/keras,jalexvig/keras,wxs/keras,jiumem/keras,pjadzinsky/keras,wubr2000/keras,Aureliu/keras,marchick209/keras,happyboy310/keras,danielforsyth/keras,xurantju/keras,saurav111/keras,iScienceLuvr/keras,sjuvekar/keras,relh/keras,zhmz90/keras,abayowbo/keras,imcomking/Convolutional-GRU-keras-extension-,nt/keras,yingzha/keras,rlkelly/keras,mikekestemont/keras,jimgoo/keras,bboalimoe/keras,JasonTam/keras,ml-lab/keras,pthaike/keras,ledbetdr/keras,dolaameng/keras,florentchandelier/keras,keras-team/keras,asampat3090/keras,Smerity/keras,keskarnitish/keras,keras-team/keras,ekamioka/keras,EderSantana/keras,LIBOTAO/keras,nzer0/keras,eulerreich/keras,DLlearn/keras,meanmee/keras,zhangxujinsh/keras,jayhetee/keras,harshhemani/keras,Yingmin-Li/keras,dxj19831029/keras,zxsted/keras,printedheart/keras,nebw/keras,why11002526/keras,cheng6076/keras,rudaoshi/keras,johmathe/keras,rodrigob/keras,3dconv/keras
tests/auto/test_optimizers.py
tests/auto/test_optimizers.py
from __future__ import print_function
import numpy as np
np.random.seed(1337)

from keras.utils.test_utils import get_test_data
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils.np_utils import to_categorical
import unittest

(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000,
                                                     nb_test=200,
                                                     input_shape=(10,),
                                                     classification=True,
                                                     nb_class=2)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)


def get_model(input_dim, nb_hidden, output_dim):
    model = Sequential()
    model.add(Dense(input_dim, nb_hidden))
    model.add(Activation('relu'))
    model.add(Dense(nb_hidden, output_dim))
    model.add(Activation('softmax'))
    return model


def test_optimizer(optimizer, target=0.9):
    model = get_model(X_train.shape[1], 10, y_train.shape[1])
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16,
                        validation_data=(X_test, y_test),
                        show_accuracy=True, verbose=2)
    return history.history['val_acc'][-1] > target


class TestOptimizers(unittest.TestCase):
    def test_sgd(self):
        print('test SGD')
        sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
        self.assertTrue(test_optimizer(sgd))

    def test_rmsprop(self):
        print('test RMSprop')
        self.assertTrue(test_optimizer(RMSprop()))

    def test_adagrad(self):
        print('test Adagrad')
        self.assertTrue(test_optimizer(Adagrad()))

    def test_adadelta(self):
        print('test Adadelta')
        self.assertTrue(test_optimizer(Adadelta()))

    def test_adam(self):
        print('test Adam')
        self.assertTrue(test_optimizer(Adam()))


if __name__ == '__main__':
    print('Test optimizers')
    unittest.main()
mit
Python
751b596482cdb473b1a7f9172501e25d00f15724
Use default loop on TCP benchmark
fivejjs/pyuv,saghul/pyuv,fivejjs/pyuv,fivejjs/pyuv,saghul/pyuv,saghul/pyuv
tests/benchmark-tcp.py
tests/benchmark-tcp.py
import sys
sys.path.insert(0, '../')

import signal
import threading
import pyuv

RESPONSE = "HTTP/1.1 200 OK\r\n" \
           "Content-Type: text/plain\r\n" \
           "Content-Length: 12\r\n" \
           "\r\n" \
           "hello world\n"


def on_client_shutdown(client):
    client.close()
    clients.remove(client)


def on_read(client, data):
    if data is None:
        client.close()
        clients.remove(client)
        return
    data = data.strip()
    if not data:
        return
    client.write(RESPONSE)
    client.shutdown(on_client_shutdown)


def on_connection(server):
    client = server.accept()
    clients.append(client)
    client.start_read(on_read)


def async_exit(async, data):
    [c.close() for c in clients]
    async.close()
    signal_h.close()
    server.close()


def signal_cb(sig, frame):
    async.send(async_exit)


print "PyUV version %s" % pyuv.__version__

loop = pyuv.Loop.default_loop()

async = pyuv.Async(loop)
clients = []

server = pyuv.TCP(loop)
server.bind(("0.0.0.0", 1234))
server.listen(on_connection)

signal_h = pyuv.Signal(loop)
signal_h.start()

t = threading.Thread(target=loop.run)
t.start()

signal.signal(signal.SIGINT, signal_cb)
signal.pause()
t.join()

print "Stopped!"
import sys
sys.path.insert(0, '../')

import signal
import threading
import pyuv

RESPONSE = "HTTP/1.1 200 OK\r\n" \
           "Content-Type: text/plain\r\n" \
           "Content-Length: 12\r\n" \
           "\r\n" \
           "hello world\n"


def on_client_shutdown(client):
    client.close()
    clients.remove(client)


def on_read(client, data):
    if data is None:
        client.close()
        clients.remove(client)
        return
    data = data.strip()
    if not data:
        return
    client.write(RESPONSE)
    client.shutdown(on_client_shutdown)


def on_connection(server):
    client = server.accept()
    clients.append(client)
    client.start_read(on_read)


def async_exit(async, data):
    [c.close() for c in clients]
    async.close()
    signal_h.close()
    server.close()


def signal_cb(sig, frame):
    async.send(async_exit)


print "PyUV version %s" % pyuv.__version__

loop = pyuv.Loop()

async = pyuv.Async(loop)
clients = []

server = pyuv.TCP(loop)
server.bind(("0.0.0.0", 1234))
server.listen(on_connection)

signal_h = pyuv.Signal(loop)
signal_h.start()

t = threading.Thread(target=loop.run)
t.start()

signal.signal(signal.SIGINT, signal_cb)
signal.pause()
t.join()

print "Stopped!"
mit
Python
6dbd81fb4b59e7394318cbd0b0f0fdb31fcd6dd2
Add unit test to ensure we don't diff bare repos
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
tests/unit/states/test_git.py
tests/unit/states/test_git.py
# -*- coding: utf-8 -*-
'''
    :codeauthor: Erik Johnson <erik@saltstack.com>
'''

# Import Python libs
from __future__ import absolute_import
import logging
import os

# Import Salt Testing Libs
from tests.support.helpers import with_tempdir
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
    Mock,
    MagicMock,
    patch,
    DEFAULT,
    NO_MOCK,
    NO_MOCK_REASON,
)

# Import Salt Libs
import salt.states.git as git_state  # Don't potentially shadow GitPython

log = logging.getLogger(__name__)


@skipIf(NO_MOCK, NO_MOCK_REASON)
class GitTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.states.git
    '''
    def setup_loader_modules(self):
        return {
            git_state: {
                '__env__': 'base',
                '__opts__': {'test': False},
                '__salt__': {},
            }
        }

    @with_tempdir()
    def test_latest_no_diff_for_bare_repo(self, target):
        '''
        This test ensures that we don't attempt to diff when cloning a repo
        using either bare=True or mirror=True.
        '''
        name = 'https://foo.com/bar/baz.git'
        gitdir = os.path.join(target, 'refs')
        isdir_mock = MagicMock(
            side_effect=lambda path: DEFAULT if path != gitdir else True)

        branches = ['foo', 'bar', 'baz']
        tags = ['v1.1.0', 'v.1.1.1', 'v1.2.0']
        local_head = 'b9ef06ab6b7524eb7c27d740dbbd5109c6d75ee4'
        remote_head = 'eef672c1ec9b8e613905dbcd22a4612e31162807'

        git_diff = Mock()
        dunder_salt = {
            'git.current_branch': MagicMock(return_value=branches[0]),
            'git.diff': git_diff,
            'git.fetch': MagicMock(return_value={}),
            'git.is_worktree': MagicMock(return_value=False),
            'git.list_branches': MagicMock(return_value=branches),
            'git.list_tags': MagicMock(return_value=tags),
            'git.remote_refs': MagicMock(return_value={'HEAD': remote_head}),
            'git.remotes': MagicMock(return_value={
                'origin': {'fetch': name, 'push': name},
            }),
            'git.rev_parse': MagicMock(side_effect=git_state.CommandExecutionError()),
            'git.revision': MagicMock(return_value=local_head),
            'git.version': MagicMock(return_value='1.8.3.1'),
        }
        with patch('os.path.isdir', isdir_mock), \
                patch.dict(git_state.__salt__, dunder_salt):
            result = git_state.latest(
                name=name,
                target=target,
                mirror=True,  # mirror=True implies bare=True
            )
            assert result['result'] is True, result
            git_diff.assert_not_called()
apache-2.0
Python
469eedab89d22a1051e9d3f6f7f6c94ba946fb37
Add server tests for JOIN.
ProgVal/irctest
irctest/server_tests/test_channel_operations.py
irctest/server_tests/test_channel_operations.py
""" Section 3.2 of RFC 2812 <https://tools.ietf.org/html/rfc2812#section-3.2> """ from irctest import cases from irctest.irc_utils.message_parser import Message class JoinTestCase(cases.BaseServerTestCase): def testJoin(self): """“If a JOIN is successful, the user receives a JOIN message as confirmation and is then sent the channel's topic (using RPL_TOPIC) and the list of users who are on the channel (using RPL_NAMREPLY), which MUST include the user joining.” -- <https://tools.ietf.org/html/rfc2812#section-3.2.1> “If a JOIN is successful, the user is then sent the channel's topic (using RPL_TOPIC) and the list of users who are on the channel (using RPL_NAMREPLY), which must include the user joining.” -- <https://tools.ietf.org/html/rfc1459#section-4.2.1> """ self.connectClient('foo') self.sendLine(1, 'JOIN #chan') m = self.getMessage(1) self.assertMessageEqual(m, command='JOIN', params=['#chan']) m = self.getMessage(1) got_topic = False if m.command in ('331', '332'): # RPL_NOTOPIC, RPL_TOPIC got_topic = True m = self.getMessage(1) m = self.assertMessageEqual(m, command='353') # RPL_NAMREPLY m = self.getMessage(1) m = self.assertMessageEqual(m, command='366') # RPL_ENDOFNAMES else: m = self.assertMessageEqual(m, command='353') # RPL_NAMREPLY m = self.getMessage(1) m = self.assertMessageEqual(m, command='366') # RPL_ENDOFNAMES m = self.getMessage(1) self.assertIn(m.command, ('331', '332'), m) # RPL_NOTOPIC, RPL_TOPIC def testJoinTwice(self): self.connectClient('foo') self.sendLine(1, 'JOIN #chan') m = self.getMessage(1) self.assertMessageEqual(m, command='JOIN', params=['#chan']) self.sendLine(1, 'JOIN #chan') # What should we do now?
mit
Python
21742da132aeb9b834b128f7a7d01b7a2173137a
Add a tcp_server which simulates graphite-relay
unixsurfer/haproxystats,unixsurfer/haproxystats,unixsurfer/haproxystats
tcp_server.py
tcp_server.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
"""
A very simple TCP server for simulating a graphite relay, copy-pasted from the
Python documentation. A few things were adjusted to make pylint happy and to
print incoming data.
"""
import asyncio


class EchoServerClientProtocol(asyncio.Protocol):
    """
    A TCP server
    """
    def __init__(self):
        self.peername = None
        self.transport = None

    def connection_made(self, transport):
        self.peername = transport.get_extra_info('peername')
        print('Connection from {}'.format(self.peername))
        self.transport = transport

    def data_received(self, data):
        message = data.decode()
        print(message)

    def connection_lost(self, exc):
        print('client {} closed connection {}'.format(self.peername, exc))


def main():
    """
    main code
    """
    loop = asyncio.get_event_loop()
    # Each client connection will create a new protocol instance
    coro = loop.create_server(EchoServerClientProtocol, '127.0.0.1', 39991)
    server = loop.run_until_complete(coro)

    # Serve requests until Ctrl+C is pressed
    print('Serving on {}'.format(server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()


# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
    main()
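# One way to exercise the server from a shell (assumes netcat is installed;
# the metric line is illustrative, since the server prints whatever arrives):
#
#     printf 'foo.bar 1 1457006400\n' | nc 127.0.0.1 39991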
apache-2.0
Python
ef52b314eb5e15c34d8b034d7e6f7bdd727b6586
Add sp500_extractor_v1 version that does not use BeautifulSoup.
camisatx/IntroToPython-Fall-2016
Code/sp500_extractor_v1_no_bs.py
Code/sp500_extractor_v1_no_bs.py
import csv
from lxml import html
import time

import requests

"""
Make it work, make it right, make it fast

Extract the tickers from the S&P 500 table on Wikipedia, process them into
a list and save them into a CSV file.

# Retrieve HTML from URL with requests
http://docs.python-requests.org/en/master/user/quickstart/

# HTML table structure
http://www.w3schools.com/html/html_tables.asp

# Python HTML scraping
http://docs.python-guide.org/en/latest/scenarios/scrape/

# HTML table parsing with xpath
http://www.w3schools.com/xml/xpath_syntax.asp

# Save to CSV
http://gis.stackexchange.com/a/72476
"""

url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
csv_output = 'sp500_tickers.csv'

start_time = time.time()

# Download the S&P 500 table from Wikipedia, creating a string of the raw HTML
raw_html = requests.get(url).content
html_string = html.fromstring(raw_html)

ticker_list = []

# Pull first HTML table out of the HTML string, then loop through each HTML row
for html_row in html_string.xpath('//table[1]'):
    # Pull each HTML row's code that starts with a <tr> flag
    for col in html_row.xpath('.//tr'):
        # Create a list of text values from each column in this HTML row
        table_row_list = [item.text_content() for item in col.xpath('.//td')]
        # Only process table row lists that have values
        if table_row_list:
            # Tickers are in the first column in the row (first list element)
            ticker = table_row_list[0].strip()
            # Append each row's ticker to the ticker list
            ticker_list.append(ticker)

# Alphabetize ticker list
ticker_list.sort()
print(ticker_list)

# Save the ticker list to a csv file
with open(csv_output, 'w', newline='') as file:
    writer = csv.writer(file)
    for ticker in ticker_list:
        writer.writerow([ticker])

end_time = time.time()
run_time = round(end_time - start_time, 2)
print('Finished extracting the S&P 500 ticker list in %s seconds' % run_time)
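# Dependency note: only the third-party packages imported above are needed;
# a minimal environment can be prepared with:
#
#     pip install requests lxml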
agpl-3.0
Python
d98eebda6b3b0e42ac7ca34c6a1dd6cc8b05d342
add functions and refactor fibonacci
lsroudi/pythonTraining
quickTour/function.py
quickTour/function.py
def fibonacci(n):
    a, b = 0, 1
    if n == a:
        return a
    if n == b:
        return b
    return fibonacci(n - 1) + fibonacci(n - 2)


for n in range(0, 10):
    print(fibonacci(n))
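# A memoized variant (sketch; the name fibonacci_memo is illustrative, and
# functools.lru_cache needs Python 3.2+): caching each result turns the
# exponential recursion above into linear time.
from functools import lru_cache


@lru_cache(maxsize=None)
def fibonacci_memo(n):
    if n < 2:
        return n
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)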
mit
Python
fefb13108a151c5cbfe8c6acd5b94a480dac98ec
Add test for NPairLossScheme
ronekko/deep_metric_learning
tests/test_datasets.py
tests/test_datasets.py
# -*- coding: utf-8 -*- """ Created on Tue Feb 21 20:30:26 2017 @author: sakurai """ import unittest import numpy as np from ..datasets.data_provider import NPairLossScheme class TestNPairLossScheme(unittest.TestCase): def test_pairs_of_indexes(self): batch_size = 20 labels = sum([[i]*10 for i in range(10)], []) scheme = NPairLossScheme(labels, batch_size) it = scheme.get_request_iterator() for i in range(5): indexes = next(it) a_indexes = indexes[:batch_size / 2] p_indexes = indexes[batch_size / 2:] a_labels = np.array(labels)[a_indexes] p_labels = np.array(labels)[p_indexes] np.testing.assert_array_equal(a_labels, p_labels) np.testing.assert_equal(len(a_labels), len(np.unique(a_labels))) if __name__ == '__main__': unittest.main()
mit
Python
33375a9333852eafa1bf262fb30f5d827c4534f7
Create networkx.py
mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets,mapattacker/cheatsheets
networkx.py
networkx.py
import networkx
mit
Python
98a053f945e2c7cc01d8fbdec374ab90305bc11f
Create new files.
git-ning/core-python-programming
ospathex.py
ospathex.py
#!/usr/bin/python
#_*_coding:utf-8_*_

import os

for tmpdir in ('temp', r'c:\windows\temp'):
    if os.path.isdir(tmpdir):
        break
else:
    print 'no temp directory available'
    tmpdir = ''

if tmpdir:
    os.chdir(tmpdir)
    cwd = os.getcwd()
    print '*** current temporary directory'
    print cwd

    print '*** creating temporary directory...'
    os.mkdir('example')
    os.chdir('example')
    cwd = os.getcwd()
    print '*** new working directory: '
    print cwd
    print '*** original directory listing: '
    print os.listdir(cwd)

    print '*** creating test file...'
    fobj = open('test', 'w')
    fobj.write('foo\n')
    fobj.write('bar\n')
    fobj.close()
    print '*** updated directory listing: '
    print os.listdir(cwd)

    print "*** renaming 'test' to 'filename.txt'"
    os.rename('test', 'filename.txt')
    print '*** updated directory listing: '
    print os.listdir(cwd)

    path = os.path.join(cwd, os.listdir(cwd)[0])
    print '*** full file pathname'
    print path
    print '*** (pathname, basename)== '
    print os.path.split(path)
    print '*** (filename, extension) == '
    print os.path.splitext(os.path.basename(path))

    print '*** displaying file contents: '
    fobj = open(path)
    for eachLine in fobj:
        print eachLine
    fobj.close()

    print '*** deleting test file'
    os.remove(path)
    print '*** updated directory listing: '
    print os.listdir(cwd)

    os.chdir(os.pardir)
    print '*** deleting test directory'
    os.rmdir('example')

print '*** DONE!'
apache-2.0
Python
81ade3168faa68ef43456cc35a122b9ef493a23e
Add script to plot MS flag rate and acq fail rate
sot/aca_stats,sot/aca_stats,sot/aca_stats
plot_ms_flag_acq_fails.py
plot_ms_flag_acq_fails.py
from __future__ import division

import matplotlib.pyplot as plt
from astropy.table import Table
import numpy as np
from Ska.DBI import DBI
from chandra_aca import star_probs

db = DBI(dbi='sybase', server='sybase', user='aca_read')
stats = db.fetchall('SELECT * from trak_stats_data '
                    'WHERE kalman_datestart > "2014:180" '
                    'AND aoacmag_median is not NULL')
stats = Table(stats)

mags = stats['aoacmag_median']
ok = (mags > 9) & (mags < 11)
stats = stats[ok]
mags = mags[ok]

stats['frac_ms'] = stats['mult_star_samples'] / stats['n_samples']
stats['mag_bin'] = np.round(mags / 0.2) * 0.2
sg = stats.group_by('mag_bin')
sgm = sg.groups.aggregate(np.mean)

plt.figure(1, figsize=(6, 4))
plt.clf()
randx = np.random.uniform(-0.05, 0.05, size=len(stats))
plt.plot(mags + randx, stats['frac_ms'], '.', alpha=0.5,
         label='MS flag rate per obsid')
plt.plot(sgm['mag_bin'], sgm['frac_ms'], 'r', linewidth=5, alpha=0.7,
         label='MS flag rate (0.2 mag bins)')

p_acqs = star_probs.acq_success_prob('2016:001', t_ccd=-15.0, mag=sgm['mag_bin'])
plt.plot(sgm['mag_bin'], 1 - p_acqs, 'g', linewidth=5,
         label='Acq fail rate (model 2016:001, T=-15C)')

plt.legend(loc='upper left', fontsize='small')
plt.xlabel('Magnitude')
plt.title('Acq fail rate compared to MS flag rate')
plt.grid()
plt.tight_layout()
plt.savefig('ms_flag_acq_fails.png')
bsd-3-clause
Python