commit (stringlengths 40-40) | subject (stringlengths 4-1.73k) | repos (stringlengths 5-127k) | old_file (stringlengths 2-751) | new_file (stringlengths 2-751) | new_contents (stringlengths 1-8.98k) | old_contents (stringlengths 0-6.59k) | license (stringclasses, 13 values) | lang (stringclasses, 23 values)
---|---|---|---|---|---|---|---|---|
b674ff31ab846bc4c11b615ad7f738ff176d5f96 | Add /team test | royragsdale/picoCTF,picoCTF/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF,royragsdale/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF,picoCTF/picoCTF,picoCTF/picoCTF,royragsdale/picoCTF | picoCTF-web/tests/api/functional/v1/test_team.py | picoCTF-web/tests/api/functional/v1/test_team.py | """Tests for the /api/v1/team endpoints."""
from common import ( # noqa (fixture)
ADMIN_DEMOGRAPHICS,
clear_db,
client,
decode_response,
get_csrf_token,
register_test_accounts,
TEACHER_DEMOGRAPHICS,
USER_DEMOGRAPHICS,
get_conn
)
def test_get_my_team(client):
"""Tests the /team endpoint."""
clear_db()
register_test_accounts()
client.post('/api/v1/user/login', json={
'username': USER_DEMOGRAPHICS['username'],
'password': USER_DEMOGRAPHICS['password']
})
expected_fields = {
'achievements': [],
'affiliation': 'Sample School',
'competition_active': False,
'country': 'US',
'eligible': True,
'flagged_submissions': [],
'max_team_size': 1,
'progression': [],
'score': 0,
'size': 1,
'solved_problems': [],
'team_name': 'sampleuser'
}
expected_member_fields = {
'affiliation': 'None',
'country': 'US',
'email': 'sample@example.com',
'firstname': 'Sample',
'lastname': 'User',
'username': 'sampleuser',
'usertype': 'student'
}
res = client.get('/api/v1/team')
assert res.status_code == 200
for k, v in expected_fields.items():
assert res.json[k] == v
assert len(res.json['members']) == 1
for k, v in expected_member_fields.items():
assert res.json['members'][0][k] == v
db = get_conn()
uid = db.users.find_one({'username': USER_DEMOGRAPHICS['username']})['uid']
assert res.json['members'][0]['uid'] == uid
| mit | Python |
|
d8c8287cce7ddc48f4ea271a54bd6efa8dcabe66 | Create OutputNeuronGroup_multiple_outputs_1.py | ricardodeazambuja/BrianConnectUDP | examples/OutputNeuronGroup_multiple_outputs_1.py | examples/OutputNeuronGroup_multiple_outputs_1.py | '''
Example of a spike receptor (only receives spikes)
In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size as the NeuronGroup that is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.
my_neuron_input_number = 45
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
print "main_NeuronGroup!" #DEBUG!
simclock = simulation_clock
Nr=NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock)
Nr.v=0
# SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)
Syn_iNG_Nr[:,:]='i==j'
print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!
Syn_iNG_Nr.w=1
MExt=SpikeMonitor(Nr) # Spikes sent by UDP
Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP
return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
"""
input_NG: the neuron group that receives the input spikes
simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
"""
pass
figure()
raster_plot(simulation_MN[1])
title("Spikes Received by UDP")
show(block=True)
# savefig('output.pdf')
if __name__=="__main__":
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number, post_simulation_function=post_simulation_function,
input_addresses=[("127.0.0.1", 10101, my_neuron_input_number)], simclock_dt=1, inputclock_dt=2, TotalSimulationTime=10000, sim_repetitions=0, brian_address=2)
| cc0-1.0 | Python |
|
c70a127e17286f18e8d2d46bdc2e5ec6b0c55d0d | Add script to output statistics on body part emotion pairs | NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts | generate_body_part_emotion_pairs.py | generate_body_part_emotion_pairs.py | """Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Outputs statistics on body part / emotion label pairs, aggregated per period.
Usage: python generate_body_part_emotion_pairs.py <csv file with corpus
metadata> <dir with input texts>
"""
import os
import codecs
import argparse
import json
import copy
from collections import Counter
from count_labels import load_data
from emotools.heem_utils import heem_body_part_labels, heem_emotion_labels
from count_labels import corpus_metadata
from genre2period import print_results_line_period
def get_emotion_body_part_pairs(file_name):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
emotions2body = {}
emotions = Counter()
for labelset in Y:
body_parts = [lb for lb in labelset if lb in heem_body_part_labels]
emotion_lbls = [lb for lb in labelset if lb in heem_emotion_labels]
if body_parts and emotion_lbls:
for em in emotion_lbls:
for bp in body_parts:
if not emotions2body.get(em):
emotions2body[em] = Counter()
emotions2body[em][bp] += 1
emotions[em] += 1
return emotions, emotions2body
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='csv file containing corpus metadata')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
args = parser.parse_args()
f_name = args.file
input_dir = args.input_dir
text2period, text2year, text2genre, period2text, genre2text = \
corpus_metadata(f_name)
# statistics for entire corpus
global_emotions = Counter()
emotion_body_pairs = Counter()
period_counters = {}
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
period = text2period.get(text_id)
emotions, emotions2body = get_emotion_body_part_pairs(in_file)
global_emotions.update(emotions)
for em, body_counter in emotions2body.iteritems():
if not period_counters.get(em):
period_counters[em] = {}
if not period_counters.get(em).get(period):
period_counters[em][period] = Counter()
period_counters[em][period].update(body_counter)
for em, freq in global_emotions.most_common():
print '{}\t{}'.format(em, freq)
print 'Body part\tRenaissance\tClassisim\tEnlightenment\tNone\tTotal'
merged_body_parts = Counter()
for c in period_counters.get(em):
merged_body_parts.update(period_counters.get(em).get(c))
for label, freq in merged_body_parts.most_common():
print print_results_line_period(label, period_counters.get(em))
print
print
| apache-2.0 | Python |
|
e94dcbe781666ba8f083efab3dd63818d805c6d8 | Add flac2mp3 script. | JosefR/audio-cd-tools | flac2mp3.py | flac2mp3.py | #!/usr/bin/env python
import argparse
import subprocess
import os
import glob
def gettag(tag, filename):
proc = subprocess.Popen(["metaflac", "--no-utf8-convert",
"--show-tag=" + tag, filename], stdout=subprocess.PIPE)
out = proc.communicate()[0].rstrip()
remove = len(out.split("=")[0]) + 1
return out[remove:]
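# Example (assuming metaflac prints "ARTIST=Some Artist" for the tag query):
# gettag("ARTIST", "track.flac") -> "Some Artist"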
def decode_flac(flacfile, wavfile):
proc = subprocess.Popen(["flac", "-d", "-f", "-o", wavfile, flacfile])
proc.wait()
return 0 if proc.returncode == 0 else 1
def encode_mp3(wavfile, mp3file):
proc = subprocess.Popen(["lame", "-h", "-V0", wavfile, mp3file])
proc.wait()
return 0 if proc.returncode == 0 else 1
def tag_mp3(mp3file, metadata):
proc = subprocess.Popen(["eyeD3",
"-t", metadata["title"],
"-n", metadata["tracknumber"],
"-a", metadata["artist"],
"-A", metadata["album"],
"-G", metadata["genre"],
"-Y", metadata["year"],
"-A", metadata["album"],
"--add-image=" + metadata["cover"] + ":FRONT_COVER",
mp3file])
proc.wait()
return 0 if proc.returncode == 0 else 1
# parse command line arguments
parser = argparse.ArgumentParser(description='Convert flac files to mp3');
parser.add_argument('-i', metavar='input directory')
parser.add_argument('-o', metavar='output directory')
args = parser.parse_args()
if args.i:
indir = args.i
else:
indir="."
if args.o:
outdir = args.o
else:
outdir="."
print("read flac files from " + indir + "; results will be written to " +
outdir)
# convert and flag each file in directory
for filepath in os.listdir(indir):
print "path:" + filepath
if not filepath.endswith(".flac"):
continue
basename = os.path.basename(filepath)[0:-5]
flacname = indir + "/" + basename + ".flac"
wavname = outdir + "/" + basename + ".wav"
mp3name = outdir + "/" + basename + ".mp3"
print "transcode: " + flacname
metadata = {
"title" : gettag("TITLE", flacname),
"tracknumber" : gettag("TRACKNUMBER", flacname),
"artist" : gettag("ARTIST", flacname),
"album" : gettag("ALBUM", flacname),
"genre" : gettag("GENRE", flacname),
"year" : gettag("DATE", flacname)
}
if os.path.isfile("cover.png"):
metadata["cover"] = "cover.png"
elif os.path.isfile("cover.jpg"):
metadata["cover"] = "cover.jpg"
else:
metadata["cover"] = ""
print metadata
if decode_flac(flacname, wavname):
print "decoding flac failed"
exit(1)
if encode_mp3(wavname, mp3name):
print "encoding mp3 failed"
exit(1)
if tag_mp3(mp3name, metadata):
print "tagging mp3 failed"
exit(1)
os.remove(wavname);
print "finished"
| mit | Python |
|
19ee2fbee238e94b7944154d692a9e488ee19a79 | Add basic opps database configuration | jeanmask/opps,jeanmask/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,opps/opps,williamroot/opps,opps/opps,opps/opps,opps/opps,YACOWS/opps,YACOWS/opps,williamroot/opps,williamroot/opps,jeanmask/opps,YACOWS/opps | opps/db/conf.py | opps/db/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings
from appconf import AppConf
class OppsDataBaseConf(AppConf):
HOST = getattr(settings, 'OPPS_DB_HOST', None)
USER = getattr(settings, 'OPPS_DB_USER', None)
PASSWORD = getattr(settings, 'OPPS_DB_PASSWORD', None)
PORT = getattr(settings, 'OPPS_DB_PORT', None)
NAME = getattr(settings, 'OPPS_DB_NAME', None)
TYPE = getattr(settings, 'OPPS_DB_TYPE', None)
OPTION = getattr(settings, 'OPPS_DB_OPTION', None)
class Meta:
prefix = 'opps_db'
| mit | Python |
|
d1c791ccf5b2873bbc248c9b079a5b68159ffb50 | Add ECM Keys script | smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning | python/ecep/portal/management/commands/update_ecm.py | python/ecep/portal/management/commands/update_ecm.py | import csv
import os
import re
from django.core.management.base import NoArgsCommand
from django.conf import settings
from portal.models import Location
class Command(NoArgsCommand):
"""
Import Cleaned Site Name, Address, and ECM Keys
"""
def handle(self, *args, **options):
with open('master-list.csv', 'rb') as master:
reader = csv.DictReader(master)
for row in reader:
try:
l = Location.objects.get(pk=int(row['Portal ID']))
l.site_name = row['Master Site Name']
l.address = row['Master Address']
l.ecm_key = row['ECM Key']
l.save()
print l.site_name
except:
print "Ruh roh!"
continue
| mit | Python |
|
8ed1fccb2a1d72815bde93b19d45069e59db0900 | add force404 sample | thinkAmi-sandbox/Bottle-sample,thinkAmi-sandbox/Bottle-sample | force404.py | force404.py | # -*- coding:utf-8 -*-
from bottle import route, run, abort, error
@route("/")
def top():
abort(404, "go to 404")
return "Hello world!"
@error(404)
def error404(error):
return "Not Found!"
run(host="0.0.0.0", port=8080, debug=True, reloader=True) | unlicense | Python |
|
5d58200622e05728acce8ffba1ddf7e5063f556c | Create formatIO.py | fpg2012/tuneTurner | formatIO.py | formatIO.py |
# Normalize the input/output format, e.g. (xxxx) -> (x)(x)(x)(x), ([x]) -> x, [xxxx] -> [x][x][x][x]
def formatting(old_tune):
'''
Normalize the tune notation so that every note carries its own octave brackets.
'''
new_tune = ''
sharped = False
low = high = 0
for i in old_tune:
if i == '(':
low = low + 1
elif i == '[':
high = high + 1
elif i == ']':
high = high - 1
elif i == ')':
low = low - 1
elif i == '#':
sharped = True
if low == high:
new_tune = new_tune + i
elif low > high:
new_tune = new_tune + '(' * (low - high) + i
elif low < high:
new_tune = new_tune + '[' * (high - low) + i
else:
return 'error'
else:
if sharped:
if low == high:
new_tune = new_tune + i
elif low > high:
new_tune = new_tune + i + ')' * (low - high)
elif low < high:
new_tune = new_tune + i + ']' * (high - low)
else:
return 'error'
sharped = False
else:
if low == high:
new_tune = new_tune + i
elif low > high:
new_tune = new_tune + '(' * (low - high) + i + ')' * (low - high)
elif low < high:
new_tune = new_tune + '[' * (high - low) + i + ']' * (high - low)
else:
return 'error'
print(new_tune)
return new_tune
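# Expected behaviour per the mapping in the header comment (my own trace, not
# part of the original file):
# formatting('(xx)')   -> '(x)(x)'
# formatting('[xxxx]') -> '[x][x][x][x]'
# formatting('([x])')  -> 'x'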
| apache-2.0 | Python |
|
aff6ff82ec4fc0076f8356d782a2a103510ebbfd | use Queue for product and custom problem | MailG/code_py,MailG/code_py,MailG/code_py | product_custom/use_queue.py | product_custom/use_queue.py |
# http://blog.jobbole.com/52412/
from threading import Thread
import time
import random
from Queue import Queue
queue = Queue(10)
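# Note: Queue(10) bounds the buffer; queue.put() blocks the producer once 10
# items are pending and queue.get() blocks the consumer while it is empty.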
class ProducerThread(Thread):
def run(self):
nums = range(5)
while True:
num = random.choice(nums)
queue.put(num)
print "Produced", num
time.sleep(random.random())
class ConsumerThread(Thread):
def run(self):
while True:
num = queue.get()
queue.task_done()
print "Consumed", num
time.sleep(random.random())
ProducerThread().start()
ConsumerThread().start()
| mit | Python |
|
b75a0293f214de4196d9df50ef5906885c2810fc | Create empClass.py | imughal/EmployeeScript | empClass.py | empClass.py | from rgfunc import *
class Employee(object):
year = 0
month = 0
day = 0
city = ""
country = ""
lastname = ""
def __init__(self,name):
self.name = name
def dateofbirth(self):
return str(self.day)+"/"+str(self.month)+"/"+str(self.year)
#fullname = name," ",lastname
def fullname(self):
return str(self.name)+" "+str(self.lastname)
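# Illustrative usage (not part of the original script):
# emp = Employee('Jane'); emp.lastname = 'Doe'
# emp.year, emp.month, emp.day = 1990, 12, 31
# emp.fullname()    -> 'Jane Doe'
# emp.dateofbirth() -> '31/12/1990'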
if __name__ == "__main__":
print "Error-Invalid File to Run- Please Run main.py."
exit()
| mit | Python |
|
d9be3f189fc34117bdec6e0c7856f7a7dc5f902a | Add tool for generating the JSONP required by the documentation versions. | chtyim/cdap,mpouttuclarke/cdap,anthcp/cdap,chtyim/cdap,hsaputra/cdap,chtyim/cdap,hsaputra/cdap,caskdata/cdap,chtyim/cdap,caskdata/cdap,mpouttuclarke/cdap,mpouttuclarke/cdap,hsaputra/cdap,caskdata/cdap,hsaputra/cdap,mpouttuclarke/cdap,caskdata/cdap,anthcp/cdap,anthcp/cdap,chtyim/cdap,caskdata/cdap,anthcp/cdap,caskdata/cdap,mpouttuclarke/cdap,anthcp/cdap,hsaputra/cdap,chtyim/cdap | cdap-docs/tools/versionscallback-gen.py | cdap-docs/tools/versionscallback-gen.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Cask Data, Inc.
#
# Used to generate JSONP from a CDAP documentation directory on a webserver.
#
# sudo echo "versionscallback({\"development\": \"2.6.0-SNAPSHOT\", \"current\": \"2.5.2\", \"versions\": [\"2.5.1\", \"2.5.0\"]});" > json-versions.js; ls -l
import sys
from os import getcwd, listdir, readlink
from os.path import isdir, islink, join
def add_value(call, name, value):
if value:
if call:
call += ', '
call += '\\\"%s\\\": \\\"%s\\\"' % (name, value)
return call
def add_object(call, name, value):
if value:
if call:
call += ', '
call += ('\\\"%s\\\": %s' % (name, value)).replace("\'", '\\\"')
return call
def walk_directory(path=''):
global current, development, versions
if not path:
path = getcwd()
onlydirs = [ d for d in listdir(path) if isdir(join(path,d)) ]
onlydirs.reverse()
for d in onlydirs:
if d == 'current':
d_path = join(path,d)
if islink(d_path):
current = readlink(d_path)
elif d.endswith('SNAPSHOT'):
development = d
elif d and d != current:
versions.append(d)
def build(path=''):
global current, development, versions
call = ''
walk_directory(path)
call = add_value(call, 'development', development)
call = add_value(call, 'current', current)
call = add_object(call, 'versions', versions)
target = join(path, 'json-versions.js')
print 'sudo echo "versionscallback({%s});" > %s; ls -l' % (call, target)
def usage():
print 'Generates a command that creates the "versionscallback" JSONP from a CDAP documentation directory on a webserver.'
print 'Run this with the path to the directory containing the documentation directories.'
print 'python %s <path>' % sys.argv[0]
# Main
if __name__ == '__main__':
current = ''
development = ''
versions = []
path = ''
if len(sys.argv) > 1:
path = sys.argv[1]
build(path)
else:
usage()
| apache-2.0 | Python |
|
fe1d75065f7371502cf81ea57e2a1019c2db093c | add config.py | devjoe/gae-dev-helper | custom_config.py | custom_config.py | # ===== GAE dev_appserver.py settings =====
# [Required]
gae_sdk_path = ""
project_path = ""
# [Optional]
datastore_path = ""
port = ""
admin_port = ""
# ===== GAE Helper settings =====
# [Log]
log_path = ""
append_date_to_log = False
# [Request Filter]
file_type_filter = []
custom_regex_filter = []
use_time_delimiter = False
| mit | Python |
|
90c36d54f8822ef28bef98be4ba735d15b405648 | add get_dump.py utility | contactless/mqtt-tools,contactless/mqtt-tools | get_dump.py | get_dump.py | #!/usr/bin/python
import argparse
import mosquitto
import time, random
import sys
def on_mqtt_message(arg0, arg1, arg2=None):
#
#~ print "on_mqtt_message", arg0, arg1, arg2
if arg2 is None:
mosq, obj, msg = None, arg0, arg1
else:
mosq, obj, msg = arg0, arg1, arg2
if msg.topic != retain_hack_topic:
print "%s\t%s" % (msg.topic, msg.payload)
else:
#~ print "done!"
client.disconnect()
sys.exit(0)
if __name__ =='__main__':
parser = argparse.ArgumentParser(description='MQTT retained message deleter', add_help=False)
parser.add_argument('-h', '--host', dest='host', type=str,
help='MQTT host', default='localhost')
parser.add_argument('-p', '--port', dest='port', type=int,
help='MQTT port', default='1883')
parser.add_argument('topic' , type=str,
help='Topic mask to unpublish retained messages from. For example: "/devices/my-device/#"')
args = parser.parse_args()
client = mosquitto.Mosquitto()
client.connect(args.host, args.port)
client.on_message = on_mqtt_message
client.subscribe(args.topic)
# hack to get retained settings first:
retain_hack_topic = "/tmp/%s/retain_hack" % ( client._client_id)
client.subscribe(retain_hack_topic)
client.publish(retain_hack_topic, '1')
while 1:
rc = client.loop()
if rc != 0:
break
| mit | Python |
|
e04d3bfd20879d0e8e404a3fff4ab37b914cd303 | Add ContactForm | Kromey/akwriters,Kromey/fbxnano,Kromey/fbxnano,Kromey/akwriters,Kromey/akwriters,Kromey/fbxnano,Kromey/fbxnano,Kromey/akwriters | contact/forms.py | contact/forms.py | from django import forms
from django.core.exceptions import ValidationError
from prosodyauth.forms import PlaceholderForm
from simplecaptcha import captcha
contact_reasons = (
('question', 'Question'),
('problem', 'Problem'),
('suggestion', 'Suggestion'),
('other', 'Other'),
)
class ContactForm(PlaceholderForm):
username = forms.CharField(widget=forms.HiddenInput)
ip_address = forms.GenericIPAddressField(widget=forms.HiddenInput)
subject = forms.ChoiceField(choices=contact_reasons)
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea)
| mit | Python |
|
0aed5df2f7c08cdb365b098a93800b0269c0c6b4 | Create class Dataset | gammapy/gamma-cat | gammacat/dataset.py | gammacat/dataset.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
from .utils import load_yaml, write_yaml
from gammapy.catalog.gammacat import GammaCatResource
__all__ = [
'DataSet',
]
log = logging.getLogger(__name__)
class DataSet:
"""Process a dataset file."""
resource_type = 'ds'
def __init__(self, data, resource):
log.debug('DataSet.__init__()')
self.resource = resource
self.data = data
@classmethod
def read(cls, filename):
data = load_yaml(filename)
resource = cls._read_resource_info(data, filename)
return cls(data = data, resource = resource)
def write(self, filename):
write_yaml(self.data, filename)
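# Illustrative round trip (my own sketch; assumes a YAML file carrying the
# expected source_id / reference_id keys):
# ds = DataSet.read('input.yaml')
# ds.write('output.yaml')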
def folder(self):
return self.data['reference_id'].replace('&', '%26')
@classmethod
def _read_resource_info(cls, data, location):
try:
file_id = data['file_id']
except:
file_id = -1
return GammaCatResource(
source_id = data['source_id'],
reference_id = data['reference_id'],
file_id = file_id,
type=cls.resource_type,
location=location
) | bsd-3-clause | Python |
|
828b065e857b5f148a0d20b06fd9d45824a1befc | add manager.py flask api for genmodel | empirical-org/WikipediaSentences,empirical-org/WikipediaSentences | genmodel/manager.py | genmodel/manager.py | from flask import Flask, request, render_template, jsonify
import psycopg2
import os
# Connect to Database
try:
DB_NAME=os.environ['DB_NAME']
DB_USER=os.environ['DB_USER']
DB_PASS=os.environ['DB_PASS']
except KeyError as e:
raise Exception('environment variables for database connection must be set')
conn = psycopg2.connect(dbname=DB_NAME,
user=DB_USER,
password=DB_PASS,
host='localhost',
port=5432
)
app = Flask(__name__)
@app.route('/')
def man():
return 'Not implemented'
@app.route('/jobs', methods=["GET", "POST"])
def jobs():
if request.method == "GET":
cur = conn.cursor()
cur.execute('SELECT * FROM jobs')
resp = cur.fetchall()
return jsonify(resp)
elif request.method == "POST":
# Take a JSON with attributes of job, start job, then redirect to that
# job's monitoring page (jobs/job_id)
return 'Not implemented'
else:
return 'Not implemented'
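# Quick smoke test once the server is running (illustrative):
# curl http://localhost:5000/jobs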
@app.route('/jobs/<job_id>', methods=["GET", "PATCH", "DELETE"])
def job_for_id(job_id):
if request.method == "GET":
# Job monitoring for a specific job
return 'GET job #' + job_id
elif request.method == "PATCH":
# TODO: Should this be an endpoint?
# Modify job, scale resolvers
return 'PATCH job #' + job_id
elif request.method == "DELETE":
# Remove all dedicated Digital Ocean containers, stop all publishers,
# writers and workers. Purge the queue.
return 'DELETE job #' + job_id
return job_id
if __name__ == '__main__':
app.run(port=5000, host= '0.0.0.0', debug=True)
| agpl-3.0 | Python |
|
7e28a3fe54c24a38a90bf0e7cf2f634ca78ee2ed | Add script used to generate a Cantor set | ExcaliburZero/cantor-set | cantorset.py | cantorset.py | ### BEGIN LICENSE
# The MIT License (MIT)
#
# Copyright (C) 2015 Christopher Wells <cwellsny@nycap.rr.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE
"""A script which visually draws a Cantor set."""
import turtle
import time
def rec_draw(l, r, x, xd, t, pen):
"""Recursively draw each section of the Cantor set, until the set
number of rows has been met."""
if x < t:
# Draw the first full line, is redundant after first recursion
pen.up()
pen.goto(l, (-(x - 1) * xd))
pen.down()
pen.goto(r, (-(x - 1) * xd))
# Find the length of each of the lesser lines
diff = (r - l) / 3
# Draw the first lesser line (1/3)
pen.up()
pen.goto(l, -x * xd)
pen.down()
pen.goto(l + diff, -x * xd)
rec_draw(l, l + diff, x + 1, xd, t, pen)
# Draw the second lesser line (3/3)
pen.up()
pen.goto(l + diff * 2, -x * xd)
pen.down()
pen.goto(r, -x * xd)
rec_draw(l + diff * 2, r, x + 1, xd, t, pen)
else:
# End once the given number of lines has been met
return
def main():
"""Draw a visual representation of a Cantor set."""
# Create the pen and set its initial values
pen = turtle.Turtle()
pen.ht()
pen.speed(0)
# Set the values of the Cantor set
left = -200 # The right boundry
right = 200 # The left boundry
starting_row = 0 # The location of the first row
row_distance = 10 # The distance between rows
rows = 5 # The number of rows
# Draw the Cantor set
rec_draw(left, right, starting_row, row_distance, rows, pen)
time.sleep(500)
# Run the main method of the script
if __name__ == '__main__':
main()
| mit | Python |
|
282383ab66f85ff6eb58b98c34558c02c9cf44eb | add a tool to list recipes used by builders (and ones not on recipes) | eunchong/build,eunchong/build,eunchong/build,eunchong/build | scripts/tools/builder_recipes.py | scripts/tools/builder_recipes.py | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import operator
import os
import subprocess
import sys
import tempfile
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
BLACKLISTED_MASTERS = [
'master.chromium.reserved',
'master.chromiumos.unused',
'master.client.reserved',
'master.reserved',
'master.tryserver.reserved',
]
def getMasterConfig(path):
with tempfile.NamedTemporaryFile() as f:
subprocess.check_call([
os.path.join(BASE_DIR, 'scripts', 'tools', 'runit.py'),
os.path.join(BASE_DIR, 'scripts', 'tools', 'dump_master_cfg.py'),
os.path.join(path),
f.name])
return json.load(f)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--only-nonrecipe', action='store_true')
args = parser.parse_args()
data = []
for master in os.listdir(os.path.join(BASE_DIR, 'masters')):
if master in BLACKLISTED_MASTERS:
continue
path = os.path.join(BASE_DIR, 'masters', master)
if not os.path.isdir(path):
continue
config = getMasterConfig(path)
for builder in config['builders']:
try:
recipe = builder['factory']['properties'].get(
'recipe', ['<no recipe>'])[0]
except Exception as e:
recipe = '<error: %r>' % e
if (args.only_nonrecipe and
recipe != '<no recipe>' and
not recipe.startswith('<error:')):
continue
data.append({
'master': master,
'builder': builder['name'],
'recipe': recipe,
})
master_padding = max(len(row['master']) for row in data)
builder_padding = max(len(row['builder']) for row in data)
pattern = '%%-%ds | %%-%ds | %%s' % (master_padding, builder_padding)
for row in sorted(data, key=operator.itemgetter('master', 'builder')):
print pattern % (row['master'], row['builder'], row['recipe'])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | Python |
|
bf3f14692b6e2a348f5a0171ad57e494801ed4f4 | Add python script to write lib svm expected data format from my collected data | Wayne82/libsvm-practice,Wayne82/libsvm-practice,Wayne82/libsvm-practice | scripts/writelibsvmdataformat.py | scripts/writelibsvmdataformat.py | """
A script to write out lib svm expected data format from my collecting data
"""
import os
import sys
import csv
import getopt
cmd_usage = """
usage: writelibsvmdataformat.py --inputs="/inputs/csv_files" --output="/output/lib_svm_data"
"""
feature_space = 10
def write_libsvm_data(input_files, output_file):
"""
:param input_files: input files, each of which contains a single label at first row, and a bunch of data following
:param output_file: output file, which meet lib svm expected data format
"""
with open(output_file, 'wb') as output_csv_file:
output_writer = csv.writer(output_csv_file, delimiter=' ')
for input_file in input_files:
with open(input_file, 'rb') as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=' ')
# assume there is only one item in each row
label = input_reader.next()
i = 1 # start from index 1
line = [label[0]]
for row in input_reader:
if int(row[0]) != 0:
line.append(':'.join([str(i), row[0]]))
i += 1
if i > feature_space:
output_writer.writerow(line)
i = 1
line = [label[0]]
def main(argv):
"""
:param argv: command line arguments
:rtype : error status, success 0 and fail 1
"""
try:
optlist, args = getopt.getopt(argv[1:], "hi:o:", ["help", "inputs=", "output="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print cmd_usage
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
if not os.path.exists(inputs):
print("Input files folder not exist")
return 1
elif opt in ("-o", "--output"):
output_file = opt_arg
# print the messages
print("Inputs folder: " + inputs)
print("Output file: " + output_file)
assert isinstance(output_file, basestring)
assert isinstance(inputs, basestring)
input_files = []
for root, dirs, files in os.walk(inputs):
for name in files:
if name.endswith('.csv'):
input_files.append(os.path.abspath(os.path.join(root, name)))
if len(input_files) == 0:
print("No input files.")
return 1
write_libsvm_data(input_files, output_file)
if __name__ == "__main__":
sys.exit(main(sys.argv)) | bsd-3-clause | Python |
|
900c93e6917ef92da02cca6865284e0004b01695 | add file | Fahreeve/aiovk,Fahreeve/aiovk,Fahreeve/aiovk | aiovk/mixins.py | aiovk/mixins.py | class LimitRateDriverMixin(object):
requests_per_second = 3
| mit | Python |
|
a30277835e65195fc68e6708fe5da394bc43e08c | Test Projection | Contraz/demosys-py | tests/test_projection.py | tests/test_projection.py | from demosys.test import DemosysTestCase
from demosys.opengl import Projection
class ProjectionTest(DemosysTestCase):
def test_create(self):
proj = Projection(fov=60, near=0.1, far=10)
proj.update(fov=75, near=1, far=100)
proj.tobytes()
proj.projection_constants
| isc | Python |
|
1c2c7d5134780e58bd69f24ee06050b2f405d946 | Add unit test for run_nohw | Igalia/snabb,eugeneia/snabbswitch,eugeneia/snabbswitch,eugeneia/snabbswitch,alexandergall/snabbswitch,Igalia/snabb,alexandergall/snabbswitch,snabbco/snabb,dpino/snabb,eugeneia/snabb,heryii/snabb,alexandergall/snabbswitch,snabbco/snabb,snabbco/snabb,alexandergall/snabbswitch,Igalia/snabbswitch,Igalia/snabb,snabbco/snabb,SnabbCo/snabbswitch,Igalia/snabb,alexandergall/snabbswitch,Igalia/snabbswitch,eugeneia/snabbswitch,heryii/snabb,Igalia/snabbswitch,snabbco/snabb,dpino/snabbswitch,alexandergall/snabbswitch,dpino/snabbswitch,Igalia/snabb,dpino/snabbswitch,SnabbCo/snabbswitch,eugeneia/snabb,dpino/snabb,snabbco/snabb,Igalia/snabbswitch,dpino/snabb,heryii/snabb,dpino/snabb,eugeneia/snabb,alexandergall/snabbswitch,snabbco/snabb,Igalia/snabb,eugeneia/snabb,Igalia/snabbswitch,heryii/snabb,eugeneia/snabb,dpino/snabb,eugeneia/snabb,alexandergall/snabbswitch,SnabbCo/snabbswitch,eugeneia/snabb,dpino/snabb,dpino/snabbswitch,Igalia/snabb,dpino/snabb,eugeneia/snabb,Igalia/snabb,SnabbCo/snabbswitch,snabbco/snabb,heryii/snabb,heryii/snabb | src/program/lwaftr/tests/subcommands/run_nohw_test.py | src/program/lwaftr/tests/subcommands/run_nohw_test.py | """
Test the "snabb lwaftr run_nohw" subcommand.
"""
import unittest
from random import randint
from subprocess import call, check_call
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase
class TestRun(BaseTestCase):
program = [
str(SNABB_CMD), 'lwaftr', 'run_nohw',
]
cmd_args = {
'--duration': '1',
'--bench-file': '/dev/null',
'--conf': str(DATA_DIR / 'icmp_on_fail.conf'),
'--inet-if': '',
'--b4-if': '',
}
veths = []
@classmethod
def setUpClass(cls):
cls.create_veth_pair()
@classmethod
def create_veth_pair(cls):
veth0 = cls.random_veth_name()
veth1 = cls.random_veth_name()
# Create veth pair.
check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', \
'name', veth1))
# Set interfaces up.
check_call(('ip', 'link', 'set', veth0, 'up'))
check_call(('ip', 'link', 'set', veth1, 'up'))
# Add interface names to class.
cls.veths.append(veth0)
cls.veths.append(veth1)
@classmethod
def random_veth_name(cls):
return 'veth%s' % randint(10000, 999999)
def test_run_nohw(self):
self.execute_run_test(self.cmd_args)
def execute_run_test(self, cmd_args):
self.cmd_args['--inet-if'] = self.veths[0]
self.cmd_args['--b4-if'] = self.veths[1]
output = self.run_cmd(self.build_cmd())
self.assertIn(b'link report', output,
b'\n'.join((b'OUTPUT', output)))
def build_cmd(self):
result = self.program
for item in self.cmd_args.items():
for each in item:
result.append(each)
return result
@classmethod
def tearDownClass(cls):
cls.remove_veths()
@classmethod
def remove_veths(cls):
for i in range(0, len(cls.veths), 2):
check_call(('ip', 'link', 'delete', cls.veths[i]))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
d36ce70863653238d88e8ec23416ec894d6140eb | Create _geoserver_publish_layergroup.py | state-hiu/cybergis-scripts,state-hiu/cybergis-scripts | lib/cybergis/gs/_geoserver_publish_layergroup.py | lib/cybergis/gs/_geoserver_publish_layergroup.py | from base64 import b64encode
from optparse import make_option
import json
import urllib
import urllib2
import argparse
import time
import os
import subprocess
def make_request(url, params, auth=None, data=None, contentType=None):
"""
Prepares a request from a url, params, and optionally authentication.
"""
print url + urllib.urlencode(params)
req = urllib2.Request(url + urllib.urlencode(params), data=data)
if auth:
req.add_header('AUTHORIZATION', 'Basic ' + auth)
if contentType:
req.add_header('Content-type', contentType)
else:
if data:
req.add_header('Content-type', 'text/xml')
return urllib2.urlopen(req)
def parse_url(url):
if (url is None) or len(url) == 0:
return None
index = url.rfind('/')
if index != (len(url)-1):
url += '/'
return url
def buildPOSTDataLayerGroup(layergroup,layers):
data = "<layerGroup><name>"+layergroup+"</name>"
for layer in layers:
data += "<layer>"+layer+"</layer>"
data += "</layergroup>"
return data
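# e.g. buildPOSTDataLayerGroup('roads', ['streets', 'highways']) returns
# (with the matching closing tag above):
# <layerGroup><name>roads</name><layer>streets</layer><layer>highways</layer></layerGroup>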
def createLayerGroup(verbose, geoserver, workspace, auth, layergroup, layers):
if verbose > 0:
print('Creating GeoServer Layergroup for '+layergroup+".")
params = {}
data = buildPOSTDataLayerGroup(layergroup,layers)
url = geoserver+"rest/workspaces/"+workspace+"/layergroups.xml"
try:
request = make_request(url=url+'?', params=params, auth=auth, data=data)
except:
#raise Exception("Create layergroup failed with url="+url+", params="+str(params)+", data="+data)
print "Create layergroup failed with url="+url+", params="+str(params)+", data="+data
raise
if request.getcode() != 201:
raise Exception("Create layergroup failed: Status Code {0}".format(request.getcode()))
if verbose > 0:
print('Layer created.')
def parse_layers(layers):
if layers and len(layers) > 0:
try:
return layers.split(",")
except:
return None
else:
return None
def run(args):
#print args
#==#
verbose = args.verbose
#==#
layers = parse_layers(args.layers)
geoserver = parse_url(args.geoserver)
workspace = args.workspace
layergroup = args.layergroup
#==#
auth = None
if args.username and args.password:
auth = b64encode('{0}:{1}'.format(args.username, args.password))
#==#
print "=================================="
print "#==#"
print "CyberGIS Script / cybergis-scrit-geoserver-publish-layergroup.py"
print "Publishes multiple layers as a layer group"
print "#==#"
#==#
if not layers:
print "Could not parse layers correctly."
return 1;
#==#
#Publish Layers as Layer Group
try:
createLayerGroup(verbose, geoserver, workspace, auth, layergroup, layers)
except:
print "Couldn't create layergroup from layers "+args.layers+"."
raise
print "=================================="
| mit | Python |
|
ff3e3e6be3a5a46db73a772f99071e83b9026d98 | add wikipedia plugin | Rouji/Yui,Rj48/ircbot | plugins/wiki.py | plugins/wiki.py | import wikipedia
MAX_LEN = 350
@yui.command('wiki', 'wk', 'w')
def wiki(argv):
"""wiki [-lang] <article>"""
lang = 'en'
if len(argv) < 2:
return
# check if a language is given
argv = argv[1:]
if len(argv) > 1 and argv[0].startswith('-'):
lang = argv[0][1:]
argv = argv[1:]
article = ' '.join(argv)
try:
wikipedia.set_lang(lang)
sum = wikipedia.summary(article)
except Exception as ex:
return "Couldn't find an article for '%s'" % article
if len(sum) > MAX_LEN:
sum = sum[:MAX_LEN-3] + '...'
return sum
| mit | Python |
|
8fc91c780cf7f0b43deac69b0e60f2b9472af172 | Add script to automatically setup ln -s for the utilities I use | LonamiWebs/Py-Utils | set-links.py | set-links.py | """
Helper script to set up ln -s <desired utilities> on a given bin/ PATH.
"""
import os
utilities = (
'mineutils/mc',
'misc/gitmail',
'misc/pipu',
'misc/reclick',
)
def run(program, *args):
"""Spawns a the given program as a subprocess and waits for its exit"""
# I for Invariant argument count, P for using PATH environmental variable
os.spawnlp(os.P_WAIT, program, program, *args)
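# e.g. run('ln', '-s', '/abs/path/to/mineutils/mc') creates a symlink named
# "mc" in the current working directory (illustrative).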
if __name__ == '__main__':
where = None
try:
import pyperclip
where = pyperclip.paste()
if where.startswith('file://'):
where = where[len('file://'):]
if not os.path.isdir(where):
where = None
except ImportError:
pass
if not where:
where = input('Where should the links be created?\n: ')
if not os.path.isdir(where):
os.makedirs(where)
utilities = tuple(os.path.abspath(x) for x in utilities)
os.chdir(where)
for utility in utilities:
print(f'Creating link for {utility}...')
run('ln', '-s', utility)
print('Done!')
| mit | Python |
|
7556fd9f55fe84a82a4843fb0ba43e7ad144e874 | Update tendrl_definitions.py | Tendrl/node_agent,r0h4n/node-agent,Tendrl/node-agent,Tendrl/node-agent,Tendrl/node_agent,Tendrl/node-agent,r0h4n/node-agent,r0h4n/node-agent | tendrl/node_agent/persistence/tendrl_definitions.py | tendrl/node_agent/persistence/tendrl_definitions.py | from tendrl.bridge_common.etcdobj.etcdobj import EtcdObj
from tendrl.bridge_common.etcdobj import fields
class TendrlDefinitions(EtcdObj):
"""A table of the Os, lazily updated
"""
__name__ = '/tendrl_definitions_node_agent'
data = fields.StrField("data")
| from tendrl.bridge_common.etcdobj.etcdobj import EtcdObj
from tendrl.bridge_common.etcdobj import fields
class TendrlDefinitions(EtcdObj):
"""A table of the Os, lazily updated
"""
__name__ = '/tendrl_definitions_node_agent'
data = fields.StrField("data")
def render(self):
self.__name__ = self.__name__ % self.node_uuid
return super(TendrlDefinitions, self).render()
| lgpl-2.1 | Python |
907fa0a42dd90ca67d86e61ce7984d5764455fb9 | add missing __init__.py | freevo/kaa-base,freevo/kaa-base | src/distribution/__init__.py | src/distribution/__init__.py | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# core.py - distutils functions for kaa packages
# -----------------------------------------------------------------------------
# $Id: distribution.py 2110 2006-11-29 00:41:31Z tack $
#
# -----------------------------------------------------------------------------
# Copyright (C) 2006 Dirk Meyer, Jason Tackaberry
#
# First Edition: Dirk Meyer <dmeyer@tzi.de>
# Maintainer: Dirk Meyer <dmeyer@tzi.de>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version
# 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# -----------------------------------------------------------------------------
from core import *
| lgpl-2.1 | Python |
|
92f63d6ad055aa213b67ad2778187faee1fde821 | Add in printParents.py | houtianze/pyutil | printParents.py | printParents.py | from types import *
# https://stackoverflow.com/questions/2611892/get-python-class-parents
def printParents(thing, ident = 2):
'''
Print out all the parents (till the ancestors) of a given class / object.
@param indent: Print indentation
'''
typ = type(thing)
if typ is ClassType:
printClassParents(thing, 0)
elif typ is InstanceType:
print("Object: {}".format(thing))
printClassParents(thing.__class__, 0)
else:
print("'{}' - '{}'".format(thing, type))
print("I don't know your parents.")
def printClassParents(cls, level = 0, indent = 2):
thisLevel = ' ' * indent * level + "{} --> {{ {} }}".format(
cls, ', '.join(str(c) for c in cls.__bases__))
print(thisLevel)
for base in cls.__bases__:
printClassParents(base, level + 1)
if __name__ == '__main__':
import sys
def help(names):
print("Invalid arg: {}\nSyntax: modeul1.class1 module2.class2".format(names))
if len(sys.argv) > 1:
# input args: module1.class1 module2.class2 ...
# eg. printParents.py Tkinter.Frame Tkinker.Button
# https://stackoverflow.com/questions/4821104/python-dynamic-instantiation-from-string-name-of-a-class-in-dynamically-imported
for names in sys.argv[1:]:
mc = names.split('.')
if len(mc) == 2:
# price you pay when you go dynamic
try:
ctor = getattr(__import__(mc[0]), mc[1])
inst = ctor()
printParents(inst)
print('=' * 32)
except:
help(names)
else:
help(names)
else:
from ttk import *
button = Button()
printParents(button)
print('=' * 32)
printParents(Label)
print('=' * 32)
printParents(8)
| mit | Python |
|
87565c1e6032bff2cc3e20f5c4f46b7a17977f7c | Add organisation for TFL Dial a Ride | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0098_tfl_dar.py | migrations/versions/0098_tfl_dar.py | """empty message
Revision ID: 0098_tfl_dar
Revises: 0097_notnull_inbound_provider
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0098_tfl_dar'
down_revision = '0097_notnull_inbound_provider'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
''
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
| mit | Python |
|
22585d29220709dc3a3de16b03c626ca27c715ca | Add migration version? Not sure if this is right | brianwolfe/robotics-tutorial,brianwolfe/robotics-tutorial,brianwolfe/robotics-tutorial | migrations/versions/3025c44bdb2_.py | migrations/versions/3025c44bdb2_.py | """empty message
Revision ID: 3025c44bdb2
Revises: None
Create Date: 2014-12-16 12:13:55.759378
"""
# revision identifiers, used by Alembic.
revision = '3025c44bdb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
| bsd-2-clause | Python |
|
64c70f3f73d14d5bdd18cf5c4ad8b15ec745f517 | Add helpful script for ascii checking - fyi @bruskiza | praekelt/mama-ng-jsbox,praekelt/mama-ng-jsbox | config/check_ascii.py | config/check_ascii.py | import json
files = ["go-ussd_public.ibo_NG.json"]
def is_ascii(s):
return all(ord(c) < 128 for c in s)
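# e.g. is_ascii('hello') -> True; is_ascii(u'héllo') -> False (illustrative)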
current_message_id = 0
for file_name in files:
json_file = open(file_name, "rU").read()
json_data = json.loads(json_file)
print "Proccessing %s\n-------" % file_name
for key, value in json_data.items():
# Ignore non-content keys and empty keys
if len(value) == 2:
if not is_ascii(value[1]):
print ("Non-ascii translation found of <%s>: %s" % (key, value[1]))
print "Done Proccessing %s\n-------" % file_name
| bsd-3-clause | Python |
|
289ce4a720c5863f6a80e1b86083fd2919b52f14 | Add file for tests of start symbol as not nonterminal | PatrikValkovic/grammpy | tests/startsymbol_tests/NotNonterminalTest.py | tests/startsymbol_tests/NotNonterminalTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:12
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NotNonterminalTest(TestCase):
pass
if __name__ == '__main__':
main()
| mit | Python |
|
764f02e5e8c53b47cb2e28375a049accba442f0c | Create __init__.py | widoyo/simpleapp | app/__init__.py | app/__init__.py | # -*- encoding: utf-8 -*-
# app/__init__.py
| mit | Python |
|
d9291843d575e587efdd7aa0c4605fee766dc232 | clean up test queries | uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco | examples/test_query.py | examples/test_query.py | from raco import RACompiler
from raco.language import CCAlgebra, MyriaAlgebra
from raco.algebra import LogicalAlgebra
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
def testEmit(query, name):
LOG.info("compiling %s", query)
# Create a compiler object
dlog = RACompiler()
# parse the query
dlog.fromDatalog(query)
#print dlog.parsed
LOG.info("logical: %s",dlog.logicalplan)
dlog.optimize(target=CCAlgebra, eliminate_common_subexpressions=False)
LOG.info("physical: %s",dlog.physicalplan[0][1])
# generate code in the target language
code = dlog.compile()
with open(name+'.c', 'w') as f:
f.write(code)
queries = [
("A(s1) :- T(s1)", "scan"),
("A(s1) :- T(s1), s>10", "select"),
("A(s1) :- T(s1), s>0, s<10", "select_conjunction"),
("A(s1,s2) :- T(s1,s2), s>10, s2>10", "two_var_select"),
("A(s1,o2) :- T(s1,p1,o1), R(o2,p1,o2)", "join"),
("A(a,b,c) :- R(a,b), S(b,c)", "two_path"),
("A(a,c) :- R(a,b), S(b,c)", "two_hop"),
("A(a,b,c) :- R(a,b), S(b,c), T(c,d)", "three_path"),
("A(a,b,c) :- R(a,b), S(b,c), T(c,a)", "directed_triangles"),
("A(s1,s2,s3) :- T(s1,s2,s3), R(s3,s4), s1<s2, s4<100", "select_then_join"),
#("A(a,b,c) :- R(a,b), S(b,c), T(c,a), a<b, b<c", "increasing_triangles"),
#("A(s1,s2,s3) :- T(s1,s2,s3), R(s3,s4), s1<s4", "equi_and_range"),
#("A(s1,s2,s3) :- T(s1,s2),R(s3,s4), s1<s3", "range_join"),
#("A(a,b,c,d,e):-X(a,b),Y(a,c),Z(a,d,e),T(a,b),K(b,a)", "complex_joins"),
]
for q in queries:
query, name = q
testEmit(query, name)
| bsd-3-clause | Python |
|
3aea50fd086975cfdcc6a337b2ff5a6cace8ce95 | Create kuubio.py | botlabio/autonomio,botlabio/autonomio | kuubio.py | kuubio.py | from keras_diagram import ascii
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.models import model_from_json
from load_data import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
# %matplotlib inline  (IPython magic; only valid inside a notebook)
import warnings
warnings.filterwarnings("ignore")
import spacy as sp
import ascify as asc
def vectorize_text(data):
'''
OUTPUT: a list of lists with the word vectors in lists
USE EXAMPLE: vectors = vectorize_text(list_with_strings)
'''
nlp = sp.load('en')
c = len(data)
l = []
for i in range(c):
asc_string = asc.Ascify(str(data[i])).ascify()
uni_string = unicode(asc_string)
vec_obj = nlp(uni_string)
vector = vec_obj.vector
l.append(vector)
return l
def _transform_data(x_text,y_var,data):
x = vectorize_text(data[x_text])
y = [data[y_var]]
try:
if len(y) == 1:
y = map(list, zip(*y))
if len(y) == len(x):
x1 = x # we do this to retain the original y and x
y1 = y
df_x = pd.DataFrame(x1)
df_y = pd.DataFrame(y1)
df_y = pd.DataFrame(df_y[0] >= df_y[0].mean()).astype(int)
#df_y = pd.DataFrame(df_y[0] >= 0.2).astype(int) # convert to 1/0
except:
print "ERROR: something went"
return df_x,df_y
def _load_model(filename):
json_file = open(filename, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
return loaded_model
def kuubio(X,Y,data,dims=8,epoch=5,model='model',save_model=False):
X,Y = _transform_data(X,Y,data)
'''
NOTE: 1) the data has to be in float or something that
goes nicely in to 'float32'
2) the data has to be in pandas dataframe
with no column names (other than int)
'''
if model != 'model':
model = _load_model(model)
np.random.seed(7)
X = X.astype('float32')
Y = Y.astype('float32')
X = np.array(X)
Y = np.array(Y)
X = X[:,0:dims]
#Y = Y[:,8]
model = Sequential()
model.add(Dense(dims+4, input_dim=dims, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(X, Y, epochs=epoch, batch_size=10)
scores = model.evaluate(X, Y)
#print(history.history.keys())
print ""
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print ""
print(ascii(model))
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
ax1.plot(history.history['acc'])
ax2.plot(history.history['loss'])
ax1.set_title('model accuracy')
ax1.set_xlabel('epoch')
ax1.set_ylabel('accuracy')
ax2.set_title('model loss')
ax2.set_xlabel('epoch')
ax2.set_ylabel('loss')
fig.set_size_inches(12,3)
if save_model == True:
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("model.h5")
print("Saved model to disk")
fig.show()
| mit | Python |
|
acfa4877ac50a3895cc9f9cb2e349f948d4b8001 | add a script to fetch official hero data from battle.net | rhots/automation | bin/get_official_heroes.py | bin/get_official_heroes.py | import sys
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
"""
The official heroes listing on battle.net is populated by a list of
Objects defined in JS (window.heroes). This script fetches the full
list and outputs a list of tuples relating official hero names to the
battle.net slugs.
To run this script, you should install phantomjs in addition to the
import dependencies.
"""
def get_heroes_data():
# We prefer the PhantomJS driver to avoid opening any GUI windows.
browser = webdriver.PhantomJS()
browser.get("http://us.battle.net/heroes/en/heroes/#/")
heroes = browser.execute_script("return window.heroes;")
browser.quit()
return heroes
def main():
heroes = get_heroes_data()
heroes = [(h['name'], h['slug']) for h in heroes]
print(heroes)
if __name__ == "__main__":
main()
| isc | Python |
|
381c2537eff5003758d552281edfd885ee40ab80 | Add migrations | praekelt/sideloader,praekelt/sideloader,praekelt/sideloader,praekelt/sideloader | sideloader/migrations/0003_auto_20141203_1708.py | sideloader/migrations/0003_auto_20141203_1708.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sideloader', '0002_auto_20141203_1611'),
]
operations = [
migrations.AlterField(
model_name='project',
name='build_script',
field=models.CharField(default=b'', max_length=255, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='project',
name='package_name',
field=models.CharField(default=b'', max_length=255, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='project',
name='postinstall_script',
field=models.CharField(default=b'', max_length=255, blank=True),
preserve_default=True,
),
]
| mit | Python |
|
f405829f9f4bed9c833f7e25dc97610e34b5dd71 | Add JSONField tests | renalreg/cornflake | cornflake/tests/fields/test_json_field.py | cornflake/tests/fields/test_json_field.py | import pytest
from cornflake.fields import JSONField
@pytest.mark.parametrize(('value', 'expected'), [
(True, True),
(False, False),
(123, 123),
(123.456, 123.456),
('foo', 'foo'),
(['foo', 'bar'], ['foo', 'bar']),
({'foo': 'bar'}, {'foo': 'bar'})
])
def test_to_representation(value, expected):
assert JSONField().to_representation(value) == expected
@pytest.mark.parametrize(('data', 'expected'), [
(True, True),
(False, False),
(123, 123),
(123.456, 123.456),
('foo', 'foo'),
(['foo', 'bar'], ['foo', 'bar']),
({'foo': 'bar'}, {'foo': 'bar'})
])
def test_to_internal_value(data, expected):
assert JSONField().to_internal_value(data) == expected
| mit | Python |
|
8df06647abc7e5125e88af68000f04ac9eca3290 | add missing file | nschloe/quadpy | quadpy/_exception.py | quadpy/_exception.py | class QuadpyError(Exception):
pass
| mit | Python |
|
f641c8aa8e2eb5d98a90a10813fae6af4b136133 | Add command that reindexes all tenants in parallel | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/clients/management/commands/reindex.py | bluebottle/clients/management/commands/reindex.py | from optparse import make_option
import subprocess
from multiprocessing import Pool
from bluebottle.common.management.commands.base import Command as BaseCommand
from bluebottle.clients.models import Client
def reindex(schema_name):
print(f'reindexing tenant {schema_name}')
return (
schema_name,
subprocess.call(
f'./manage.py tenant_command -s {schema_name} search_index --rebuild -f',
shell=True
)
)
class Command(BaseCommand):
help = 'Reindex all tenants'
option_list = BaseCommand.options + (
make_option(
'--processes',
default=8,
help='How many processes run in parallel'
),
)
def handle(self, *args, **options):
pool = Pool(processes=options['processes'])
tasks = [pool.apply_async(reindex, args=[str(tenant.schema_name)]) for tenant in Client.objects.all()]
results = [result.get() for result in tasks]
for tenant, result in results:
if result != 0:
print(f'Tenant failed to index: {tenant}')
pool.close()
| bsd-3-clause | Python |
|
1bfc53f5645d6dc7dbbdd020f23e86bebfdc2fc9 | Add quick.py (quicksort) | jailuthra/misc,jailuthra/misc,jailuthra/misc,jailuthra/misc | python/quick.py | python/quick.py | #!/usr/bin/env python3
def main():
arr = []
fname = sys.argv[1]
with open(fname, 'r') as f:
for line in f:
arr.append(int(line.rstrip('\r\n')))
quicksort(arr, start=0, end=len(arr)-1)
print('Sorted list is: ', arr)
return
def quicksort(arr, start, end):
if end - start < 1:
return 0
b = start + 1
for i in range(start+1, end+1):
if arr[i] <= arr[start]:
arr[b], arr[i] = arr[i], arr[b]
b += 1
arr[start], arr[b-1] = arr[b-1], arr[start]
quicksort(arr, start, b-1)
quicksort(arr, b, end)
if __name__ == '__main__':
main()
| mit | Python |
|
25a6ad2a6b37bac4dd553c4e534092f2261d6037 | Add response classes | rahmonov/agile-crm-python | client/responses.py | client/responses.py |
class SuccessResponse:
def __new__(cls, data=None, text=None, *args, **kwargs):
return {
'status_code': 200,
'ok': True,
'data': data,
'text': text
}
class NonSuccessResponse:
def __new__(cls, status=400, text=None, *args, **kwargs):
return {
'status_code': status,
'ok': True,
'text': text
}
class ErrorResponse:
def __new__(cls, status=400, text=None, *args, **kwargs):
return {
'status_code': status,
'ok': False,
'text': text
}
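# Usage sketch (illustrative, not part of the original module):
# SuccessResponse(data={'id': 1}, text='created')
# -> {'status_code': 200, 'ok': True, 'data': {'id': 1}, 'text': 'created'}
# ErrorResponse(status=500, text='boom')
# -> {'status_code': 500, 'ok': False, 'text': 'boom'}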
| mit | Python |
|
bd4ccf9c4876b84cad23f68ea81ecadae733589a | add example for raw binary IO with header | aringh/odl,kohr-h/odl,odlgroup/odl,aringh/odl,odlgroup/odl,kohr-h/odl | examples/tomo/data/raw_binary_io_with_header.py | examples/tomo/data/raw_binary_io_with_header.py | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Example for the usage of raw binary reader/writer with header.
The code below defines a simple file format for 2D images using
- shape
- origin
- pixel size
First, a file is written using `FileWriterRawBinaryWithHeader`. This
requires a header in a certain format.
Then, the same file is read again using a file specification and the
`FileReaderRawBinaryWithHeader`. The specification is given as a
sequence of dictionaries with a certain structure.
"""
from __future__ import print_function
from collections import OrderedDict
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
import numpy as np
import scipy.misc
import tempfile
import odl
# --- Writing --- #
# Create some test data. We arbitrarily define origin and pixel size.
# In practice, these could come from a `DiscreteLp` space as `mid_pt`
# and `cell_sides` properties.
image = scipy.misc.ascent()
shape = np.array(image.shape, dtype='int32')
origin = np.array([-1.0, 0.0], dtype='float32')
px_size = np.array([0.1, 0.1], dtype='float32')
# To make it storable as binary data, we take the string version of the data
# type with a fixed size of 10 characters and encode it as array of single
# bytes.
dtype = np.fromiter(str(image.dtype).ljust(10), dtype='S1')
# Create the header
# Use an OrderedDict for the header to have a predictable order when
# looping through it
header = OrderedDict()
header['shape'] = {'offset': 0, 'value': shape}
header['origin'] = {'offset': 8, 'value': origin}
header['px_size'] = {'offset': 16, 'value': px_size}
header['dtype'] = {'offset': 24, 'value': dtype}
# Initialize the writer with a file and the header. We use a temporary
# file in order to keep the workspace clean.
file = tempfile.NamedTemporaryFile()
writer = odl.tomo.data.FileWriterRawBinaryWithHeader(file, header)
# Write header and data to the file
writer.write(image)
# Print some stuff to see that the sizes are correct
print('File size ({}) = Image size ({}) + Header size ({})'
''.format(file.seek(0, 2), image.nbytes, writer.header_bytes))
# --- Reading --- #
# We build a specification for our file format that mirrors `header`.
header_fields = [
{'name': 'shape', 'offset': 0, 'size': 8, 'dtype': 'int32'},
{'name': 'origin', 'offset': 8, 'size': 8, 'dtype': 'float32'},
{'name': 'px_size', 'offset': 16, 'size': 8, 'dtype': 'float32'},
{'name': 'dtype', 'offset': 24, 'size': 10, 'dtype': 'S1'}
]
# Now we create a reader and read from our newly created file.
# TODO: make this simpler after fixing the properties
reader = odl.tomo.data.FileReaderRawBinaryWithHeader(
file, header_fields, set_attrs=False)
reader.header_bytes = writer.header_bytes
# Read header and data in one go
header_file, image_file = reader.read()
# Check that everything has been reconstructed correctly
shape_file = header_file['shape']['value']
origin_file = header_file['origin']['value']
px_size_file = header_file['px_size']['value']
dtype_file = header_file['dtype']['value']
print('shape -- original {}, from file {}'.format(shape, shape_file))
print('origin -- original {}, from file {}'.format(origin, origin_file))
print('px_size -- original {}, from file {}'.format(px_size, px_size_file))
print('dtype -- original {}, from file {}'
''.format(str(image.dtype), ''.join(dtype_file.astype(str))))
if plt is not None:
plt.figure()
plt.title('Original image')
plt.imshow(image, cmap='Greys_r')
plt.figure()
plt.title('Image from file')
    plt.imshow(image_file, cmap='Greys_r')
plt.show()
| mpl-2.0 | Python |
|
d89bb401926698dc829be937d8f9c1959ecfd580 | make ok,eq actual functions | fxstein/cement,akhilman/cement,datafolklabs/cement,rjdp/cement,datafolklabs/cement,fxstein/cement,rjdp/cement,akhilman/cement,akhilman/cement,rjdp/cement,datafolklabs/cement,fxstein/cement | cement/utils/test.py | cement/utils/test.py | """Cement testing utilities."""
import unittest
from ..core import backend, foundation
# shortcuts
from nose.tools import ok_ as ok
from nose.tools import eq_ as eq
from nose.tools import raises
from nose import SkipTest
class TestApp(foundation.CementApp):
"""
Basic CementApp for generic testing.
"""
class Meta:
label = 'test'
config_files = []
argv = []
class CementTestCase(unittest.TestCase):
"""
A sub-class of unittest.TestCase.
"""
def __init__(self, *args, **kw):
super(CementTestCase, self).__init__(*args, **kw)
def setUp(self):
"""
Sets up self.app with a generic TestApp(). Also resets the backend
hooks and handlers so that everytime an app is created it is setup
clean each time.
"""
self.app = self.make_app()
def make_app(self, *args, **kw):
"""
Create a generic app using TestApp. Arguments and Keyword Arguments
are passed to the app.
"""
self.reset_backend()
return TestApp(*args, **kw)
def reset_backend(self):
"""
Remove all registered hooks and handlers from the backend.
"""
for _handler in backend.handlers.copy():
del backend.handlers[_handler]
for _hook in backend.hooks.copy():
del backend.hooks[_hook]
def ok(self, expr, msg=None):
"""Shorthand for assert."""
return ok(expr, msg)
def eq(self, a, b, msg=None):
"""Shorthand for 'assert a == b, "%r != %r" % (a, b)'. """
return eq(a, b, msg) | """Cement testing utilities."""
import unittest
from ..core import backend, foundation
# shortcuts
from nose.tools import ok_ as ok
from nose.tools import eq_ as eq
from nose.tools import raises
from nose import SkipTest
class TestApp(foundation.CementApp):
"""
Basic CementApp for generic testing.
"""
class Meta:
label = 'test'
config_files = []
argv = []
class CementTestCase(unittest.TestCase):
"""
A sub-class of unittest.TestCase.
"""
ok = ok
eq = eq
def __init__(self, *args, **kw):
super(CementTestCase, self).__init__(*args, **kw)
def setUp(self):
"""
Sets up self.app with a generic TestApp(). Also resets the backend
hooks and handlers so that everytime an app is created it is setup
clean each time.
"""
self.app = self.make_app()
def make_app(self, *args, **kw):
"""
Create a generic app using TestApp. Arguments and Keyword Arguments
are passed to the app.
"""
self.reset_backend()
return TestApp(*args, **kw)
def reset_backend(self):
"""
Remove all registered hooks and handlers from the backend.
"""
for _handler in backend.handlers.copy():
del backend.handlers[_handler]
for _hook in backend.hooks.copy():
del backend.hooks[_hook]
| bsd-3-clause | Python |
8e1e6585c4bfa76ebbd945d765c6a4a3dc98025d | Add new package: dnstracer (#18933) | LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/dnstracer/package.py | var/spack/repos/builtin/packages/dnstracer/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dnstracer(Package):
"""Dnstracer determines where a given Domain Name Server gets
its information from, and follows the chain of DNS servers back to
the servers which know the data."""
homepage = "https://github.com/Orc/dnstracer"
git = "https://github.com/Orc/dnstracer.git"
version('master', branch='master')
phases = ['configure', 'build', 'install']
def configure(self, spec, prefix):
configure = Executable('./configure.sh')
configure('--prefix={0}'.format(prefix))
def build(self, spec, prefix):
make()
def install(self, spec, prefix):
make('install')
| lgpl-2.1 | Python |
|
2f889b045c1a03b3b046127380f15909ea117265 | add new package (#25844) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-kornia/package.py | var/spack/repos/builtin/packages/py-kornia/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyKornia(PythonPackage):
"""Open Source Differentiable Computer Vision Library for PyTorch."""
homepage = "https://www.kornia.org/"
pypi = "kornia/kornia-0.5.10.tar.gz"
version('0.5.10', sha256='428b4b934a2ba7360cc6cba051ed8fd96c2d0f66611fdca0834e82845f14f65d')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pytest-runner', type='build')
depends_on('py-torch@1.6.0:', type=('build', 'run'))
| lgpl-2.1 | Python |
|
e73aac38882b90e7219035800b400c2ed1e181ef | add http data wrapper that can be used to specify options for a specific request | sassoftware/robj | robj/lib/httputil.py | robj/lib/httputil.py | #
# Copyright (c) 2010 rPath, Inc.
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
"""
Module for httplib customizations.
"""
from robj.lib import util
class HTTPData(object):
__slots__ = ('data', 'method', 'size', 'headers', 'contentType', 'callback',
'chunked', 'bufferSize', 'rateLimit', )
CHUNK_SIZE = 262144
BUFFER_SIZE = 8192
def __init__(self, data=None, method=None, size=None, headers=None,
contentType=None, callback=None, chunked=None, bufferSize=None,
rateLimit=None):
if headers is None:
headers = {}
if data is not None:
if hasattr(data, 'read'):
if chunked:
headers['Transfer-Encoding'] = 'Chunked'
else:
data = data.encode('utf-8')
size = len(data)
self.method = method
self.data = data
self.headers = headers
self.size = size
self.contentType = contentType
self.callback = callback
self.chunked = chunked
self.bufferSize = bufferSize or self.BUFFER_SIZE
self.rateLimit = rateLimit
def iterheaders(self):
for k, v in sorted(self.headers.iteritems()):
yield k, str(v)
# Don't send a Content-Length header if chunking
if not self.chunked and self.size is not None:
yield 'Content-Length', str(self.size)
if self.contentType is not None:
yield 'Content-Type', self.contentType
def writeTo(self, connection):
if self.data is None:
return
if not hasattr(self.data, 'read'):
connection.send(self.data)
return
if not self.chunked:
util.copyfileobj(self.data, connection, bufSize=self.bufferSize,
callback=self.callback, rateLimit=self.rateLimit,
sizeLimit=self.size)
return
assert self.size is not None
# keep track of the total amount of data sent so that the
# callback passed in to copyfileobj can report progress correctly
sent = 0
chunk = self.CHUNK_SIZE
while self.size - sent:
if chunk > self.size - sent:
chunk = self.size - sent
# first send the hex-encoded size
connection.send('%x\r\n' % chunk)
# then the chunk of data
util.copyfileobj(self.data, connection, bufSize=chunk,
callback=self.callback, rateLimit=self.rateLimit,
sizeLimit=chunk, total=sent)
# send \r\n after the chunked data
connection.send("\r\n")
sent += chunk
# terminate the chunked encoding
connection.send('0\r\n\r\n')
def isHTTPData(obj):
return isinstance(obj, HTTPData)
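

# Illustrative usage sketch (the file object and connection below are
# hypothetical; the real callers live elsewhere in robj):
#
#   fobj = open('payload.bin', 'rb')
#   body = HTTPData(data=fobj, method='PUT', size=1024,
#                   contentType='application/octet-stream', chunked=True)
#   for name, value in body.iterheaders():
#       connection.putheader(name, value)
#   body.writeTo(connection)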
| apache-2.0 | Python |
|
bc2abe4c295a371358064952e6c3afa395a4bd13 | Rename Longest-Common-Prefix.py to LongestCommonPrefixtwo.py | chengjinlee/leetcode-solution-python,chengjinlee/leetcode | leetcode/14.-Longest-Common-Prefix/LongestCommonPrefixtwo.py | leetcode/14.-Longest-Common-Prefix/LongestCommonPrefixtwo.py | #!/usr/bin/python
#_*_ coding:utf-8 _*_
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
        if not strs:  # guards against both None and an empty list
            return ''
        else:
            strNum = len(strs)  # number of strings in the list
            prefix = strs[0]  # start with the first string as the candidate prefix
for i in range(strNum):
if prefix == '' or strs[i] == '':
return ''
                comlen = min(len(prefix), len(strs[i]))  # only compare up to the shorter string
j = 0
                while j < comlen and prefix[j] == strs[i][j]:  # advance while the characters match
j += 1
prefix = prefix[0:j]
return prefix
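

if __name__ == '__main__':
    # Illustrative checks of the horizontal-scan approach above
    assert Solution().longestCommonPrefix(["flower", "flow", "flight"]) == "fl"
    assert Solution().longestCommonPrefix([]) == ''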
| mit | Python |
|
5f9c9500296627a94221ecd9614209a2c791e8b9 | remove pointless condition | QuiteQuiet/PokemonShowdownBot | plugins/messages.py | plugins/messages.py | import random
class Message:
def __init__(self, sent, msg):
self.sent = sent
self.msg = msg
def replyFormat(self):
return 'From {user}: {msg}'.format(user = self.sent, msg = self.msg)
class MessageDatabase:
def __init__(self):
self.messages = {}
def pendingMessages(self, user):
cnt = len(self.messages[user])
return 'You have {nr} message{s} waiting for you.\nUse ~read [number] to get [number] of messages shown to you'.format(nr = cnt, s = 's' if cnt > 1 else '')
def addMessage(self, to, sent, msg):
if to not in self.messages: self.messages[to] = {}
if sent in self.messages[to]: return False
self.messages[to][sent] = Message(sent, msg)
return True
def getMessage(self, user):
return self.removeRandomMessage(user).replyFormat()
def getMessages(self, user, amnt):
''' This removes amnt number of messages from the message service '''
# This can be super-spammy for users with a lot of pending messages
# as they can opt to look at all at once
reply = ''
if amnt > len(self.messages[user]): amnt = len(self.messages[user])
while amnt > 0:
reply += self.getMessage(user) + ('\n' if amnt > 1 else '')
amnt -= 1
# Remove the user from the list if there's no messages left
if not self.messages[user]:
self.messages.pop(user)
return reply
def getAllMessages(self, user):
''' This gets and delete every message to this user from storage '''
# No need to test for existance, this assumes a message exists
# and usage should first test for existance.
messages = self.removeAllMessages(user)
combine = []
for msg in messages:
combine.append(messages[msg].replyFormat())
return '\n'.join(combine)
def hasMessage(self, user):
return user in self.messages
def alreadySentMessage(self, user, frm):
return user in self.messages and frm in self.messages[user]
def removeRandomMessage(self, to):
return self.messages[to].pop(random.choice(list(self.messages[to].keys())), None)
# Unused but still supported
def removeAllMessages(self, to):
return self.messages.pop(to, None)
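

# Illustrative flow (the bot command handlers that call this live elsewhere):
#
#   db = MessageDatabase()
#   db.addMessage('alice', 'bob', 'hello!')   # queue a message for alice
#   db.hasMessage('alice')                    # True
#   db.getMessages('alice', 1)                # 'From bob: hello!'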
| import random
class Message:
def __init__(self, sent, msg):
self.sent = sent
self.msg = msg
def replyFormat(self):
return 'From {user}: {msg}'.format(user = self.sent, msg = self.msg)
class MessageDatabase:
def __init__(self):
self.messages = {}
def pendingMessages(self, user):
cnt = len(self.messages[user])
return 'You have {nr} message{s} waiting for you.\nUse ~read [number] to get [number] of messages shown to you'.format(nr = cnt, s = 's' if cnt > 1 else '')
def addMessage(self, to, sent, msg):
if to not in self.messages: self.messages[to] = {}
if sent in self.messages[to]: return False
self.messages[to][sent] = Message(sent, msg)
return True
def getMessage(self, user):
return self.removeRandomMessage(user).replyFormat()
def getMessages(self, user, amnt):
''' This removes amnt number of messages from the message service '''
# This can be super-spammy for users with a lot of pending messages
# as they can opt to look at all at once
reply = ''
if amnt > len(self.messages[user]): amnt = len(self.messages[user])
while amnt > 0 and len(self.messages[user]) > 0:
reply += self.getMessage(user) + ('\n' if amnt > 1 else '')
amnt -= 1
# Remove the user from the list if there's no messages left
if not self.messages[user]:
self.messages.pop(user)
return reply
def getAllMessages(self, user):
''' This gets and delete every message to this user from storage '''
# No need to test for existance, this assumes a message exists
# and usage should first test for existance.
messages = self.removeAllMessages(user)
combine = []
for msg in messages:
combine.append(messages[msg].replyFormat())
return '\n'.join(combine)
def hasMessage(self, user):
return user in self.messages
def alreadySentMessage(self, user, frm):
return user in self.messages and frm in self.messages[user]
def removeRandomMessage(self, to):
return self.messages[to].pop(random.choice(list(self.messages[to].keys())), None)
# Unused but still supported
def removeAllMessages(self, to):
return self.messages.pop(to, None)
| mit | Python |
f0f72e5d8a64f7f49406022fd170808417220289 | Create publish.py | HuStmpHrrr/gjms | clickonce/publish.py | clickonce/publish.py | from __future__ import print_function
import subprocess
import os
import sys
import shutil
import datetime
import distutils.dir_util
if sys.version_info < (3,):
input = raw_input
str = unicode
pwd = os.getcwd()
appver_file = r'.\AppVer'
target_shares = {
'release': [],
'test' : [],
'dev' : []
}
# it needs this transformation because msbuild does a direct string concatenation instead of a path join.
target_shares = {k: [p if p.endswith(os.path.sep) else p+os.path.sep for p in v] for k, v in target_shares.items()}
output_dir = r'bin\publish'
publish_dir = r'bin\publishapp.publish'
msbuild_folder = r'%ProgramFiles%\MSBuild\12.0\bin\amd64' \
if os.path.exists(r'%s\MSBuild\12.0\bin\amd64' % os.environ['PROGRAMFILES'])\
else r'%ProgramFiles(x86)%\MSBuild\12.0\bin\amd64'
def get_appver():
with open(appver_file) as fd:
return fd.readline().strip()
def incr_appver(ver):
vers = ver.split('.')
vers[-1] = str(int(vers[-1]) + 1)
return '.'.join(vers)
def set_appver(ver):
with open(appver_file, 'w') as fd:
fd.write(ver)
def get_cmd(target, ver, env):
template = r'"{0}\msbuild" /t:clean;publish /property:OutputPath={1},PublishUrl={2},InstallUrl={2},UpdateUrl={2},ApplicationVersion={3},MinimumRequiredVersion={3},AssemblyName="{4}"'
cmd = template.format(msbuild_folder, output_dir, target, ver, 'NST System Configurator '+env)
return cmd
if __name__=='__main__':
error = {}
print('current python implementation version is', sys.version)
print('currently working in: %s' % pwd)
print('please make sure this script runs directly under the project folder.')
env = input('build environment(%s): ' % ', '.join(sorted(target_shares.keys())))
while env not in target_shares:
print("nonexisting environment: {}".format(env), file=sys.stderr)
env = input('build environment(%s): ' % ', '.join(sorted(target_shares.keys())))
ver = incr_appver(get_appver())
for i, p in enumerate(target_shares[env]):
cmd = get_cmd(p, ver, env+str(i))
print('executing {}'.format(cmd))
print('----------------------------------')
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
with proc.stdout:
for l in proc.stdout:
print(l.strip().decode('utf-8'))
        proc.wait()  # let msbuild finish so that returncode is populated
print('----------------------------------')
if proc.returncode == 0:
try:
distutils.dir_util.copy_tree(publish_dir, p)
except Exception as e:
error[p] = e
sys.stderr.write("error occurred: %s\n" % str(e))
distutils.dir_util.copy_tree(publish_dir, r'bin\backup' + '\\' + str(i))
else:
print("error: %d" % proc.returncode, file=sys.stderr)
        print()
if len(error) != 0:
print('Error occurred:', file=sys.stderr)
for k, e in error.items():
print('%s: %s\n' % (k, str(e)), file=sys.stderr)
print('has backed up the folder.', file=sys.stderr)
try:
set_appver(ver)
except IOError as e:
print("failed to write to file: %s" % str(e), file=sys.stderr)
print('next application version will be %s.' % incr_appver(ver), file=sys.stderr)
input('press enter to continue...')
| lgpl-2.1 | Python |
|
badbf8c89216b97ac29ea3582d99d28535f82a7e | Update __init__.py | PySlither/Slither,PySlither/Slither | slither/__init__.py | slither/__init__.py | from . import slither
__all__ = ['slither','Mouse','Stage','Sprite','Sound']
| from slither import slither
__all__ = ['slither','Mouse','Stage','Sprite','Sound']
| mit | Python |
79df8ab80e6b14f16af125895f5b7338e5c41a60 | add IOTools | morgenst/PyAnalysisTools,morgenst/PyAnalysisTools,morgenst/PyAnalysisTools | base/IOTools.py | base/IOTools.py | import ROOT
import os
import logging

_logger = logging.getLogger(__name__)

class Writer:
def __init__(self, directory=None):
"""
:param directory:
"""
if directory is None:
directory = os.path.abspath(os.curdir)
self.dir = directory
self.__check_and_create_directory(self.dir)
def __check_and_create_directory(self, directory):
_logger.debug("Check if directory: %s exists" % (directory))
if not os.path.exists(directory):
_logger.debug("Create directory: %s exists" % (directory))
os.makedirs(directory)
def dump_canvas(self, canvas, message=None, image=None):
if image:
self.write_canvas_to_file(canvas, image)
else:
if message is None:
image = raw_input("save canvas as (<RET> for skipping): ")
else:
image = raw_input(message)
if image:
self.write_canvas_to_file(canvas, image)
def write_canvas_to_file(self, canvas, name, extension='pdf'):
ext = self.parse_extension_from_file_name(name)
if ext is not None:
extension = ext
name = ''.join(name.split('.')[0:-1])
if not extension.startswith('.'):
extension = '.' + extension
if extension == '.root':
self.write_object_to_root_tile(canvas, name + extension)
else:
canvas.SaveAs(os.path.join(os.path.join(self.dir,
name + extension)))
def write_object_to_root_tile(self, obj, filename, dir=''):
f = ROOT.gROOT.GetListOfFiles().FindObject(filename)
if not f:
f = ROOT.TFile.Open(filename, 'UPDATE')
d = f.GetDirectory(dir)
if not d:
            d = f.mkdir(dir)  # create the directory inside the ROOT file
d.cd()
obj.Write()
def parse_extension_from_file_name(self, name):
ext = name.split('.')[-1]
if ext is name:
return None
return ext
def set_directory(self, directory):
self.__check_and_create_directory(directory)
self.dir = directory
| mit | Python |
|
2786dd91b0bb7dc8849e3549ff40de28d72d40d5 | add a django multi-database router | muccg/rdrf,muccg/rdrf,muccg/rdrf,muccg/rdrf,muccg/rdrf | rdrf/rdrf/db.py | rdrf/rdrf/db.py | from io import StringIO
import os
from django.core.management import call_command
from django.db import connections
class RegistryRouter:
# Whether clinical db is configured at all.
one_db = "clinical" not in connections
# Whether clinical db is configured to be the same as main db.
same_db = (one_db or
connections["default"].get_connection_params() ==
connections["clinical"].get_connection_params())
clinical_models = (
("rdrf", "clinical"),
("rdrf", "questionnaireresponsedata"),
# fixme: move CDEFile to clinical database. This is just
# tricky with migrations.
# ("rdrf", "cdefile"),
("rdrf", "patientdata"),
("rdrf", "formprogress"),
("rdrf", "modjgo"),
)
@classmethod
def is_clinical(cls, app_label, model_name):
return (app_label, model_name) in cls.clinical_models
def choose_db_model(self, model):
return self.choose_db(model._meta.app_label, model._meta.model_name)
def choose_db(self, app_label, model_name):
clinical = self.is_clinical(app_label, model_name)
return "clinical" if clinical and not self.one_db else "default"
def db_for_read(self, model, **hints):
return self.choose_db_model(model)
def db_for_write(self, model, **hints):
return self.choose_db_model(model)
def allow_migrate(self, db, app_label, model_name=None, **hints):
return (db == "default" and self.same_db or
db == self.choose_db(app_label, model_name))
def reset_sql_sequences(apps):
"""
Executes the necessary SQL to reset the primary key counters for
all tables in `apps`.
"""
os.environ['DJANGO_COLORS'] = 'nocolor'
commands = StringIO()
for app in apps:
call_command('sqlsequencereset', app, stdout=commands)
_execute_reset_sql_sequences(commands.getvalue().splitlines())
def _execute_reset_sql_sequences(commands):
# this gets nasty because the --database option of
# sqlsequencereset command doesn't work.
clinical_tables = ["_".join(m) for m in RegistryRouter.clinical_models]
def for_db(database):
def _for_db(command):
is_clinical = any(t in command for t in clinical_tables)
return (not command.startswith("SELECT") or
(database == "default" and not is_clinical) or
(database == "clinical" and is_clinical) or
(database == "default" and "clinical" not in connections))
return _for_db
for database in ["default", "clinical"]:
if database in connections:
cursor = connections[database].cursor()
cursor.execute("\n".join(filter(for_db(database), commands)))
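

# Illustrative settings wiring for this router (the module path and database
# aliases are assumptions; adjust them to the project's actual settings):
#
#   DATABASE_ROUTERS = ["rdrf.db.RegistryRouter"]
#   DATABASES = {
#       "default": {...},
#       "clinical": {...},  # optional; omit it to keep clinical models in "default"
#   }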
| agpl-3.0 | Python |
|
0b6f6a9fd3916d8a028d5c3ccf4ca4a0277b9781 | Add arena class prototype | Finikssky/gladiators | src/arena.py | src/arena.py | import jsonpickle
class ArenaType():
Circle = 0
Square = 1
class ArenaCoverType():
Soil = 0
Sand = 1
Grass = 2
Stone = 3
class Arena():
def __init__(self, name, size, stype, cover):
self.name = name
self.size = size
self.type = stype
self.cover = cover | mit | Python |
|
4b0a21dd813d58370805053e60376f64b5927cd9 | Add tutorial for MakeNumpyDataFrame | olifre/root,olifre/root,olifre/root,olifre/root,olifre/root,olifre/root,olifre/root,olifre/root,olifre/root,olifre/root,olifre/root | tutorials/dataframe/df032_MakeNumpyDataFrame.py | tutorials/dataframe/df032_MakeNumpyDataFrame.py | ## \file
## \ingroup tutorial_dataframe
## \notebook
## Read data from Numpy arrays into RDataFrame.
##
## \macro_code
## \macro_output
##
## \date March 2021
## \author Stefan Wunsch (KIT, CERN)
import ROOT
import numpy as np
# Let's create some data in numpy arrays
x = np.array([1, 2, 3], dtype=np.int32)
y = np.array([4, 5, 6], dtype=np.float64)
# Read the data with RDataFrame
# The column names in the RDataFrame are defined by the keys of the dictionary.
# Please note that only fundamental types (int, float, ...) are supported.
df = ROOT.RDF.MakeNumpyDataFrame({'x': x, 'y': y})
# You can now use the RDataFrame as usual, e.g. add a column ...
df = df.Define('z', 'x + y')
# ... or print the content
df.Display().Print()
# ... or save the data as a ROOT file
df.Snapshot('tree', 'df032_MakeNumpyDataFrame.root')
| lgpl-2.1 | Python |
|
43eb4f930f14fcf693f0656a3f0bbe749ed98d2e | Move subgraph attribute copies tests to a separate file. | dmoliveira/networkx,dmoliveira/networkx,dhimmel/networkx,ghdk/networkx,wasade/networkx,bzero/networkx,debsankha/networkx,goulu/networkx,bzero/networkx,NvanAdrichem/networkx,debsankha/networkx,bzero/networkx,RMKD/networkx,yashu-seth/networkx,JamesClough/networkx,tmilicic/networkx,ionanrozenfeld/networkx,farhaanbukhsh/networkx,ghdk/networkx,harlowja/networkx,jakevdp/networkx,blublud/networkx,harlowja/networkx,kernc/networkx,chrisnatali/networkx,Sixshaman/networkx,nathania/networkx,blublud/networkx,cmtm/networkx,SanketDG/networkx,chrisnatali/networkx,kernc/networkx,dhimmel/networkx,farhaanbukhsh/networkx,harlowja/networkx,nathania/networkx,dhimmel/networkx,RMKD/networkx,michaelpacer/networkx,sharifulgeo/networkx,chrisnatali/networkx,jakevdp/networkx,sharifulgeo/networkx,nathania/networkx,debsankha/networkx,jfinkels/networkx,aureooms/networkx,aureooms/networkx,andnovar/networkx,blublud/networkx,farhaanbukhsh/networkx,RMKD/networkx,jcurbelo/networkx,beni55/networkx,dmoliveira/networkx,OrkoHunter/networkx,ghdk/networkx,ionanrozenfeld/networkx,jakevdp/networkx,ltiao/networkx,aureooms/networkx,ionanrozenfeld/networkx,kernc/networkx,sharifulgeo/networkx | networkx/algorithms/components/tests/test_subgraph_copies.py | networkx/algorithms/components/tests/test_subgraph_copies.py | """ Tests for subgraphs attributes
"""
from copy import deepcopy
from nose.tools import assert_equal
import networkx as nx
class TestSubgraphAttributesDicts:
def setUp(self):
self.undirected = [
nx.connected_component_subgraphs,
nx.biconnected_component_subgraphs,
]
self.directed = [
nx.weakly_connected_component_subgraphs,
nx.strongly_connected_component_subgraphs,
nx.attracting_component_subgraphs,
]
self.subgraph_funcs = self.undirected + self.directed
self.D = nx.DiGraph()
self.D.add_edge(1, 2, eattr='red')
self.D.add_edge(2, 1, eattr='red')
self.D.node[1]['nattr'] = 'blue'
self.D.graph['gattr'] = 'green'
self.G = nx.Graph()
self.G.add_edge(1, 2, eattr='red')
self.G.node[1]['nattr'] = 'blue'
self.G.graph['gattr'] = 'green'
def test_subgraphs_default_copy_behavior(self):
# Test the default behavior of subgraph functions
# For the moment (1.10) the default is to copy
for subgraph_func in self.subgraph_funcs:
G = deepcopy(self.G if subgraph_func in self.undirected else self.D)
SG = list(subgraph_func(G))[0]
assert_equal(SG[1][2]['eattr'], 'red')
assert_equal(SG.node[1]['nattr'], 'blue')
assert_equal(SG.graph['gattr'], 'green')
SG[1][2]['eattr'] = 'foo'
assert_equal(G[1][2]['eattr'], 'red')
assert_equal(SG[1][2]['eattr'], 'foo')
SG.node[1]['nattr'] = 'bar'
assert_equal(G.node[1]['nattr'], 'blue')
assert_equal(SG.node[1]['nattr'], 'bar')
SG.graph['gattr'] = 'baz'
assert_equal(G.graph['gattr'], 'green')
assert_equal(SG.graph['gattr'], 'baz')
def test_subgraphs_copy(self):
for subgraph_func in self.subgraph_funcs:
test_graph = self.G if subgraph_func in self.undirected else self.D
G = deepcopy(test_graph)
SG = list(subgraph_func(G, copy=True))[0]
assert_equal(SG[1][2]['eattr'], 'red')
assert_equal(SG.node[1]['nattr'], 'blue')
assert_equal(SG.graph['gattr'], 'green')
SG[1][2]['eattr'] = 'foo'
assert_equal(G[1][2]['eattr'], 'red')
assert_equal(SG[1][2]['eattr'], 'foo')
SG.node[1]['nattr'] = 'bar'
assert_equal(G.node[1]['nattr'], 'blue')
assert_equal(SG.node[1]['nattr'], 'bar')
SG.graph['gattr'] = 'baz'
assert_equal(G.graph['gattr'], 'green')
assert_equal(SG.graph['gattr'], 'baz')
def test_subgraphs_no_copy(self):
for subgraph_func in self.subgraph_funcs:
G = deepcopy(self.G if subgraph_func in self.undirected else self.D)
SG = list(subgraph_func(G, copy=False))[0]
assert_equal(SG[1][2]['eattr'], 'red')
assert_equal(SG.node[1]['nattr'], 'blue')
assert_equal(SG.graph['gattr'], 'green')
SG[1][2]['eattr'] = 'foo'
assert_equal(G[1][2]['eattr'], 'foo')
assert_equal(SG[1][2]['eattr'], 'foo')
SG.node[1]['nattr'] = 'bar'
assert_equal(G.node[1]['nattr'], 'bar')
assert_equal(SG.node[1]['nattr'], 'bar')
SG.graph['gattr'] = 'baz'
assert_equal(G.graph['gattr'], 'baz')
assert_equal(SG.graph['gattr'], 'baz')
| bsd-3-clause | Python |
|
f36a3e4e6cfbc5d3aa14017dcfea6e0fc67514f0 | add delete_environment command | briandilley/ebs-deploy,cfeduke/ebs-deploy,cookbrite/ebs-deploy | ebs_deploy/commands/delete_environment_command.py | ebs_deploy/commands/delete_environment_command.py | from ebs_deploy import out, parse_env_config
def add_arguments(parser):
"""
Args for the delete environment command
"""
parser.add_argument('-e', '--environment',
help='Environment name', required=True)
def execute(helper, config, args):
"""
Deletes an environment
"""
env_config = parse_env_config(config, args.environment)
cname_prefix = env_config.get('cname_prefix', None)
# env_name = args.environment
real_env_name = helper.environment_name_for_cname(cname_prefix)
environments = helper.get_environments()
for env in environments:
if env['EnvironmentName'] == real_env_name:
if env['Status'] != 'Ready':
out("Unable to delete " + env['EnvironmentName']
+ " because it's not in status Ready ("
+ env['Status'] + ")")
else:
out("Deleting environment: "+env['EnvironmentName'])
# helper.delete_environment(env['EnvironmentName'])
# environments_to_wait_for_term.append(env['EnvironmentName'])
out("Environment deleted")
return 0
| mit | Python |
|
cf4e468ed28a7e750adfbcd41235ac5b90cb562b | Add new package: diffmark (#18930) | LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/diffmark/package.py | var/spack/repos/builtin/packages/diffmark/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Diffmark(AutotoolsPackage):
"""Diffmark is a DSL for transforming one string to another."""
homepage = "https://github.com/vbar/diffmark"
git = "https://github.com/vbar/diffmark.git"
version('master', branch='master')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('libxml2')
| lgpl-2.1 | Python |
|
ddff4237ae0bb8dd2575265707a843f4497ccbf2 | Create headache.py | nbush/headache | headache.py | headache.py | """
python plaintext obfuscator
by n.bush
"""
import string
import random
def mess_maker(size=6, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def headache(text):
charlist = list(text)
obfuscated = []
class_a = mess_maker(10)
class_b = mess_maker(10)
css = """
<style>
span.%s {}
span.%s {color: transparent; letter-spacing:-1em;}
</style>
""" % (class_a, class_b)
obfuscated.append(css)
for i in charlist:
mess = mess_maker(10)
span = '<span class="%s">%s</span><span class="%s">%s</span>' % (class_a, i, class_b, mess)
obfuscated.append(span)
return ''.join(obfuscated)
print headache("Hi. This is copyable. Not.")
| mit | Python |
|
a7d6344428ef43374fb82f5b357968ec38402984 | Create test_step_motor_Model_28BYJ_48p.py | somchaisomph/RPI.GPIO.TH | test/test_step_motor_Model_28BYJ_48p.py | test/test_step_motor_Model_28BYJ_48p.py | from gadgets.motors.step_motor import Model_28BYJ_48
st_mot = Model_28BYJ_48([11,15,16,18])
for i in range(2):
st_mot.angular_step(60,direction=2,waiting_time=2,bi_direction=True)
| mit | Python |
|
4585d6426a6c2945a359bbe02c58702a07e68746 | Create new package. (#6209) | EmreAtes/spack,tmerrick1/spack,tmerrick1/spack,matthiasdiener/spack,EmreAtes/spack,krafczyk/spack,LLNL/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,matthiasdiener/spack,LLNL/spack,mfherbst/spack,matthiasdiener/spack,krafczyk/spack,iulian787/spack,matthiasdiener/spack,skosukhin/spack,tmerrick1/spack,skosukhin/spack,skosukhin/spack,skosukhin/spack,skosukhin/spack,LLNL/spack,iulian787/spack,EmreAtes/spack,mfherbst/spack,mfherbst/spack,LLNL/spack,matthiasdiener/spack,mfherbst/spack,mfherbst/spack,LLNL/spack,iulian787/spack,tmerrick1/spack,iulian787/spack,tmerrick1/spack,krafczyk/spack,EmreAtes/spack,krafczyk/spack | var/spack/repos/builtin/packages/r-gsubfn/package.py | var/spack/repos/builtin/packages/r-gsubfn/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGsubfn(RPackage):
"""gsubfn is like gsub but can take a replacement function or
certain other objects instead of the replacement string. Matches
and back references are input to the replacement function and
replaced by the function output. gsubfn can be used to split
strings based on content rather than delimiters and for
quasi-perl-style string interpolation. The package also has
facilities for translating formulas to functions and allowing
such formulas in function calls instead of functions. This can
be used with R functions such as apply, sapply, lapply, optim,
integrate, xyplot, Filter and any other function that expects
another function as an input argument or functions like cat or
sql calls that may involve strings where substitution is
desirable."""
homepage = "https://cran.r-project.org/package=gsubfn"
url = "https://cran.r-project.org/src/contrib/gsubfn_0.6-6.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/gsubfn"
version('0.6-6', '94195ff3502706c736d9c593c07252bc')
depends_on('r-proto', type=('build', 'run'))
| lgpl-2.1 | Python |
|
e76fa7d23894bb88d47b761f683b4bbd797ef889 | Add Helpers object for cleaner helper syntax | funkybob/knights-templater,funkybob/knights-templater | knights/utils.py | knights/utils.py |
class Helpers:
'''
Provide a cheaper way to access helpers
'''
def __init__(self, members):
for key, value in members.items():
setattr(self, key, value)
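

# Illustrative usage: attribute access replaces repeated dict lookups.
if __name__ == '__main__':
    helpers = Helpers({'upper': str.upper, 'lower': str.lower})
    assert helpers.upper('knights') == 'KNIGHTS'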
| mit | Python |
|
2838711c7fa12525c2ae6670bb130999654fe7ea | add shortest-palindrome | zeyuanxy/leet-code,EdisonAlgorithms/LeetCode,EdisonAlgorithms/LeetCode,zeyuanxy/leet-code,EdisonAlgorithms/LeetCode,zeyuanxy/leet-code | vol5/shortest-palindrome/shortest-palindrome.py | vol5/shortest-palindrome/shortest-palindrome.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-19 20:43:07
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-19 20:43:21
class Solution(object):
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
ss = s + '#' + s[::-1]
n = len(ss)
p = [0] * n
for i in xrange(1, n):
j = p[i - 1]
while j > 0 and ss[i] != ss[j]:
j = p[j - 1]
            p[i] = j + (ss[i] == ss[j])  # extend the KMP failure function
return s[p[-1]:][::-1] + s | mit | Python |
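

# Quick illustrative checks (classic examples for this problem):
if __name__ == '__main__':
    assert Solution().shortestPalindrome('aacecaaa') == 'aaacecaaa'
    assert Solution().shortestPalindrome('abcd') == 'dcbabcd'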
|
10f72d72e988bf4aa570e21b0e0d6979edb843a7 | add example "fit text path into a box" | mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf | examples/addons/fit_text_path_into_box.py | examples/addons/fit_text_path_into_box.py | # Copyright (c) 2021, Manfred Moitzi
# License: MIT License
from pathlib import Path
import ezdxf
from ezdxf import path, zoom
from ezdxf.math import Matrix44
from ezdxf.tools import fonts
from ezdxf.addons import text2path
DIR = Path('~/Desktop/Outbox').expanduser()
fonts.load()
doc = ezdxf.new()
doc.layers.new('OUTLINE')
doc.layers.new('FILLING')
msp = doc.modelspace()
attr = {'layer': 'OUTLINE', 'color': 1}
ff = fonts.FontFace(family="Arial")
sx, sy = 4, 2
# create target box
msp.add_lwpolyline([(0, 0), (sx, 0), (sx, sy), (0, sy)], close=True)
text_as_paths = text2path.make_paths_from_str("Squeeze Me", ff)
final_paths = path.fit_paths_into_box(text_as_paths, size=(sx, sy, 0), uniform=False)
final_paths = path.transform_paths(final_paths, Matrix44.scale(-1, 1, 1))
# move bottom/left corner to (0, 0) if required:
bbox = path.bbox(final_paths)
dx, dy, dz = -bbox.extmin
final_paths = path.transform_paths(final_paths, Matrix44.translate(dx,dy, dz))
path.render_lwpolylines(msp, final_paths, distance=0.01, dxfattribs=attr)
zoom.extents(msp)
doc.saveas(DIR / 'text2path.dxf')
| mit | Python |
|
e075b0b1c8d581107209e869eda7f6ff07a7321c | Add script to create a historic->modern dictionary | NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts | reverse_dict.py | reverse_dict.py | """Reverse modern->historic spelling variants dictonary to historic->modern
mappings
"""
import argparse
import codecs
import json
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dict', help='the name of the json file '
'containing the modern->spelling variants dictionary')
args = parser.parse_args()
dict_file = args.input_dict
modern_dict = {}
historic_dict = {}
with codecs.open(dict_file, 'rb', 'utf8') as f:
modern_dict = json.load(f, encoding='utf-8')
for modern_word, variants in modern_dict.iteritems():
for var in variants:
if var not in historic_dict.keys():
historic_dict[var] = Counter()
historic_dict[var][modern_word] += 1
print '#words in modern dict: {}'.format(len(modern_dict))
print '#words in historic dict: {}'.format(len(historic_dict))
# find historic words that map to mulitple terms
mappings_counter = Counter()
print '\nhistoric word\tmodern variant\tfrequency'
for w, mappings in historic_dict.iteritems():
mappings_counter[str(len(mappings)).zfill(3)] += 1
if len(mappings) > 1:
for variant, freq in mappings.iteritems():
print '{}\t{}\t{}'.format(w, variant, freq)
mp = mappings_counter.keys()
mp.sort()
print '\n#mappings\t#historic words'
for m in mp:
print '{}\t{}'.format(m, mappings_counter[m])
| apache-2.0 | Python |
|
8a4175461e36c11356b41e28ca250121f200dc7e | add a new basic longliving statement checker which looks at the health of items on the server | dataflow/DataStage,dataflow/DataStage,dataflow/DataStage | datastage/dataset/longliving/sword_statement_check.py | datastage/dataset/longliving/sword_statement_check.py | import logging
import time
import thread
import urllib2
import sys
from django_longliving.base import LonglivingThread
from datastage.dataset import SUBMISSION_QUEUE
from datastage.web.dataset.models import DatasetSubmission
from datastage.web.dataset import openers
from sword2 import Connection, UrlLib2Layer
logger = logging.getLogger(__name__)
# list of all the error states that we can see in the statement that we want
# to be able to react to
ERROR_STATES = [
"http://databank.ox.ac.uk/errors/UnzippingIssue"
]
# NOTE: this thread is resistant to being stopped. A KeyboardInterrupt will
# NOT suffice, it will need to be killed with a "kill <pid>" on the command
# line
class SwordStatementCheckThread(LonglivingThread):
    # FIXME: not quite sure how the __init__ function on LonglivingThread works,
# so setting this as a class variable for the time being
# this is how long the thread will sleep between requests
throttle = 5
# this is how many times the thread will re-try contacting the server if
# it suffers a major exception (i.e. not a sword exception, but something
# network related)
retry_count = 10
def run(self):
# just keep going until the thread is killed
while True:
self._check_all_datasets()
def _check_all_datasets(self):
dss = DatasetSubmission.objects.all()
for dataset_submission in dss:
self._check_dataset(dataset_submission)
def _check_dataset(self, dataset_submission):
retry_counter = 0
while retry_counter < SwordStatementCheckThread.retry_count:
try:
logger.info("Checking state of dataset at " + dataset_submission.remote_url)
opener = openers.get_opener(dataset_submission.repository,
dataset_submission.submitting_user)
conn = Connection(error_response_raises_exceptions=False, http_impl=UrlLib2Layer(opener))
receipt = conn.get_deposit_receipt(dataset_submission.remote_url)
statement = conn.get_ore_sword_statement(receipt.ore_statement_iri)
for state_uri, state_desc in statement.states:
logger.info("Dataset has state URI: " + state_uri)
if state_uri in ERROR_STATES:
dataset_submission.status = 'error'
dataset_submission.save()
logger.info("URI: " + state_uri + " is an error state ... setting 'error' state on submission record")
time.sleep(SwordStatementCheckThread.throttle)
except urllib2.URLError as e:
# if we get an exception, try again up to the limit
retry_counter += 1
continue
else:
# if we don't get an exception, we're done
return
"""
def run(self):
client = self.get_redis_client()
for key, pk in self.watch_queue(client, SUBMISSION_QUEUE, True):
try:
self.process_item(client, pk)
except Exception:
logger.exception("Failed to process submission")
try:
dataset_submission = DatasetSubmission.objects.get(pk=pk)
dataset_submission.status = 'error'
dataset_submission.save()
except Exception:
logger.exception("Failed to mark submission as failed")
"""
"""
def process_item(self, client, pk):
dataset_submission = DatasetSubmission.objects.get(pk=pk)
logger.info("Received submission request for %r to %r",
dataset_submission.identifier,
dataset_submission.repository.homepage)
dataset = dataset_submission.dataset
opener = openers.get_opener(dataset_submission.repository,
dataset_submission.submitting_user)
def update_status(status):
logger.debug("Status updated to %r", status)
dataset_submission.status = status
dataset_submission.save()
dataset.complete_submission(opener, dataset_submission, update_status)
logger.info("Submission completed")
"""
| mit | Python |
|
34886d13155af33acd043ddcd0d87738a729115a | Add files via upload | sheabrown/faraday_complexity | faraday_cnn.py | faraday_cnn.py | # ============================================================================
# Convolutional Neural Network for training a classifier to determine the
# complexity of a faraday spectrum.
# Written using Keras and TensorFlow by Shea Brown
# https://sheabrownastro.wordpress.com/
# https://astrophysicalmachinelearning.wordpress.com/
# ============================================================================
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (7,7) # Make the figures a bit bigger
np.random.seed(11) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution1D, MaxPooling1D, GaussianNoise
from keras.utils import np_utils
from keras import backend as K
# Function to regularize the feature vector of each sample (row)
# ---------------------------------------------------------------
def regularizeData(data):
data=np.asarray(data)
reg=(data-data.mean())/data.max() #data.max(axis=1,keepdims=True)
return reg
batch_size = 5
nb_classes = 2
nb_epoch = 5
# Load some test data
X_train=np.load('x_train.npy')
y_train=np.load('y_train.npy')
X_test=np.load('x_test.npy')
y_test=np.load('y_test.npy')
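
# The .npy files are not shipped with this script; each X array is expected to
# have shape (n_samples, 200, 2) to match input_shape below, with the matching
# y array holding 0 (simple) or 1 (complex) labels. A synthetic stand-in for a
# quick smoke test could look like (illustrative only):
#   X_train = np.random.rand(100, 200, 2).astype('float32')
#   y_train = np.random.randint(0, 2, size=100)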
# input spectrum dimensions
spec_length = 200
# number of convolutional filters to use
nb_filters = 64
# size of pooling area for max pooling
pool_length = 2
# convolution kernel size
filter_length = 9
# the data, shuffled and split between train and test sets
#X_train, y_train, X_test, y_test = load_wtf_data()
#print("The training data shape is",X_train.shape)
#print("The training target shape is",len(y_train))
#X_train = regularizeData(X_train)
#X_test = regularizeData(X_test)
#if K.image_dim_ordering() == 'th':
# X_train = X_train.reshape(X_train.shape[0], 1, spec_length)
# X_test = X_test.reshape(X_test.shape[0], 1, spec_length)
# input_shape = (2, spec_length)
#else:
# X_train = X_train.reshape(X_train.shape[0], spec_length, 1)
# X_test = X_test.reshape(X_test.shape[0], spec_length, 1)
input_shape = (spec_length, 2)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print(Y_train)
print(Y_test)
model = Sequential()
model.add(Convolution1D(nb_filters, filter_length,
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(Dropout(0.5))
model.add(GaussianNoise(0.4))
model.add(Convolution1D(2*nb_filters, filter_length))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(Dropout(0.6))
model.add(Convolution1D(2*nb_filters, filter_length))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(Dropout(0.6))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy',
optimizer='adadelta',
metrics=['binary_accuracy'])
#model.load_weights('possum_weights', by_name=False)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('Saving the weights in wtf_weights')
model.save_weights('wtf_weights')
# The predict_classes function outputs the highest probability class
# according to the trained classifier for each input example.
predicted_classes = model.predict_classes(X_test)
print("The shape of the predicted classes is",predicted_classes.shape)
print("Predicted classes",predicted_classes)
print("Real classes",y_test)
# Check which items we got right / wrong
correct_indices = np.nonzero(predicted_classes == y_test)[0]
incorrect_indices = np.nonzero(predicted_classes != y_test)[0]
ff=sum(predicted_classes[y_test == 0] == 0)
ft=sum(predicted_classes[y_test == 0] == 1)
tf=sum(predicted_classes[y_test == 1] == 0)
tt=sum(predicted_classes[y_test == 1] == 1)
print('The confusion matrix is')
print(ff,tf)
print(ft,tt)
| mit | Python |
|
64e04143fec40f11cc573140d53bd96765426465 | Add scripts/evt2image.py to make image from event file | liweitianux/chandra-acis-analysis,liweitianux/chandra-acis-analysis,liweitianux/chandra-acis-analysis | scripts/evt2image.py | scripts/evt2image.py | #!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
"""
Make image by binning the event file, and update the manifest.
TODO: use logging module instead of print()
"""
import sys
import argparse
import subprocess
from manifest import get_manifest
from setup_pfiles import setup_pfiles
from chandra_acis import get_chips
def make_image(infile, outfile, chips, erange, fov, clobber=False):
"""
Make image by binning the event file.
Parameters
----------
infile : str
Path to the input event file
outfile : str
Filename and path of the output image file
chips : str
Chips of interest, e.g., ``7`` or ``0-3``
erange : str
Energy range of interest, e.g., ``700-7000``
fov : str
Path to the FoV file
"""
chips = chips.replace("-", ":")
erange = erange.replace("-", ":")
clobber = "yes" if clobber else "no"
fregion = "sky=region(%s[ccd_id=%s])" % (fov, chips)
fenergy = "energy=%s" % erange
fbin = "bin sky=::1"
subprocess.check_call(["punlearn", "dmcopy"])
subprocess.check_call([
"dmcopy", "infile=%s[%s][%s][%s]" % (infile, fregion, fenergy, fbin),
"outfile=%s" % outfile, "clobber=%s" % clobber
])
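

# For example (illustrative values), make_image("evt2_clean.fits", "img.fits",
# "0-3", "700-7000", "fov.fits") runs dmcopy with a filter equivalent to:
#   evt2_clean.fits[sky=region(fov.fits[ccd_id=0:3])][energy=700:7000][bin sky=::1]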
def main():
parser = argparse.ArgumentParser(
description="Make image by binning the event file")
parser.add_argument("--elow", dest="elow", type=int, default=700,
help="lower energy limit [eV] of the output image " +
"(default: 700 [eV])")
parser.add_argument("--ehigh", dest="ehigh", type=int,
help="upper energy limit [eV] of the output image " +
"(default: 7000 [eV])")
parser.add_argument("-i", "--infile", dest="infile",
help="event file from which to create the image " +
"(default: evt2_clean from manifest)")
parser.add_argument("-o", "--outfile", dest="outfile",
help="output image filename (default: " +
"build in format 'img_c<chip>_e<elow>-<ehigh>.fits')")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="show verbose information")
parser.add_argument("-C", "--clobber", dest="clobber", action="store_true",
help="overwrite existing file")
args = parser.parse_args()
setup_pfiles(["dmkeypar", "dmcopy"])
manifest = get_manifest()
fov = manifest.getpath("fov")
infile = args.infile if args.infile else manifest.getpath("evt2_clean")
chips = get_chips(infile, sep="-")
erange = "{elow}-{ehigh}".format(elow=args.elow, ehigh=args.ehigh)
if args.outfile:
outfile = args.outfile
else:
outfile = "img_c{chips}_e{erange}.fits".format(
            chips=chips, erange=erange)
if args.verbose:
print("infile:", infile, file=sys.stderr)
print("outfile:", outfile, file=sys.stderr)
print("fov:", fov, file=sys.stderr)
print("chips:", chips, file=sys.stderr)
print("erange:", erange, file=sys.stderr)
make_image(infile, outfile, chips, erange, fov, args.clobber)
# Add created image to manifest
key = "img_e{erange}".format(erange=erange)
manifest.setpath(key, outfile)
if __name__ == "__main__":
main()
| mit | Python |
|
6988a498504b382fd86099d3c037100ad14c62d3 | fix bug, tpl_path is related to simiki source path, not wiki path | zhaochunqi/simiki,9p0le/simiki,tankywoo/simiki,9p0le/simiki,9p0le/simiki,tankywoo/simiki,zhaochunqi/simiki,zhaochunqi/simiki,tankywoo/simiki | simiki/configs.py | simiki/configs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os import path as osp
from pprint import pprint
import yaml
from simiki import utils
def parse_configs(config_file):
base_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
try:
with open(config_file, "rb") as fd:
configs = yaml.load(fd)
except yaml.YAMLError, e:
msg = "Yaml format error in {}:\n{}".format(
config_file,
unicode(str(e), "utf-8")
)
sys.exit(utils.color_msg("error", msg))
if configs["base_dir"] is None:
configs["base_dir"] = osp.dirname(osp.realpath(config_file))
configs.update(
# The directory to store markdown files
source = osp.join(configs["base_dir"], configs["source"]),
# The directory to store the generated html files
destination = osp.join(configs["base_dir"], configs["destination"]),
# The path of html template file
tpl_path = osp.join(base_dir, "simiki/themes", configs["theme"]),
)
if configs.get("url", "") is None:
configs["url"] = ""
if configs.get("keywords", "") is None:
configs["keywords"] = ""
if configs.get("description", "") is None:
configs["description"] = ""
return configs
if __name__ == "__main__":
BASE_DIR = osp.dirname(osp.dirname(osp.realpath(__file__)))
config_file = osp.join(BASE_DIR, "_config.yml")
pprint(parse_configs(config_file))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os import path as osp
from pprint import pprint
import yaml
from simiki import utils
def parse_configs(config_file):
#base_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
try:
with open(config_file, "rb") as fd:
configs = yaml.load(fd)
except yaml.YAMLError, e:
msg = "Yaml format error in {}:\n{}".format(
config_file,
unicode(str(e), "utf-8")
)
sys.exit(utils.color_msg("error", msg))
if configs["base_dir"] is None:
configs["base_dir"] = osp.dirname(osp.realpath(config_file))
configs.update(
# The directory to store markdown files
source = osp.join(configs["base_dir"], configs["source"]),
# The directory to store the generated html files
destination = osp.join(configs["base_dir"], configs["destination"]),
# The path of html template file
tpl_path = osp.join(configs["base_dir"], "simiki/themes", configs["theme"]),
)
if configs.get("url", "") is None:
configs["url"] = ""
if configs.get("keywords", "") is None:
configs["keywords"] = ""
if configs.get("description", "") is None:
configs["description"] = ""
return configs
if __name__ == "__main__":
BASE_DIR = osp.dirname(osp.dirname(osp.realpath(__file__)))
config_file = osp.join(BASE_DIR, "_config.yml")
pprint(parse_configs(config_file))
| mit | Python |
6adae60ee018966199ee1f8e2120b2eb65dcdc9e | Add stub for registration executable. | DudLab/nanshe,DudLab/nanshe,jakirkham/nanshe,nanshe-org/nanshe,nanshe-org/nanshe,jakirkham/nanshe | nanshe/nanshe/nanshe_registerer.py | nanshe/nanshe/nanshe_registerer.py | #!/usr/bin/env python
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Feb 20, 2015 13:00:51 EST$"
| bsd-3-clause | Python |
|
b83c4ddb14c9ba555d187125838a5189dfb3530c | Remove six as an explicit dependency. | martijnengler/mycli,chenpingzhao/mycli,j-bennet/mycli,brewneaux/mycli,suzukaze/mycli,evook/mycli,oguzy/mycli,MnO2/rediscli,D-e-e-m-o/mycli,nkhuyu/mycli,danieljwest/mycli,suzukaze/mycli,j-bennet/mycli,jinstrive/mycli,evook/mycli,ZuoGuocai/mycli,danieljwest/mycli,chenpingzhao/mycli,MnO2/rediscli,mdsrosa/mycli,brewneaux/mycli,shoma/mycli,thanatoskira/mycli,jinstrive/mycli,ZuoGuocai/mycli,nkhuyu/mycli,thanatoskira/mycli,webwlsong/mycli,webwlsong/mycli,martijnengler/mycli,mdsrosa/mycli,oguzy/mycli,D-e-e-m-o/mycli,shoma/mycli | setup.py | setup.py | import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('mycli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.'
setup(
name='mycli',
author='Amjith Ramanujam',
author_email='amjith[dot]r[at]gmail.com',
version=version,
license='LICENSE.txt',
url='http://mycli.net',
packages=find_packages(),
package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']},
description=description,
long_description=open('README.md').read(),
install_requires=[
'click >= 4.1',
'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF?
'prompt_toolkit==0.42',
'PyMySQL >= 0.6.6',
'sqlparse == 0.1.14',
'configobj >= 5.0.6',
],
entry_points='''
[console_scripts]
mycli=mycli.main:cli
''',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('mycli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.'
setup(
name='mycli',
author='Amjith Ramanujam',
author_email='amjith[dot]r[at]gmail.com',
version=version,
license='LICENSE.txt',
url='http://mycli.net',
packages=find_packages(),
package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']},
description=description,
long_description=open('README.md').read(),
install_requires=[
'click >= 4.1',
'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF?
'prompt_toolkit==0.42',
'PyMySQL >= 0.6.6',
'sqlparse == 0.1.14',
'six >= 1.9',
'configobj >= 5.0.6',
],
entry_points='''
[console_scripts]
mycli=mycli.main:cli
''',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| bsd-3-clause | Python |
540bf48cdca59744baf043cbfa5056b07e493429 | fix sage script to work generally over a list of account ids to produce lists of journals | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | portality/scripts/journals_in_doaj_by_account.py | portality/scripts/journals_in_doaj_by_account.py | from portality import models
from portality.core import app
from portality.core import es_connection
import esprit
import csv
import json
from portality.util import ipt_prefix
class JournalQuery(object):
def __init__(self, owner):
self.owner = owner
def query(self):
return {
"query":{
"filtered":{
"filter":{
"bool":{
"must":[
{"term":{"admin.owner.exact": self.owner}},
{"term" : {"admin.in_doaj" : True}}
]
}
},
"query":{
"match_all":{}
}
}
}
}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="input account list")
parser.add_argument("-o", "--out", help="output file path")
args = parser.parse_args()
if not args.out:
print("Please specify an output file path with the -o option")
parser.print_help()
exit()
if not args.input:
print("Please specify an input file path with the -i option")
parser.print_help()
exit()
# conn = esprit.raw.make_connection(None, app.config["ELASTIC_SEARCH_HOST"], None, app.config["ELASTIC_SEARCH_DB"])
conn = es_connection
with open(args.out, "w", encoding="utf-8") as f, open(args.input, "r") as g:
reader = csv.reader(g)
writer = csv.writer(f)
writer.writerow(["Name", "Account", "ID", "Title"])
for row in reader:
query = JournalQuery(row[1])
print(json.dumps(query.query()))
count = 0
for j in esprit.tasks.scroll(conn, ipt_prefix(models.Journal.__type__), q=query.query(), limit=800, keepalive='5m'):
journal = models.Journal(_source=j)
bibjson = journal.bibjson()
writer.writerow([row[0], row[1], journal.id, bibjson.title])
count += 1
print(count) | apache-2.0 | Python |
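A hedged usage note for the script above: the commit does not document the input format, but from the `row[0]`/`row[1]` accesses it appears to expect a headerless CSV of name/account-ID pairs (file names and IDs below are made up).
# Hypothetical invocation:
#   python journals_in_doaj_by_account.py -i accounts.csv -o journals_in_doaj.csv
# where accounts.csv holds one "name,account id" pair per line, e.g.
#   Example University Press,a1b2c3d4e5f6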
|
4d139c6d2b9ea368bfc5189537d9af67cea582f6 | Create demo_Take_Photo_when_PIR_high.py | nksheridan/elephantAI,nksheridan/elephantAI | demo_Take_Photo_when_PIR_high.py | demo_Take_Photo_when_PIR_high.py | import time
import picamera
import datetime
import RPi.GPIO as GPIO
def CheckPIR():
# dependencies are RPi.GPIO and time
    # returns PIR_IS: 0 when nothing is detected, 1 when something is detected
time.sleep(1)
#don't rush the PIR!
GPIO.setmode(GPIO.BOARD)
# set numbering system for GPIO PINs are BOARD
GPIO.setup(7, GPIO.IN)
# set up number 7 PIN for input from the PIR
# need to adjust if you connected PIR to another GPIO PIN
try:
val = GPIO.input(7)
if (val == True):
PIR_IS = 1
#PIR returned HIGH to GPIO PIN, so something here!
if (val == False):
PIR_IS = 0
            #PIR returned LOW to GPIO PIN, so nothing here!
GPIO.cleanup()
except:
GPIO.cleanup()
return PIR_IS
PIR = 1
count = 0
while True:
PIR = 0
#Now to check the PIR and send what it returns to PIR
PIR = CheckPIR()
if PIR == 0:
print("Nothing has been detected by PIR")
elif PIR == 1:
print("Something has been seen! Time to photograph it!")
i = 0
with picamera.PiCamera() as camera:
while i < 5:
i = i+1
print(i)
camera.start_preview()
time.sleep(1)
utc_datetime = datetime.datetime.utcnow()
                utc_datetime = utc_datetime.strftime("%Y-%m-%d-%H%MZ")
#get date and time so we can append it to the image filename
camera.capture('image_'+str(utc_datetime)+'.jpg')
camera.stop_preview()
time.sleep(1)
if i == 5:
break
| mit | Python |
|
feeb386efe01fb3dd4e70e216337c8a4b476cb9a | Add setup.py | JaviMerino/bart,ARM-software/bart | setup.py | setup.py | #!/usr/bin/env python
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup, find_packages
VERSION = "1.0.0"
LONG_DESCRIPTION = """Behavioural Analysis involves expressing the general
expectation of the state of the system while targeting a single or set of heuristics.
This is particularly helpful when there are a large number of factors that can change
the behaviour of the system and testing all permutations of these input parameters
is impossible. In such a scenario an assertion of the final expectation can be
useful in managing performance and regression.
The Behavioural Analysis and Regression Toolkit is based on TRAPpy. The primary goal is
to assert behaviours using the FTrace output from the kernel
"""
REQUIRES = [
"TRAPpy==1.0.0",
]
setup(name='BART',
version=VERSION,
license="Apache v2",
author="ARM-BART",
author_email="bart@arm.com",
description="Behavioural Analysis and Regression Toolkit",
long_description=LONG_DESCRIPTION,
url="http://arm-software.github.io/bart",
packages=find_packages(),
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
# As we depend on trace data from the Linux Kernel/FTrace
"Topic :: System :: Operating System Kernels :: Linux",
"Topic :: Scientific/Engineering :: Visualization"
],
install_requires=REQUIRES
)
| apache-2.0 | Python |
|
4a4231976f2f084c1233e3efe27f5d18b486f146 | Create setup.py | jthomm/game-center-db | setup.py | setup.py | from setuptools import setup
import re
name = 'gcdb'
version = ''
with open('{0}/__init__.py'.format(name), 'rb') as f:
match_object = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(),
re.MULTILINE)
version = match_object.group(1)
setup(
name=name,
version=version,
packages=[name],
entry_points={'console_scripts': ['gcdb = gcdb:main']},
)
| mit | Python |
|
3c314d006fb1726b671d0223f08fe16f0944cd82 | test call started sla | ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend | cla_backend/apps/reports/tests/test_mi_sla_report.py | cla_backend/apps/reports/tests/test_mi_sla_report.py | # -*- coding: utf-8 -*-
from contextlib import contextmanager
import datetime
from django.test import TestCase
from legalaid.forms import get_sla_time
import mock
from core.tests.mommy_utils import make_recipe, make_user
from cla_eventlog import event_registry
from cla_eventlog.models import Log
from reports.forms import MICB1Extract
@contextmanager
def patch_field(cls, field_name, dt):
field = cls._meta.get_field(field_name)
mock_now = lambda: dt
with mock.patch.object(field, 'default', new=mock_now):
yield
class MiSlaTestCase(TestCase):
def test_call_started_sla(self):
with patch_field(Log, 'created', datetime.datetime(2015, 1, 2, 9, 0, 0)):
case = make_recipe('legalaid.case')
user = make_user()
make_recipe('call_centre.operator', user=user)
event = event_registry.get_event('call_me_back')()
_dt = datetime.datetime(2015, 1, 2, 9, 1, 0)
with patch_field(Log, 'created', datetime.datetime(2015, 1, 2, 9, 1, 0)):
event.get_log_code(case=case)
event.process(
case, created_by=user,
notes='',
context={
'requires_action_at': _dt,
'sla_15': get_sla_time(_dt, 15),
'sla_30': get_sla_time(_dt, 30),
'sla_120': get_sla_time(_dt, 120),
'sla_480': get_sla_time(_dt, 480)
},
)
case.requires_action_at = datetime.datetime(2015, 1, 2, 9, 1, 0)
case.save()
event = event_registry.get_event('case')()
with patch_field(Log, 'created', datetime.datetime(2015, 1, 2, 9, 30, 0)):
event.process(
case, status='call_started', created_by=user,
notes='Call started'
)
date_range = (
datetime.datetime(2015, 1, 1),
datetime.datetime(2015, 2, 1)
)
with mock.patch('reports.forms.MICB1Extract.date_range', date_range):
report = MICB1Extract()
qs = report.get_queryset()
self.assertFalse(qs[0][28])
| mit | Python |
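The `patch_field` helper above works by swapping a model field's `default` callable for the duration of the `with` block, so any row that falls back to that default inside the block picks up the fixed timestamp. A minimal sketch of the mechanism, reusing the `Log` model imported above:
# Sketch only: while this block is active, any Log row that relies on the
# field default for 'created' is stamped with `frozen` instead of "now".
frozen = datetime.datetime(2015, 1, 2, 9, 0, 0)
with patch_field(Log, 'created', frozen):
    pass  # run code that creates Log entries here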
|
26cbfe83f0047c8ce66a21237db8ae484736a085 | Add TensorboardLogs class for use as a proxy to tensorboard data. | tsoontornwutikul/mlxm | helpers/tensorboard.py | helpers/tensorboard.py | import glob
import numpy as np
import os
from tensorflow.tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from . import get_first_existing_path, get_nth_matching_path
from ..experiments import Experiment
class TensorboardLogs(object):
def __init__(self, path):
self.path = path
self.ea = EventAccumulator(self.path)
self.ea.Reload()
def get_scalars(self, name):
events = self.ea.Scalars(name)
scalars = np.array([(event.wall_time, event.step, event.value) for event in events])
return (scalars[:,0], scalars[:,1].astype('int'), scalars[:,2])
def find_log_path(config, main_path=None):
config.define('path.result.main.base', 'path.result.base', default='')
config.define('path.result.main.relative', 'path.result.relative', default='')
config.define('path.result.tensorboard.base', 'path.result.base.tensorboard', default='')
config.define('path.result.tensorboard.relative', 'path.result.relative.tensorboard', default='')
candidates = [os.path.join(config('path.result.tensorboard.base'), config('path.result.tensorboard.relative')),
os.path.join(config('path.result.main.base').replace('experiment', 'experiment-tb'), config('path.result.tensorboard.relative')),
os.path.join(Experiment.DEFAULT_TENSORBOARD_ROOT, config('path.result.tensorboard.relative')),
get_nth_matching_path(os.path.join(config('path.result.tensorboard.base'), config('path.result.main.relative')) + '@*', -1, ''),
get_nth_matching_path(os.path.join(config('path.result.main.base').replace('experiment', 'experiment-tb'), config('path.result.main.relative')) + '@*', -1, ''),
get_nth_matching_path(os.path.join(Experiment.DEFAULT_TENSORBOARD_ROOT, config('path.result.main.relative')) + '@*', -1, '')]
if main_path:
candidates.append(get_nth_matching_path(glob.escape(main_path.replace('experiment','experiment-tb')) + '@*', -1, ''))
path = get_first_existing_path(*candidates)
if not path:
raise FileNotFoundError('Tensorboard log directory is not found.')
return path | mit | Python |
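A brief, hedged usage sketch for the `TensorboardLogs` proxy above; the log directory and the 'loss' scalar tag are assumptions for illustration, not part of the commit:
# Sketch: read a scalar series from a directory of TensorFlow event files.
logs = TensorboardLogs('/path/to/experiment/tb')        # hypothetical path
wall_times, steps, values = logs.get_scalars('loss')    # 'loss' is an assumed tag name
print(steps[-1], values[-1])                            # last recorded step and value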
|
c184e79b91a63299c249e207dba1e8cd95a8e5d0 | Add fpocket (#12675) | iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/fpocket/package.py | var/spack/repos/builtin/packages/fpocket/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fpocket(MakefilePackage):
"""fpocket is a very fast open source protein pocket detection algorithm
based on Voronoi tessellation."""
homepage = "https://github.com/Discngine/fpocket"
version('master', branch='master',
git='https://github.com/Discngine/fpocket.git')
depends_on("netcdf")
def setup_environment(self, spack_env, run_env):
if self.compiler.name == 'gcc':
spack_env.set('CXX', 'g++')
def edit(self):
makefile = FileFilter('makefile')
makefile.filter('BINDIR .*', 'BINDIR = %s/bin' % self.prefix)
makefile.filter('MANDIR .*', 'MANDIR = %s/man/man8' % self.prefix)
| lgpl-2.1 | Python |
|
fb6eee18b2bf48dd0063623515ced00e980bdf10 | Add a few tests for docparse. | carolFrohlich/nipype,carolFrohlich/nipype,sgiavasis/nipype,gerddie/nipype,christianbrodbeck/nipype,glatard/nipype,grlee77/nipype,satra/NiPypeold,arokem/nipype,wanderine/nipype,mick-d/nipype,carlohamalainen/nipype,rameshvs/nipype,dgellis90/nipype,blakedewey/nipype,glatard/nipype,gerddie/nipype,grlee77/nipype,Leoniela/nipype,dgellis90/nipype,mick-d/nipype_source,iglpdc/nipype,iglpdc/nipype,arokem/nipype,fprados/nipype,satra/NiPypeold,dmordom/nipype,mick-d/nipype_source,Leoniela/nipype,Leoniela/nipype,sgiavasis/nipype,christianbrodbeck/nipype,mick-d/nipype_source,gerddie/nipype,rameshvs/nipype,carolFrohlich/nipype,wanderine/nipype,pearsonlab/nipype,JohnGriffiths/nipype,wanderine/nipype,arokem/nipype,fprados/nipype,wanderine/nipype,blakedewey/nipype,JohnGriffiths/nipype,iglpdc/nipype,blakedewey/nipype,pearsonlab/nipype,dgellis90/nipype,glatard/nipype,dgellis90/nipype,gerddie/nipype,FredLoney/nipype,FCP-INDI/nipype,rameshvs/nipype,FredLoney/nipype,arokem/nipype,dmordom/nipype,dmordom/nipype,sgiavasis/nipype,carlohamalainen/nipype,grlee77/nipype,sgiavasis/nipype,JohnGriffiths/nipype,carolFrohlich/nipype,mick-d/nipype,pearsonlab/nipype,FCP-INDI/nipype,FCP-INDI/nipype,iglpdc/nipype,grlee77/nipype,fprados/nipype,pearsonlab/nipype,mick-d/nipype,FredLoney/nipype,mick-d/nipype,carlohamalainen/nipype,FCP-INDI/nipype,glatard/nipype,JohnGriffiths/nipype,rameshvs/nipype,blakedewey/nipype | nipype/utils/tests/test_docparse.py | nipype/utils/tests/test_docparse.py | from nipype.testing import *
from nipype.utils.docparse import reverse_opt_map, build_doc
class Foo(object):
opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'}
foo_doc = """Usage: foo infile outfile [opts]
Bunch of options:
-o something about an outline
-f <f> intensity of fun factor
Other stuff:
-v verbose
"""
fmtd_doc = """Parameters
----------
outline :
something about an outline
fun :
<f> intensity of fun factor
Others Parameters
-----------------
-v verbose"""
def test_rev_opt_map():
map = {'-f': 'fun', '-o': 'outline'}
rev_map = reverse_opt_map(Foo.opt_map)
assert_equal(rev_map, map)
def test_build_doc():
opts = reverse_opt_map(Foo.opt_map)
doc = build_doc(foo_doc, opts)
assert_equal(doc, fmtd_doc)
| bsd-3-clause | Python |
|
e71742bc0fc09ebf37532b92458670a4efe8926b | Add setup file | roverdotcom/django-device-notifications | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='django-device-notifications',
version='0.0.1',
description='Generic library for APN & GCM notifications',
author='Johann Heller',
author_email='johann@rover.com',
url='https://github.com/roverdotcom/django-device-notifications',
packages=find_packages(exclude=('tests', 'docs'))
)
| bsd-3-clause | Python |
|
ec54935e169019067f2179a92d0f6e833f133bc9 | add a DataContainer implemented as a subclass of dict | simphony/simphony-common | simphony/core/data_container.py | simphony/core/data_container.py | from collections import Mapping
from simphony.core.cuba import CUBA
_ERROR_MESSAGE = "Keys {!r} are not in the approved CUBA keywords"
_CUBA_KEYS = set(CUBA)
class DataContainer(dict):
""" A DataContainer instance
The DataContainer object is implemented as a python dictionary whose keys
are restricted to be members of the CUBA enum class.
"""
# Memory usage optimization.
__slots__ = ()
def __init__(self, *args, **kwards):
""" Contructor.
Initialization follows the behaviour of the python dict class.
"""
self._check_arguments(args, kwards)
if len(args) == 1 and not hasattr(args[0], 'keys'):
super(DataContainer, self).__init__(**kwards)
for key, value in args[0]:
self.__setitem__(key, value)
return
super(DataContainer, self).__init__(*args, **kwards)
def __setitem__(self, key, value):
""" Set/Update the key value only when
"""
if key in _CUBA_KEYS:
super(DataContainer, self).__setitem__(key, value)
else:
message = "Key {!r} is not in the approved CUBA keywords"
raise KeyError(message.format(key))
def update(self, *args, **kwards):
self._check_arguments(args, kwards)
if len(args) == 1 and not hasattr(args[0], 'keys'):
            for key, value in args[0]:
self.__setitem__(key, value)
return
super(DataContainer, self).update(*args, **kwards)
def _check_arguments(self, args, kwards):
""" Check for the right arguments
"""
# See if there are any non CUBA keys in the mapping argument
non_cuba_keys = kwards.viewkeys() - _CUBA_KEYS
if len(non_cuba_keys) > 0:
raise KeyError(_ERROR_MESSAGE.format(non_cuba_keys))
if len(args) == 1:
argument = args[0]
if isinstance(argument, DataContainer):
# This is already a DataContainer so we are sure that
# it only contains CUBA keys.
return
if isinstance(argument, Mapping):
# See if there any non CUBA keys in the mapping argument
non_cuba_keys = set(argument.keys()) - _CUBA_KEYS
if len(non_cuba_keys) > 0:
raise KeyError(_ERROR_MESSAGE.format(non_cuba_keys))
| bsd-2-clause | Python |
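A short usage sketch for the class above, written from a consumer's point of view; the specific CUBA member (`CUBA.VELOCITY`) is only an assumption for illustration — any member of the CUBA enum behaves the same way:
from simphony.core.cuba import CUBA
from simphony.core.data_container import DataContainer

dc = DataContainer()
dc[CUBA.VELOCITY] = 0.5        # accepted: the key is a CUBA member (name assumed)
try:
    dc['velocity'] = 0.5       # rejected: arbitrary strings are not CUBA keys
except KeyError as error:
    print(error)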
|
4912bac4ab534ca942393c36f71dd7df4182eb94 | add test_dot.py | yashsharan/sympy,sahilshekhawat/sympy,dqnykamp/sympy,jbbskinny/sympy,debugger22/sympy,Mitchkoens/sympy,toolforger/sympy,VaibhavAgarwalVA/sympy,drufat/sympy,hrashk/sympy,cccfran/sympy,postvakje/sympy,MridulS/sympy,lidavidm/sympy,ga7g08/sympy,pandeyadarsh/sympy,atreyv/sympy,madan96/sympy,lindsayad/sympy,Arafatk/sympy,pbrady/sympy,emon10005/sympy,oliverlee/sympy,madan96/sympy,souravsingh/sympy,wanglongqi/sympy,kaichogami/sympy,bukzor/sympy,kumarkrishna/sympy,bukzor/sympy,mafiya69/sympy,garvitr/sympy,Sumith1896/sympy,Gadal/sympy,asm666/sympy,sahmed95/sympy,Designist/sympy,beni55/sympy,yukoba/sympy,hrashk/sympy,meghana1995/sympy,hrashk/sympy,Mitchkoens/sympy,souravsingh/sympy,sampadsaha5/sympy,sampadsaha5/sympy,ahhda/sympy,asm666/sympy,Curious72/sympy,AkademieOlympia/sympy,aktech/sympy,pandeyadarsh/sympy,liangjiaxing/sympy,wyom/sympy,vipulroxx/sympy,jaimahajan1997/sympy,Arafatk/sympy,Vishluck/sympy,jbbskinny/sympy,grevutiu-gabriel/sympy,jbbskinny/sympy,AkademieOlympia/sympy,kumarkrishna/sympy,Vishluck/sympy,farhaanbukhsh/sympy,lidavidm/sympy,moble/sympy,skirpichev/omg,Gadal/sympy,atreyv/sympy,bukzor/sympy,liangjiaxing/sympy,skidzo/sympy,kaushik94/sympy,Davidjohnwilson/sympy,Arafatk/sympy,sunny94/temp,farhaanbukhsh/sympy,MechCoder/sympy,shipci/sympy,skidzo/sympy,dqnykamp/sympy,sahilshekhawat/sympy,iamutkarshtiwari/sympy,kaushik94/sympy,Titan-C/sympy,toolforger/sympy,MechCoder/sympy,aktech/sympy,lindsayad/sympy,maniteja123/sympy,postvakje/sympy,jamesblunt/sympy,jerli/sympy,shipci/sympy,yashsharan/sympy,kevalds51/sympy,MridulS/sympy,kumarkrishna/sympy,AunShiLord/sympy,drufat/sympy,Designist/sympy,Shaswat27/sympy,yukoba/sympy,iamutkarshtiwari/sympy,shikil/sympy,Shaswat27/sympy,Sumith1896/sympy,kaichogami/sympy,farhaanbukhsh/sympy,saurabhjn76/sympy,moble/sympy,Curious72/sympy,kevalds51/sympy,shikil/sympy,yukoba/sympy,drufat/sympy,Gadal/sympy,ga7g08/sympy,hargup/sympy,VaibhavAgarwalVA/sympy,debugger22/sympy,kaushik94/sympy,mafiya69/sympy,rahuldan/sympy,ga7g08/sympy,chaffra/sympy,cccfran/sympy,garvitr/sympy,chaffra/sympy,Shaswat27/sympy,beni55/sympy,pbrady/sympy,toolforger/sympy,kmacinnis/sympy,kevalds51/sympy,jamesblunt/sympy,wyom/sympy,AunShiLord/sympy,ChristinaZografou/sympy,abhiii5459/sympy,wyom/sympy,asm666/sympy,wanglongqi/sympy,dqnykamp/sympy,mcdaniel67/sympy,madan96/sympy,atsao72/sympy,iamutkarshtiwari/sympy,wanglongqi/sympy,cswiercz/sympy,meghana1995/sympy,kmacinnis/sympy,yashsharan/sympy,cswiercz/sympy,oliverlee/sympy,lindsayad/sympy,ahhda/sympy,Davidjohnwilson/sympy,emon10005/sympy,atreyv/sympy,Sumith1896/sympy,Mitchkoens/sympy,mcdaniel67/sympy,Titan-C/sympy,abhiii5459/sympy,garvitr/sympy,MridulS/sympy,Vishluck/sympy,vipulroxx/sympy,cccfran/sympy,kmacinnis/sympy,maniteja123/sympy,Davidjohnwilson/sympy,hargup/sympy,kaichogami/sympy,sahilshekhawat/sympy,ahhda/sympy,sunny94/temp,abloomston/sympy,moble/sympy,saurabhjn76/sympy,atsao72/sympy,pbrady/sympy,aktech/sympy,shipci/sympy,jaimahajan1997/sympy,mcdaniel67/sympy,maniteja123/sympy,emon10005/sympy,AkademieOlympia/sympy,beni55/sympy,mafiya69/sympy,vipulroxx/sympy,Titan-C/sympy,oliverlee/sympy,abloomston/sympy,AunShiLord/sympy,souravsingh/sympy,grevutiu-gabriel/sympy,MechCoder/sympy,Designist/sympy,lidavidm/sympy,Curious72/sympy,sahmed95/sympy,jerli/sympy,cswiercz/sympy,grevutiu-gabriel/sympy,meghana1995/sympy,skidzo/sympy,jamesblunt/sympy,hargup/sympy,rahuldan/sympy,abloomston/sympy,rahuldan/sympy,debugger22/sympy,sahmed95/sympy,pandeyadarsh/sympy,ChristinaZografou/sympy,atsao72/sympy,
liangjiaxing/sympy,sampadsaha5/sympy,jerli/sympy,sunny94/temp,jaimahajan1997/sympy,diofant/diofant,chaffra/sympy,abhiii5459/sympy,saurabhjn76/sympy,shikil/sympy,VaibhavAgarwalVA/sympy,postvakje/sympy,ChristinaZografou/sympy | sympy/printing/tests/test_dot.py | sympy/printing/tests/test_dot.py | from sympy.printing.dot import (purestr, styleof, attrprint, dotnode,
dotedges, dotprint)
from sympy import Symbol, Integer, Basic, Expr
from sympy.abc import x
def test_purestr():
assert purestr(Symbol('x')) == "Symbol(x)"
assert purestr(Basic(1, 2)) == "Basic(1, 2)"
def test_styleof():
styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}),
(Expr, {'color': 'black'})]
assert styleof(Basic(1), styles) == {'color': 'blue', 'shape': 'ellipse'}
x = Symbol('x')
assert styleof(x + 1, styles) == {'color': 'black', 'shape': 'ellipse'}
def test_attrprint():
assert attrprint({'color': 'blue', 'shape': 'ellipse'}) == \
'"color"="blue", "shape"="ellipse"'
def test_dotnode():
assert dotnode(x) ==\
'"Symbol(x)" ["color"="black", "label"="x", "shape"="ellipse"];'
assert dotnode(x+2) == \
'"Add(Integer(2), Symbol(x))" ["color"="black", "label"="Add", "shape"="ellipse"];'
def test_dotedges():
assert sorted(dotedges(x+2)) == [
'"Add(Integer(2), Symbol(x))" -> "Integer(2)";',
'"Add(Integer(2), Symbol(x))" -> "Symbol(x)";'
]
def test_dotprint():
text = dotprint(x+2)
assert all(e in text for e in dotedges(x+2))
assert all(n in text for n in map(dotnode, (x, Integer(2), x+2)))
assert 'digraph' in text
| bsd-3-clause | Python |
|
4567a9810b8c9abdb450a442c892dbdb4eecf0e0 | Add test.py to test gsutil in pantheon | googleinterns/automated-windows-vms,googleinterns/automated-windows-vms | vm_server/accept/test.py | vm_server/accept/test.py | from google.cloud import storage
bucket_name = "automation-interns"
destination_file_name = ("./text.txt")
source_blob_name = "test/text_file.txt"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name) | apache-2.0 | Python |
|
a43acda7271c3fc48a82552721aec1332e9892d6 | Create OpticalDensityInv.py | DigitalSlideArchive/HistomicsTK,DigitalSlideArchive/HistomicsTK | OpticalDensityInv.py | OpticalDensityInv.py | import numpy
def OpticalDensityInv( I ):
'''
    Transforms an optical density image "I" back into RGB intensity space (the inverse of OpticalDensityFwd).
*Inputs:
I (rgbimage) - a floating-point image of optical density values obtained
from OpticalDensityFwd.
*Outputs:
Out (rgbimage) - a floating-point multi-channel intensity image with
values in range 0-255.
*Related functions:
OpticalDensityFwd, ColorDeconvolution, ColorConvolution
'''
return numpy.exp(-(I - 255)*numpy.log(255)/255);
| apache-2.0 | Python |
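A small usage sketch for the function above, applying the inverse transform to a toy array of optical-density values:
import numpy

od = numpy.array([[0.0, 50.0], [150.0, 255.0]])
rgb = OpticalDensityInv(od)
print(rgb)   # an OD of 0 maps back to 255, while an OD of 255 maps to ~1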
|
fa049b79c24f8213fa9335a31a34c354faf67459 | Add exmaple about proving equivalence of exprs | JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton | src/examples/python/proving_equivalence.py | src/examples/python/proving_equivalence.py | #!/usr/bin/env python
## -*- coding: utf-8 -*-
##
## $ python ./proving_equivalence.py
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
##
import sys
from triton import *
def prove(ctx, n):
ast = ctx.getAstContext()
if ctx.isSat(ast.lnot(n)) == True:
return False
return True
if __name__ == '__main__':
ctx = TritonContext(ARCH.X86_64)
ast = ctx.getAstContext()
ctx.setAstRepresentationMode(AST_REPRESENTATION.PYTHON)
x = ast.variable(ctx.newSymbolicVariable(8, 'x'))
y = ast.variable(ctx.newSymbolicVariable(8, 'y'))
# MBA coming from VMProtect https://whereisr0da.github.io/blog/posts/2021-02-16-vmp-3/
# To detect their equivalence you can synthesize them (see synthesizing_obfuscated_expressions.py)
# Then you can confirm the synthesized output with this example
print(prove(ctx, x ^ y == (~(~(x) & ~(y)) & ~(~(~(x)) & ~(~(y))))))
print(prove(ctx, x + y == ((~(~(x)) & ~(~(y))) + (~(~(x)) | ~(~(y))))))
print(prove(ctx, x + y == ((~(~(y)) | ~(~(x))) + ~(~(x)) - (~(~(x)) & ~(~(~(y)))))))
print(prove(ctx, x + y == ((~(~(x)) | ~(~(y))) + (~(~(~(x))) | ~(~(y))) - (~(~(~(x)))))))
print(prove(ctx, x + y == ((~(~(x)) | ~(~(y))) + ~(~(y)) - (~(~(~(x))) & ~(~(y))))))
print(prove(ctx, x + y == (~(~(y)) + (~(~(x)) & ~(~(~(y)))) + (~(~(x)) & ~(~(y))))))
print(prove(ctx, x - y == (~(~(x) + y))))
print(prove(ctx, ~((x | y) - x) == (~(((~(~(x)) | y) - (~(~(x))))))))
print(prove(ctx, x - y == (~((~(x) & ~(x)) + y) & ~((~(x) & ~(x)) + y))))
print(prove(ctx, x & y == ((~(~(x)) | y) - (~(~(~(x))) & y) - (~(~(x)) & ~y))))
print(prove(ctx, x & y == ((~(~(~(x))) | y) - (~(~(~(x)))))))
print(prove(ctx, x | y == ((~(~(x)) & ~(y)) + y)))
print(prove(ctx, x | y == (((~(~(x)) & ~(y)) & y) + ((~(~(x)) & ~(y)) | y))))
print(prove(ctx, x + y == ((~(~(x)) & ~(~(y))) + (~(~(x)) | ~(~(y))))))
sys.exit(0)
| apache-2.0 | Python |
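A note on the `prove` helper above: an identity holds for every input exactly when its negation is unsatisfiable, which is what `ctx.isSat(ast.lnot(n))` tests. A non-identity therefore comes back False; a sketch reusing the `ctx`, `x` and `y` defined in the script:
# x - y == y - x only holds for particular 8-bit values of x and y,
# so the negation is satisfiable and prove() returns False.
print(prove(ctx, x - y == y - x))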
|
3c997e3a9eb92c3053c521f6c2fff6cfdf99c126 | add setup.py | olin-computing/assignment-dashboard,osteele/assignment-dashboard,osteele/assignment-dashboard,olin-computing/assignment-dashboard,olin-computing/assignment-dashboard | setup.py | setup.py | # noqa: D100
import os
import re
from setuptools import setup
requirements_txt = open(os.path.join(os.path.dirname(__file__), 'requirements.txt')).read()
requirements = re.findall(r'^([^\s#]+)', requirements_txt, re.M)
setup(name='assignment_dashboard',
packages=['assignment_dashboard'],
include_package_data=True,
version='0.1',
description="A web app that inspects forks of an GitHub assignment repo",
long_description="Display the a GitHub repo's forks, by file, and collate Jupyter notebooks",
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3'
'Programming Language :: Python :: 3.5'
],
url='http://github.com/osteele/assignment-dashboard',
author='Oliver Steele',
author_email='steele@osteele.com',
license='MIT',
install_requires=requirements
)
| mit | Python |
|
11cf7dd63f8fe7453057ef0846d4e645fa05f124 | Add setuptools setup.py | matwey/pybeam | setup.py | setup.py | from setuptools import setup
setup(name='pybeam',
version='0.1',
description='Python module to parse Erlang BEAM files',
url='http://github.com/matwey/pybeam',
author='Matwey V. Kornilov',
author_email='matwey.kornilov@gmail.com',
license='MIT',
packages=['pybeam'],
install_requires=['construct'],
zip_safe=False)
| mit | Python |
|
555dac76a8810cfeaae96f8de04e9eb3362a3314 | Remove old notification status column | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0109_rem_old_noti_status.py | migrations/versions/0109_rem_old_noti_status.py | """
Revision ID: 0109_rem_old_noti_status
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-10 14:25:15.712055
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0109_rem_old_noti_status'
down_revision = '0108_change_logo_not_nullable'
def upgrade():
op.drop_column('notification_history', 'status')
op.drop_column('notifications', 'status')
def downgrade():
op.add_column(
'notifications',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
op.add_column(
'notification_history',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
| mit | Python |
|
21a67556b83b7905134439d55afe33c35e4b3422 | Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries. We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists. | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0246_notifications_index.py | migrations/versions/0246_notifications_index.py | """
Revision ID: 0246_notifications_index
Revises: 0245_archived_flag_jobs
Create Date: 2018-12-12 12:00:09.770775
"""
from alembic import op
revision = '0246_notifications_index'
down_revision = '0245_archived_flag_jobs'
def upgrade():
conn = op.get_bind()
conn.execute(
"CREATE INDEX IF NOT EXISTS ix_notifications_service_created_at ON notifications (service_id, created_at)"
)
def downgrade():
conn = op.get_bind()
conn.execute(
"DROP INDEX IF EXISTS ix_notifications_service_created_at"
)
| mit | Python |
|
86b2f32bd212a14e904b9823fbf543b321f46ca7 | Add very basic setup.py | takluyver/astcheck | setup.py | setup.py | from distutils.core import setup
setup(name='astcheck',
version='0.1',
py_modules=['astcheck'],
) | mit | Python |
|
5acc7d50cbe199af49aece28b95ea97484ae31c7 | Add solution class for Ghia et al. (1982) | mesnardo/snake | snake/solutions/ghiaEtAl1982.py | snake/solutions/ghiaEtAl1982.py | """
Implementation of the class `GhiaEtAl1982` that reads the centerline velocities
reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import numpy
class GhiaEtAl1982(object):
"""
Container to store results from Ghia et al. (1982).
"""
def __init__(self, Re=None, file_path=None):
"""
Initialization.
Parameters
----------
Re: float, optional
Desired Reynolds number;
default: None.
file_path: string, optional
Path of the file containing the validation data;
default: None.
"""
self.y, self.u = None, None
self.x, self.v = None, None
if Re:
self.read_centerline_velocities(Re, file_path=file_path)
def read_centerline_velocities(self, Re, file_path=None):
"""
Reads the centerline velocities from file and for a given Reynolds number.
Parameters
----------
Re: float
Desired Reynolds number.
file_path: string, optional
Path of the file containing the validation data;
            default: None (the file located in the `resources` directory of
            the `snake` package will be read).
"""
if not file_path:
file_path = os.path.join(os.environ['SNAKE'],
'resources',
'validationData',
'ghia_et_al_1982_lid_driven_cavity.dat')
Re = str(int(round(Re)))
# column indices in file with experimental results
cols = {'100': {'u': 1, 'v': 7},
'1000': {'u': 2, 'v': 8},
'3200': {'u': 3, 'v': 9},
'5000': {'u': 4, 'v': 10},
'10000': {'u': 5, 'v': 11}}
with open(file_path, 'r') as infile:
y, u, x, v = numpy.loadtxt(infile,
dtype=float,
usecols=(0, cols[Re]['u'], 6, cols[Re]['v']),
unpack=True)
self.y, self.u = y, u
self.x, self.v = x, v
| mit | Python |
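A brief usage sketch for the class above; it assumes the `SNAKE` environment variable points at the package checkout so the bundled data file can be located:
# Sketch: load the Re=1000 centerline profiles and inspect a few points.
ghia = GhiaEtAl1982(Re=1000.0)
print(ghia.y[:3], ghia.u[:3])   # u-velocity along the vertical centerline
print(ghia.x[:3], ghia.v[:3])   # v-velocity along the horizontal centerline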
|
a893a8f9375164cbbec4e276ae73f181f74fd9ae | create image,py | piraaa/VideoDigitalWatermarking | src/image.py | src/image.py | #
# image.py
# Created by pira on 2017/07/28.
#
#coding: utf-8 | mit | Python |
|
14068a2e3ca445c02895aed38420baf846338aae | Add smile detection example script. | iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv | scripts/examples/25-Machine-Learning/nn_haar_smile_detection.py | scripts/examples/25-Machine-Learning/nn_haar_smile_detection.py | # Simle detection using Haar Cascade + CNN.
import sensor, time, image, os, nn
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565
sensor.set_framesize(sensor.QQVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load smile detection network
net = nn.load('/smile.network')
# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find faces.
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
# Detect smiles
for r in objects:
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0]/127 > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
| mit | Python |
|
b31e7a3471daefb79b1d63a433c480cf51b75745 | Create __init__.py | TryCatchHCF/DumpsterFire | FireModules/FileDownloads/AccountBruting/__init__.py | FireModules/FileDownloads/AccountBruting/__init__.py | mit | Python |
||
7a4df9d8c385ed53e29e5171c115939920a271b3 | Add a setup.py script | kinverarity1/python-csv-utility | setup.py | setup.py | # Use the setuptools package if it is available. It's preferred
# because it creates an exe file on Windows for Python scripts.
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(name='csv_util',
entry_points={'console_scripts': [
# 'EXECUTABLE_NAME = csv_util.scripts.script_module_name:entry_function_name'
]
}) | mit | Python |
|
1e7548a5b237f18c3bf5918a2254d04125492372 | Add setup script | yehzhang/RapidTest,yehzhang/RapidTest | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='rapidtest',
version='0.1',
author='Simon Zhang',
license='MIT',
packages=find_packages(),
install_requires=[])
| mit | Python |
|
61fcca809b31372bb5e793359df243cff5ee23cf | Add the setup.py file | fedora-infra/fedmsg-fasclient | setup.py | setup.py | # -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='fedmsg_fasclient',
version='0.1',
description='A fedmsg consumer that runs the fasClient based on fedmsg FAS messages',
license="LGPLv2+",
author='Janez Nemanič, Ralph Bean and Pierre-Yves Chibon',
author_email='admin@fedoraproject.org',
url='https://github.com/fedora-infra/fedmsg-fasclient',
install_requires=["fedmsg"],
packages=[],
py_modules=['fedmsg_fasclient'],
entry_points="""
[moksha.consumer]
fedmsg_fasclient = fedmsg_fasclient:FasClientConsumer
""",
)
| lgpl-2.1 | Python |
|
139123ddb81eec12d0f932ff6ff73aadb4b418cc | Add decorator to make a Node class from a regular function | vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium | ocradmin/lib/nodetree/decorators.py | ocradmin/lib/nodetree/decorators.py | """
Nodetree decorators.
"""
import inspect
import textwrap
import node
def underscore_to_camelcase(value):
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(c.next()(x) if x else '_' for x in value.split("_"))
def upper_camelcase(value):
value = underscore_to_camelcase(value)
return value[0].capitalize() + value[1:]
class makenode(object):
"""Decorate for constructing a node out
of a single function."""
def __init__(self, intypes, outtype, **kwargs):
self.intypes = intypes
self.outtype = outtype
self.kwargs = kwargs
def __call__(self, fun):
argspec = inspect.getargspec(fun)
def _eval(self):
args = [self.eval_input(i) for i in range(len(argspec.args))]
return fun(*args)
doc = fun.__doc__ if not fun.__doc__ is None \
else "No description provided"
clsname = upper_camelcase(fun.__name__)
ns = upper_camelcase(fun.__module__.split(".")[-1])
clsdict = dict(
__module__ = fun.__module__,
__doc__ = doc,
_eval = _eval,
arity = len(self.intypes),
intypes = self.intypes,
outtype = self.outtype,
            description = textwrap.dedent(doc),
name = "%s::%s" % (ns, clsname),
)
clsdict.update(self.kwargs)
return type(clsname + "Node", (node.Node,), clsdict)()
| apache-2.0 | Python |
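A hedged usage sketch for the `makenode` decorator above. The values passed to `intypes`/`outtype` and the extra `stage` keyword are placeholders — the real node type objects live elsewhere in the nodetree package:
@makenode(intypes=["image", "int"], outtype="image", stage="filters")   # placeholder types
def rotate_image(image, angle):
    """Rotate an image by the given angle."""
    return image.rotate(angle)

# `rotate_image` is now an instance of a generated RotateImageNode class with arity 2;
# its _eval() gathers the two inputs via eval_input() and calls the original function.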
|
fe7f07cbd9ff9844efa2b191a900f6efb9de576e | add db model file | taoalpha/XMate,taoalpha/XMate,taoalpha/XMate,taoalpha/XMate | model/db.py | model/db.py | # db model - all db handlers
| mit | Python |
|
8ec524a7a64c55f0759e18ea4b70c63c9c83f99a | Add admin for the various models | hzj123/56th,geoffkilpin/pombola,hzj123/56th,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,hzj123/56th,mysociety/pombola,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,mysociety/pombola,hzj123/56th,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th | pombola/interests_register/admin.py | pombola/interests_register/admin.py | from django.contrib import admin
from . import models
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['slug', 'name', 'sort_order']
search_fields = ['name']
class ReleaseAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['slug', 'name', 'date']
search_fields = ['name']
date_hierarchy = 'date'
class LineItemInlineAdmin(admin.TabularInline):
model = models.EntryLineItem
# extra = 2
fields = [ 'key', 'value' ]
class EntryAdmin(admin.ModelAdmin):
inlines = [LineItemInlineAdmin]
list_display = ['id', 'person', 'category', 'release', 'sort_order']
list_filter = [ 'release', 'category' ]
search_fields = ['person__legal_name']
# Add these to the admin
admin.site.register( models.Category, CategoryAdmin)
admin.site.register( models.Release, ReleaseAdmin)
admin.site.register( models.Entry, EntryAdmin)
| agpl-3.0 | Python |