Dataset schema:

    commit        stringlengths   40 to 40
    subject       stringlengths   4 to 1.73k
    repos         stringlengths   5 to 127k
    old_file      stringlengths   2 to 751
    new_file      stringlengths   2 to 751
    new_contents  stringlengths   1 to 8.98k
    old_contents  stringlengths   0 to 6.59k
    license       stringclasses   13 values
    lang          stringclasses   23 values
370f5a87ac8d26245b5919fc98b24019861f4dde
Add missing test
GeoscienceAustralia/GeodesyMLConverter,GeoscienceAustralia/GeodesyMLConverter
tests/test_fetch_site_logs_from_ftp_sites.py
tests/test_fetch_site_logs_from_ftp_sites.py
import pytest
import os

from fetch_site_logs_from_ftp_sites import gws_list_site_logs


def test_get_gws_site_logs():
    os.environ['gws_url'] = 'https://testgeodesy-webservices.geodesy.ga.gov.au'
    assert len(gws_list_site_logs()) > 1000
apache-2.0
Python
0774413ae3623c28a8aaf77727d0c355f6a5bd7c
Add deezer_complete core plugin #146
Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide
timeside/plugins/provider/deezer_complete.py
timeside/plugins/provider/deezer_complete.py
from timeside.core import implements, interfacedoc
from timeside.core.provider import Provider
from timeside.core.api import IProvider
from timeside.core.tools.utils import slugify

import os
from requests import get


class DeezerComplete(Provider):
    """
    Represents Deezer Provider while loading results
    computed on complete tracks on Deezer's infrastructure
    """
    implements(IProvider)

    @staticmethod
    @interfacedoc
    def id():
        return 'deezer_complete'

    @staticmethod
    @interfacedoc
    def name():
        return "Deezer Complete"

    @staticmethod
    @interfacedoc
    def ressource_access():
        return False

    def get_source_from_id(self, external_id, path, download=False):
        return ''

    def get_source_from_url(self, url, path, download=False):
        return ''

    def get_id_from_url(self, url):
        return url.split("/")[-1:][0]
agpl-3.0
Python
02c06a544b1b6e4230a9b658540b360cc60c0bb5
add cmstat.py
liuyang1/dotfiles,liuyang1/dotfiles,liuyang1/dotfiles,liuyang1/dotfiles
gist/cmstat.py
gist/cmstat.py
from __future__ import print_function
import sh
from collections import namedtuple
import os
import itertools

git = sh.git.bake()

NumStat = namedtuple('NumStat', ['insert', 'delete', 'filename'])


def getCommit(commit):
    """get commit message

    --no-pager: stop pager which block stdout
    -n 1: only show one commit
        (not use ^.. style, this will fail when not have prior commit)
    --color=never: not colorful output
    """
    opt = ('--numstat', '-n 1', '--color=never', '--pretty=%H')
    return git('--no-pager', 'log', commit, *opt)


def parseNumStat(cm):
    l = cm.split('\n\n')
    ret = []
    if len(l) < 2:
        return ret
    for line in l[1].split('\n'):
        line = line.split()
        if len(line) < 3:
            continue
        if line[0] == '-' or line[1] == '-':
            continue
        n = NumStat(int(line[0]), int(line[1]), line[2])
        ret.append(n)
    return ret


def getStatLst():
    cmlst = git('rev-list', 'HEAD', '--after=2014-01-01', '--author=liuyang')
    shalst = cmlst.split()
    stlst = []
    for sha in shalst:
        cm = getCommit(sha)
        ret = parseNumStat(cm)
        stlst.extend(ret)
    return stlst


def groupFileExt(numst):
    fn = numst.filename
    ret = os.path.splitext(fn)
    if ret[1] == "":
        return ret[0]
    else:
        return ret[1]


def main():
    stlst = getStatLst()
    a = sum([st.insert for st in stlst])
    b = sum([st.delete for st in stlst])
    print(a, b, a + b)

    stlst = sorted(stlst, key=groupFileExt)
    for ext, g in itertools.groupby(stlst, groupFileExt):
        g = list(g)
        aa = sum([st.insert for st in g])
        bb = sum([st.delete for st in g])
        print(ext, aa, bb, aa + bb)


if __name__ == "__main__":
    main()
mit
Python
e29a2107cd08e6b40b99e3682d783887107a5e77
Add a loader to load several yaml files
novafloss/populous
populous/loader.py
populous/loader.py
import collections

import yaml


def load_yaml(*filenames):
    """
    Parse the given files as if they were a single YAML file.
    """
    with ChainedFileObject(*filenames) as f:
        return yaml.load(f)


class ChainedFileObject(object):
    """
    A file-like object behaving as if all the given filenames were a
    single file.

    Note that you never get content from several files during a single
    call to ``read``, even if the length of the requested buffer is
    longer than the remaining bytes in the current file. You have to
    call ``read`` again in order to get content from the next file.

    Can be used as a context manager (in a ``with`` statement).

    Example::

        >>> f = ChainedFileObject('foo.txt', 'bar.txt')
        >>> f.read()
        "I'm the content of foo.txt"
        >>> f.read(1024)
        "I'm the content of bar.txt"
        >>> f.read()
        ''
        >>> f.close()
    """

    def __init__(self, *filenames):
        self.filenames = collections.deque(filenames)
        self.current = None
        self.nextfile()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return self.close()

    def nextfile(self):
        current = self.current
        self.current = None
        try:
            if current:
                current.close()
        finally:
            try:
                self.current = open(self.filenames.popleft())
            except IndexError:
                self.current = None

    def read(self, n=None):
        if not self.current:
            return ''

        output = self.current.read()

        if not output:
            self.nextfile()
            return self.read()

        return output

    def close(self):
        current = self.current
        self.current = None

        if current:
            current.close()
mit
Python
555cedf76d5f569b8a99691ed7dba672e578bb42
Add admin integration for positions
Hackfmi/Diaphanum,Hackfmi/Diaphanum
positions/admin.py
positions/admin.py
from django.contrib import admin

from .models import Position


class PositionAdminIndex(admin.ModelAdmin):
    list_display = ['title', 'date']
    list_filter = ['date']
    search_fields = ['title', 'content']

admin.site.register(Position, PositionAdminIndex)
mit
Python
9dc39f6492d9ece3964d5cb733cc146acee7cf66
Create w3_1.py
s40523236/2016fallcp_hw,s40523236/2016fallcp_hw,s40523236/2016fallcp_hw
w3_1.py
w3_1.py
print("hello")
agpl-3.0
Python
31fcd83585905ca28245e42163c77af38f0c83cf
Create w3_1.py
s40523242/2016fallcp_hw,s40523242/2016fallcp_hw,s40523242/2016fallcp_hw
w3_1.py
w3_1.py
print("test")
agpl-3.0
Python
d35f2d7310c277625ea6e2e15b887ac9620696a7
Add unit test for glacier vault
felix-d/boto,lochiiconnectivity/boto,weebygames/boto,abridgett/boto,appneta/boto,alex/boto,j-carl/boto,appneta/boto,rayluo/boto,lochiiconnectivity/boto,weka-io/boto,jameslegg/boto,drbild/boto,alfredodeza/boto,ocadotechnology/boto,janslow/boto,disruptek/boto,campenberger/boto,trademob/boto,elainexmas/boto,israelbenatar/boto,andresriancho/boto,kouk/boto,ekalosak/boto,shaunbrady/boto,khagler/boto,pfhayes/boto,jotes/boto,vijaylbais/boto,shipci/boto,andresriancho/boto,nexusz99/boto,lra/boto,garnaat/boto,vishnugonela/boto,jamesls/boto,tpodowd/boto,jamesls/boto,SaranyaKarthikeyan/boto,jindongh/boto,TiVoMaker/boto,darjus-amzn/boto,rosmo/boto,rjschwei/boto,disruptek/boto,jameslegg/boto,dimdung/boto,cyclecomputing/boto,nikhilraog/boto,zzzirk/boto,ric03uec/boto,ryansb/boto,awatts/boto,FATruden/boto,yangchaogit/boto,tpodowd/boto,Pretio/boto,acourtney2015/boto,dablak/boto,Timus1712/boto,Asana/boto,drbild/boto,revmischa/boto,kouk/boto,clouddocx/boto,stevenbrichards/boto,rjschwei/boto,bleib1dj/boto,varunarya10/boto,bryx-inc/boto,s0enke/boto,alex/boto,zachmullen/boto,podhmo/boto,nishigori/boto,ramitsurana/boto,serviceagility/boto,dablak/boto,ddzialak/boto
tests/unit/glacier/test_vault.py
tests/unit/glacier/test_vault.py
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import unittest
from cStringIO import StringIO

import mock
from mock import ANY

from boto.glacier import vault


class TestVault(unittest.TestCase):
    def setUp(self):
        self.size_patch = mock.patch('os.path.getsize')
        self.getsize = self.size_patch.start()

    def tearDown(self):
        self.size_patch.stop()

    def test_upload_archive_small_file(self):
        api = mock.Mock()
        v = vault.Vault(api, None)
        v.name = 'myvault'
        self.getsize.return_value = 1

        stringio = StringIO('content')
        m = mock.mock_open()
        m.return_value.read = stringio.read

        api.upload_archive.return_value = {'ArchiveId': 'archive_id'}
        with mock.patch('boto.glacier.vault.open', m, create=True):
            archive_id = v.upload_archive('filename', 'my description')
        self.assertEqual(archive_id, 'archive_id')
        api.upload_archive.assert_called_with('myvault', m.return_value,
                                              ANY, ANY, 'my description')


if __name__ == '__main__':
    unittest.main()
mit
Python
d44f12ca4395c0001bbaf0cf0d5436a84484569c
Create fasta2nexus.py
biokit/biokit,biokit/biokit
biokit/converters/fasta2nexus.py
biokit/converters/fasta2nexus.py
from Bio import AlignIO


class Fasta2Nexus(object):
    """ """
    def __init__(self, infile, outfile, *args, **kwargs):
        """ """
        self.infile = infile
        self.outfile = outfile

    def __call__(self):
        input_handle = open(self.infile, "rU")
        output_handle = open(self.outfile, "w")

        alignments = AlignIO.parse(input_handle, "fasta")
        AlignIO.write(alignments, output_handle, "nexus")

        output_handle.close()
        input_handle.close()
bsd-2-clause
Python
e8cd41a2151e5907aeaac685f5c78300a010ce7e
add sensu plugin to check eventanomaly
yieldbot/sensu-yieldbot-plugins,yieldbot/sensu-yieldbot-plugins,yieldbot/sensu-yieldbot-plugins
plugins/bongo/check-eventanomaly.py
plugins/bongo/check-eventanomaly.py
#!/usr/bin/env python

from optparse import OptionParser
import socket
import sys
import httplib
import json

PASS = 0
FAIL = 1


def get_bongo_host(server, app):
    try:
        con = httplib.HTTPConnection(server, timeout=45)
        con.request("GET", "/v2/apps/" + app)
        data = con.getresponse()
        if data.status >= 300:
            print "get_bongo_host: Received non-2xx response= %s" % (data.status)
            sys.exit(FAIL)
        json_data = json.loads(data.read())
        host = "%s:%s" % (json_data['app']['tasks'][0]['host'],
                          json_data['app']['tasks'][0]['ports'][0])
        con.close()
        return host
    except Exception, e:
        print "get_bongo_host: %s :exception caught" % (e)
        sys.exit(FAIL)


def get_status(host, group, time):
    try:
        con = httplib.HTTPConnection(host, timeout=45)
        con.request("GET", "/v1/eventdrop/" + group + "/" + time)
        data = con.getresponse()
        if data.status >= 300:
            print "get_status: Received non-2xx response= %s" % (data.status)
            sys.exit(FAIL)
        json_data = json.loads(data.read())
        con.close()
        print "get_status: %s" % (json_data['msg'])
        sys.exit(json_data['status'])
        #if json_data['status'] == 1:
        #    print "get_status: %s" % (json_data['msg'])
        #    sys.exit(FAIL)
        #else:
        #    print "%s is fine" % group
        #    sys.exit(PASS)
    except Exception, e:
        print "get_status: %s :exception caught" % (e)
        sys.exit(FAIL)


if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("-s", dest="server", action="store",
                      default="localhost:8080",
                      help="Marathon Cluster address with port no")
    parser.add_option("-a", dest="app", action="store",
                      default="bongo.useast.prod",
                      help="App Id to retrieve the slave address")
    parser.add_option("-g", dest="group", action="store", default="pmi",
                      help="The group of event pmi or adevents")
    parser.add_option("-t", dest="time", action="store", default="10min",
                      help="The time gap for which the difference is to be calculated")

    (options, args) = parser.parse_args()
    host = get_bongo_host(options.server, options.app)
    get_status(host, options.group, options.time)
mit
Python
6a95f7aa987994cdd173dc52d5de2754e449ebbb
Add a Python script that controls the user list in my own Twitter lists.
showa-yojyo/bin,showa-yojyo/bin
listmanager.py
listmanager.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script manages your own Twitter lists.

Examples:
  You can add users to a list by the "add" command::

    $ python listmanager.py add your_screen_name your_list_name user1 [user2 ...]

  Likewise, you can also remove users by the "remove" command.
"""

from secret import twitter_instance
from argparse import ArgumentParser

__version__ = '1.0.0'


def configure():
    """Parse the command line parameters.

    Returns:
        An instance of argparse.ArgumentParser that stores the command line
        parameters.
    """
    parser = ArgumentParser(description='Twitter List Manager')
    parser.add_argument('--version', action='version', version=__version__)

    # Positional arguments
    parser.add_argument(
        'command',
        choices=['add', 'remove',],
        help='Either "add" or "remove".')
    parser.add_argument(
        'owner_screen_name',
        help='The screen name of the user who owns the list being requested by a slug.')
    parser.add_argument(
        'slug',
        help='The slug of the list.')
    parser.add_argument(
        'screen_names',
        nargs='+',
        help='A comma separated list of screen names, up to 100 are allowed in a single request.')

    return parser


def main(args):
    """The main function.

    Args:
        args: An instance of argparse.ArgumentParser parsed in the configure
            function.

    Returns:
        None.
    """
    tw = twitter_instance()

    # Few commands are available so far.
    if args.command == 'add':
        tw.lists.members.create_all(
            owner_screen_name=args.owner_screen_name,
            slug=args.slug,
            screen_name=','.join(args.screen_names))
    elif args.command == 'remove':
        tw.lists.members.destroy_all(
            owner_screen_name=args.owner_screen_name,
            slug=args.slug,
            screen_name=','.join(args.screen_names))


if __name__ == '__main__':
    parser = configure()
    main(parser.parse_args())
mit
Python
489c77d3bbd3a9e0e14578f4371870042e2d04d1
Add another debug script
tomislacker/cornbread
debug1.py
debug1.py
import logging
import threading

from cornbread.xorg import *


if __name__ == '__main__':
    logging.warning('Creating FW')
    w = FocusedWindow()

    logging.warning('Creating FW thread')
    t = threading.Thread(target=FocusedWindowWatcher, args=(w,))

    logging.warning('Starting thread')
    t.start()

    try:
        logging.warning('Joining FW thread')
        t.join(4)
    except KeyboardInterrupt as e:
        logging.warning('Keyboard interrupt')
        w._exit_watch = True
        t.join(4)
apache-2.0
Python
28d409eea4fbcd3846d0146f878529ed3b1c2145
Create update.py
ASpelling/mw-trading,ASpelling/mw-trading,ASpelling/mw-trading
app/update.py
app/update.py
'''
Update functions for Classes in Models

- Update SCTR - updatingSCTR()
    in: array (Adj Close)
    out: float (Average SCTR over SCTR_AVERAGE days, EMA50)

- Update Money wave - updatingMoneyWave()
    in: array (High, Low, Adj Close, nextMWPrice = False, MW)
    out: float (Money Wave)
    - Sub func Update next stock price for a fixed MW - if nextMWPrice = True
        out: float (Price)

- Update weekly EMA Long Term (50) vs Short Term (10) - updatingEMALTvsST()
    in: array (Adj Close)
    out: Boolean (or array for plot)

- Update CoppockCurve - updatingCoppock()
    Not yet implemented!
    in: ?
    out: Boolean (or array for plot)

- Update plot - updatingPlot()
    Not yet implemented!
    in:
    out:
'''

#import pandas as pd
import numpy as np
import talib as tb

from config import SCTR_AVERAGE


def updatingSCTR(adjClose):
    if len(adjClose) > 250:
        # -- Long term SCTR --------------------
        ema200 = tb.EMA(adjClose, timeperiod=200)
        sctrEMA200 = ((adjClose/ema200)-1)
        sctrROC125 = tb.ROC(adjClose, timeperiod=125)
        longTerm = ((sctrEMA200*0.3) + (sctrROC125*0.3))

        # -- Medium term SCTR ------------------
        ema50 = tb.EMA(adjClose, timeperiod=50)
        sctrEMA50 = ((adjClose/ema50)-1)
        sctrROC20 = tb.ROC(adjClose, timeperiod=20)
        mediumTerm = ((sctrEMA50*0.15) + (sctrROC20*0.15))

        # -- Short term SCTR -------------------
        ppo = tb.PPO(adjClose, fastperiod=12, slowperiod=26, matype=1)
        ppoEMA = tb.EMA(ppo, timeperiod=9)
        ppoHist = ppo - ppoEMA
        ppoHistSlope = (ppoHist - np.roll(ppoHist, 3))/3
        ppoHistSlope[ppoHistSlope > 1] = 1
        ppoHistSlope[ppoHistSlope < -1] = -1
        rsi14 = tb.RSI(adjClose, timeperiod=14)
        shortTerm = (((ppoHistSlope+1)*50)*0.05) + (rsi14*0.05)

        sctr = (longTerm + mediumTerm + shortTerm)
        return sctr[-1]  #*SCTR_AVERAGE):].mean()

    # Throw exception?
    return None


def updatingMoneyWave(highp, lowp, closep, MW=20, nextMWPrice=False):
    if len(closep) > 10:
        slowk, slowd = tb.STOCH(highp, lowp, closep, fastk_period=5,
                                slowk_period=3, slowk_matype=0,
                                slowd_period=1, slowd_matype=0)
        if nextMWPrice:
            preStoch = ((MW*3) - slowd[-1] - slowd[-2])/100
            newPrice = ((max(highp[-4:]) - min(lowp[-4:]))*preStoch)+min(lowp[-4:])
            return (slowd[-1], newPrice)
        return (slowd[-1])

    # Throw exception?
    return (None, None)


def updatingEMA50(adjClose):
    if len(adjClose) > 60:
        ema50 = tb.EMA(adjClose, timeperiod=50)
        return adjClose[-1] > ema50[-1]


def updatingEMALTvsST(daily):
    if len(daily['Adj Close']) > 300:
        weekly = daily.asfreq('W-FRI', method='pad', how='end')
        shortTerm = tb.EMA(weekly['Adj Close'].values, timeperiod=10)
        longTerm = tb.EMA(weekly['Adj Close'].values, timeperiod=50)
        return shortTerm[-1] > longTerm[-1]

    # Throw exception
    return None


def updatingCoppock():
    return True


def updatingPlot():
    return True
apache-2.0
Python
b6b65f0ca7253af5325eafc6b19e7cfecda231b3
Add solution for exercise 2b of hw3
escorciav/amcs211,escorciav/amcs211
hw3/hw3_2b.py
hw3/hw3_2b.py
import sympy

x1, x2 = sympy.symbols('x1 x2')
f = 8*x1 + 12*x2 + x1**2 - 2*x2**2

df_dx1 = sympy.diff(f, x1)
df_dx2 = sympy.diff(f, x2)
H = sympy.hessian(f, (x1, x2))

xs = sympy.solve([df_dx1, df_dx2], [x1, x2])
H_xs = H.subs([(x1, xs[x1]), (x2, xs[x2])])
lambda_xs = H_xs.eigenvals()

count = 0
for i in lambda_xs.keys():
    if i.evalf() <= 0:
        count += 1

if count == 0:
    print 'Local minima'
elif count == len(lambda_xs.keys()):
    print 'Local maxima'
else:
    print 'Saddle point'
bsd-2-clause
Python
8649fef1ddea18525fd0f6c5f8aa42e18b0726f8
rename plot to visualizer
clemsos/mitras,clemsos/mitras,clemsos/mitras
lib/visualizer.py
lib/visualizer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import datetime
import time

from utils import slugify
from scipy.cluster.hierarchy import dendrogram


def create_bar_graph(_x, _y, _title, _disp):
    print "Creating bar graph..."

    # VARIABLES
    bar_color = '#CCCCCC'
    bar_width = .35
    images_path = "/home/clemsos/Dev/mitras/out/"

    w = len(_x)  # width of the canvas
    h = len(_y)  # height of the canvas

    # Create a figure with size 6 _x 6 inches.
    fig = plt.figure(figsize=(w, h))

    # Create a canvas and add the figure to it.
    canvas = FigureCanvas(fig)

    # bar plot for volume of
    bars = fig.add_subplot(111)

    # Display Grid
    bars.grid(True, linestyle='-', color='0.75')

    # Display Bars
    bars.bar(_x, _y, facecolor=bar_color, align='center', ecolor='black')

    # This sets the ticks on the x axis to be exactly where we put the center of the bars.
    # bars.set_xticks(_x)

    # Create a y label
    bars.set_ylabel('Counts')

    # Create a title, in italics
    bars.set_title(_title, fontstyle='italic')

    # Generate the Scatter Plot.
    # bars.scatter(_x, _y, s=20, color='tomato')

    # Auto-adjust and beautify the labels
    fig.autofmt_xdate()

    # Save the generated Scatter Plot to a PNG file.
    fn = images_path + slugify(_title)
    canvas.print_figure(fn, dpi=200)
    fig.savefig(fn + ".pdf")

    print " graph file has been saved at %s.png" % fn
    print " graph file has been saved at %s.pdf" % fn

    # Show us everything
    if _disp is True:
        plt.show()


def plot_sparcity():
    # should use matplotlib spy : http://matplotlib.org/examples/pylab_examples/spy_demos.html
    pass


def augmented_dendrogram(*args, **kwargs):
    ddata = dendrogram(*args, **kwargs)

    if not kwargs.get('no_plot', False):
        for i, d in zip(ddata['icoord'], ddata['dcoord']):
            x = 0.5 * sum(i[1:3])
            y = d[1]
            plt.plot(x, y, 'ro')
            plt.annotate("%.3g" % y, (x, y), xytext=(0, -8),
                         textcoords='offset points',
                         va='top', ha='center')


# VIZ lib
# http://bokeh.pydata.org/
mit
Python
71b0af732e6d151a22cc0d0b28b55020780af8b6
Add memoize function for python 2.x
ironman5366/W.I.L.L,ironman5366/W.I.L.L
ftools.py
ftools.py
from functools import wraps


def memoize(obj):
    # This is taken from the Python Decorator Library on the official Python
    # wiki. https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
    # Unfortunately we're using Python 2.x here and lru_cache isn't available
    cache = obj.cache = {}

    @wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
mit
Python
1967db2a9b6e3b4420a1ebc5fe5fe157d61c6314
Initialise entry and do a proper 404 if it could not be found.
ushahidi/mobilefeed,ushahidi/mobilefeed
kindlefeed.py
kindlefeed.py
# KindleFeed Controller
# =====================
#
# This file is part of KindleFeed.
#
# KindleFeed is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KindleFeed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with KindleFeed. If not, see <http://www.gnu.org/licenses/>.

import feedparser, flask, urllib

app = flask.Flask(__name__)


@app.template_filter('quote_plus')
def urlencode(s):
    return urllib.quote_plus(s)


@app.route('/')
def index():
    feeds = (('Mashable', 'http://feeds.mashable.com/Mashable'),
             ('TechCrunch', 'http://feeds.feedburner.com/techcrunch'))

    return flask.render_template('index.html', feeds=feeds)


@app.route('/feed')
def feed():
    url = flask.request.args.get('url')
    feed = feedparser.parse(url)

    return flask.render_template('feed.html', url=url, feed=feed)


@app.route('/entry')
def entry():
    feed_url = flask.request.args.get('feed')
    entry_id = flask.request.args.get('entry')

    feed = feedparser.parse(feed_url)

    entry = None

    for i in feed.entries:
        if i.id == entry_id:
            entry = i

    if entry == None:
        flask.abort(404)

    return flask.render_template('entry.html', feed_url=feed_url, feed=feed, entry=entry)


def main():
    app.debug = True
    app.run(host='0.0.0.0', port=80)


if __name__ == '__main__':
    main()
# KindleFeed Controller
# =====================
#
# This file is part of KindleFeed.
#
# KindleFeed is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KindleFeed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with KindleFeed. If not, see <http://www.gnu.org/licenses/>.

import feedparser, flask, urllib

app = flask.Flask(__name__)


@app.template_filter('quote_plus')
def urlencode(s):
    return urllib.quote_plus(s)


@app.route('/')
def index():
    feeds = (('Mashable', 'http://feeds.mashable.com/Mashable'),
             ('TechCrunch', 'http://feeds.feedburner.com/techcrunch'))

    return flask.render_template('index.html', feeds=feeds)


@app.route('/feed')
def feed():
    url = flask.request.args.get('url')
    feed = feedparser.parse(url)

    return flask.render_template('feed.html', url=url, feed=feed)


@app.route('/entry')
def entry():
    feed_url = flask.request.args.get('feed')
    entry_id = flask.request.args.get('entry')

    feed = feedparser.parse(feed_url)

    for i in feed.entries:
        if i.id == entry_id:
            entry = i

    return flask.render_template('entry.html', feed_url=feed_url, feed=feed, entry=entry)


def main():
    app.debug = True
    app.run(host='0.0.0.0', port=80)


if __name__ == '__main__':
    main()
agpl-3.0
Python
4bc2c46e605b7bffb6e7e8206fdb6bb168864c45
test random user fulfilling the specifications
nukru/projectQ,nukru/Swarm-Surveys,nukru/Swarm-Surveys,nukru/projectQ,nukru/Swarm-Surveys,nukru/projectQ,nukru/projectQ
listRandomUser.py
listRandomUser.py
import random


class list_random:
    def __init__(self, n):
        self.n = n
        self.count = n/2
        self.l_tuple = []
        for i in range(n):
            for j in range(i+1, n):
                self.l_tuple.append([i, j, 0])
                # 0 not used
                # 1 invalid
                # 2 used

    def _valido(self, i, lista):
        if self.l_tuple[i][2] == 0:
            for j in lista:
                if (j[0] == self.l_tuple[i][0] or
                        j[0] == self.l_tuple[i][1] or
                        j[1] == self.l_tuple[i][0] or
                        j[1] == self.l_tuple[i][1]):
                    self.l_tuple[i][2] = 1
                    return False
            self.l_tuple[i][2] = 2
            lista.append((self.l_tuple[i][0], self.l_tuple[i][1]))
            return True
        return False

    def list1(self):
        lista = []
        k = self.count
        while (k > 0):
            i = random.randrange(len(self.l_tuple))
            if self._valido(i, lista):
                pass
            else:
                last = len(self.l_tuple) - 1
                for j in range(len(self.l_tuple)):
                    if self._valido((i+j+1) % len(self.l_tuple), lista):
                        break
                if j == last:
                    # no solution found
                    raise Exception("no solution found")
            k = k - 1
            print "ONE LESS", k
        return lista
apache-2.0
Python
c2f1717c53042f8ff3a7ba169a2db365aa8bc8ba
Add gff2togff3.py
konrad/kuf_bio_scripts
gff2togff3.py
gff2togff3.py
"""Change attribute string from GFF2 format GGF3 format.""" import csv import sys for row in csv.reader(open(sys.argv[1]), delimiter="\t"): if not row[0].startswith("#"): row[8] = ";".join( ["%s=%s" % (attribute.split()[0], " ".join(attribute.split()[1:])) for attribute in row[8].split(" ; ")]) print("\t".join(row))
isc
Python
ac89ec64ab619bfa778d0961aeaefc8967d971a3
Add errors.py to move away from Python errors
vakila/kimi
errors.py
errors.py
# Kimi language interpreter in Python 3
# Anjana Vakil
# http://www.github.com/vakila/kimi


def complain_and_die(message):
    print(message)
    quit()


def assert_or_complain(assertion, message):
    try:
        assert assertion
    except AssertionError:
        complain_and_die(message)
mit
Python
5cb726d5139537cbe7c03bc5ed540b9cdb7c7e21
Add bzero simprocedure I have had lying around forever
schieb/angr,angr/angr,schieb/angr,angr/angr,schieb/angr,angr/angr
angr/procedures/posix/bzero.py
angr/procedures/posix/bzero.py
from ..libc import memset


class bzero(memset.memset):
    def run(self, addr, size):
        return super().run(addr, self.state.solver.BVV(0, self.arch.byte_width), size)
bsd-2-clause
Python
551f78f32665b1397120ada10036c1d9c09daddc
Create flip-bits.py
joshua-jin/algorithm-campus,joshua-jin/algorithm-campus,joshua-jin/algorithm-campus
lulu/flip-bits.py
lulu/flip-bits.py
class Solution:
    """
    @param a, b: Two integer
    return: An integer
    """
    def bitSwapRequired(self, a, b):
        # write your code here
        return self.countOnes(a ^ b)

    def countOnes(self, num):
        # write your code here
        counter = 0
        a = 1
        for i in range(0, 32):
            digit = num & a
            if digit != 0:
                counter += 1
            a *= 2
        return counter
mit
Python
75437fc5607b41763f8c81813ba12dbe1c414c5f
combine the sequence names from various headers and then concatenate the sam entries
jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public
iron/utilities/combine_sam.py
iron/utilities/combine_sam.py
#!/usr/bin/python

import sys, argparse, re


def main():
    parser = argparse.ArgumentParser(description='Combine sam files')
    parser.add_argument('sam_files', nargs='+', help='FILENAME for sam files')
    args = parser.parse_args()

    header = False
    seqs = set()
    tagorder = []
    tagseen = {}
    for file in args.sam_files:
        with open(file) as inf:
            for line in inf:
                line = line.rstrip()
                f = line.split("\t")
                m = re.match('^(@\S\S)\s', line)
                if not m or len(f) > 10:
                    break
                if m.group(1) == '@SQ':
                    seqs.add(line)
                if m.group(1) not in tagseen:
                    tagorder.append(m.group(1))
                    tagseen[m.group(1)] = line

    # now print the header
    for tag in tagorder:
        if tag != '@SQ':
            print tagseen[tag]
        else:
            for seq in sorted(seqs):
                print seq

    # now go back through and do the sam data
    for file in args.sam_files:
        with open(file) as inf:
            for line in inf:
                f = line.rstrip().split("\t")
                if len(f) > 10:
                    print line

main()
apache-2.0
Python
e3c493847ead7352ecad1e92a739a1b79549a70c
Add dodo tape command
mnieber/dodo_commands
dodo_commands/extra/webdev_commands/tape.py
dodo_commands/extra/webdev_commands/tape.py
# noqa
import argparse
from dodo_commands.extra.standard_commands import DodoCommand


class Command(DodoCommand):  # noqa
    help = ""
    decorators = ["docker"]
    docker_options = [
        '--name=tape',
    ]

    def add_arguments_imp(self, parser):  # noqa
        parser.add_argument(
            'tape_args',
            nargs=argparse.REMAINDER
        )

    def handle_imp(self, tape_args, **kwargs):  # noqa
        tape_args = tape_args[1:] if tape_args[:1] == ['-'] else tape_args
        self.runcmd(
            [
                self.get_config("/TAPE/tape", "tape"),
                self.get_config("/TAPE/glob")
            ] + tape_args,
            cwd=self.get_config("/TAPE/src_dir")
        )
mit
Python
0f13cc95eeeed58c770e60b74a37f99ca24a28f0
add tests for views
gitgik/updown
api/tests/test_views.py
api/tests/test_views.py
from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from django.core.urlresolvers import reverse


class ViewsTestCase(TestCase):
    """Test suite for views."""

    def setUp(self):
        """setup variables"""
        self.client = APIClient()

    def create_file(self, filepath):
        """Create a file for testing."""
        f = open(filepath, 'w')
        f.write('this is a good file\n')
        f.close()
        f = open(filepath, 'rb')
        return {'_file': f}

    def test_file_upload(self):
        data = self.create_file('/tmp/file')
        response = self.client.post(
            reverse('api.upload'), data, format='multipart')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_getting_all_files(self):
        response = self.client.get(reverse('file_get'))

    def test_getting_specific_file(self):
        pass

    def test_deleting_a_file(self):
        """Ensure an existing file can be deleted."""
        data = self.create_file('/tmp/file')
        response = self.client.post(
            reverse('api.upload'), data, format='multipart')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        # get the file that's just been uploaded
        new_file = File.objects.get()
        res = self.client.delete(
            reverse('api.delete'), kwargs={'pk': new_file.id}, follow=True)
        self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
mit
Python
aba613ddef5e25e057ca515bb017c4a21095936f
Add example to use CRF1d with automatically sorting sequences
keisuke-umezawa/chainer,chainer/chainer,wkentaro/chainer,niboshi/chainer,wkentaro/chainer,hvy/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,wkentaro/chainer,chainer/chainer,pfnet/chainer,chainer/chainer,okuta/chainer,niboshi/chainer,keisuke-umezawa/chainer,okuta/chainer,niboshi/chainer,hvy/chainer,hvy/chainer,tkerola/chainer,chainer/chainer,hvy/chainer,niboshi/chainer,okuta/chainer,wkentaro/chainer,okuta/chainer
examples/pos/postagging_with_auto_transpose.py
examples/pos/postagging_with_auto_transpose.py
import argparse
import collections

import nltk
import numpy
import six

import chainer
from chainer import datasets
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions


class CRF(chainer.Chain):

    def __init__(self, n_vocab, n_pos):
        super(CRF, self).__init__()
        with self.init_scope():
            self.feature = L.EmbedID(n_vocab, n_pos)
            self.crf = L.CRF1d(n_pos, transpose=True)

    def forward(self, xs, ys):
        # h[i] is feature vector for each batch of words.
        hs = [self.feature(x) for x in xs]
        loss = self.crf(hs, ys)
        reporter.report({'loss': loss}, self)

        # To predict labels, call argmax method.
        _, predict = self.crf.argmax(hs)
        correct = 0
        total = 0
        for y, p in six.moves.zip(ys, predict):
            # NOTE y is ndarray because
            # it does not pass to transpose_sequence
            correct += self.xp.sum(y == p)
            total += len(y)

        reporter.report({'correct': correct}, self)
        reporter.report({'total': total}, self)

        return loss

    def argmax(self, xs):
        hs = [self.feature(x) for x in xs]
        return self.crf.argmax(hs)


def convert(batch, device):
    sentences = [
        chainer.dataset.to_device(device, sentence) for sentence, _ in batch]
    poses = [chainer.dataset.to_device(device, pos) for _, pos in batch]
    return {'xs': sentences, 'ys': poses}


def main():
    parser = argparse.ArgumentParser(
        description='Chainer example: POS-tagging')
    parser.add_argument('--batchsize', '-b', type=int, default=30,
                        help='Number of images in each mini batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    vocab = collections.defaultdict(lambda: len(vocab))
    pos_vocab = collections.defaultdict(lambda: len(pos_vocab))

    # Convert word sequences and pos sequences to integer sequences.
    nltk.download('brown')
    data = []
    for sentence in nltk.corpus.brown.tagged_sents():
        xs = numpy.array([vocab[lex] for lex, _ in sentence], numpy.int32)
        ys = numpy.array([pos_vocab[pos] for _, pos in sentence], numpy.int32)
        data.append((xs, ys))

    print('# of sentences: {}'.format(len(data)))
    print('# of words: {}'.format(len(vocab)))
    print('# of pos: {}'.format(len(pos_vocab)))

    model = CRF(len(vocab), len(pos_vocab))
    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu(args.gpu)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))

    test_data, train_data = datasets.split_dataset_random(
        data, len(data) // 10, seed=0)

    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=convert, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    evaluator = extensions.Evaluator(
        test_iter, model, device=args.gpu, converter=convert)
    # Only validate in each 1000 iteration
    trainer.extend(evaluator, trigger=(1000, 'iteration'))
    trainer.extend(extensions.LogReport(trigger=(100, 'iteration')),
                   trigger=(100, 'iteration'))
    trainer.extend(
        extensions.MicroAverage(
            'main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage(
            'validation/main/correct', 'validation/main/total',
            'validation/main/accuracy'))
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']),
        trigger=(100, 'iteration'))

    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()


if __name__ == '__main__':
    main()
mit
Python
27788308891d9cd82da7782d62b5920ea7a54f80
Add custom command to daily check scores
belatrix/BackendAllStars
employees/management/commands/dailycheck.py
employees/management/commands/dailycheck.py
from constance import config
from datetime import datetime
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage
from django.shortcuts import get_list_or_404
from employees.models import Employee


class Command(BaseCommand):
    help = "Update scores daily."

    def change_day(self):
        employees = get_list_or_404(Employee)
        for employee in employees:
            employee.yesterday_given = employee.today_given
            employee.yesterday_received = employee.today_received
            employee.today_given = 0
            employee.today_received = 0
            employee.save()

    def change_month(self):
        employees = get_list_or_404(Employee)
        for employee in employees:
            employee.last_month_given = employee.current_month_given
            employee.last_month_score = employee.current_month_score
            employee.current_month_given = 0
            employee.current_month_score = 0
            employee.save()

    def change_year(self):
        employees = get_list_or_404(Employee)
        for employee in employees:
            employee.last_year_given = employee.current_year_given
            employee.last_year_score = employee.current_year_score
            employee.current_year_given = 0
            employee.current_year_score = 0
            employee.save()

    def send_daily_email(self):
        subject = config.DAILY_EXECUTION_CONFIRMATION_SUBJECT
        message = config.DAILY_EXECUTION_CONFIRMATION_MESSAGE
        email = EmailMessage(subject, message, to=[config.DAILY_EXECUTION_CONFIRMATION_EMAIL])
        email.send()

    def send_blocked_notification_email(self, employee):
        subject = config.USER_BLOCKED_NOTIFICATION_SUBJECT
        message = config.USER_BLOCKED_NOTIFICATION_MESSAGE % employee.username
        email = EmailMessage(subject, message, to=[employee.email])
        email.send()

    def evaluate_block_users(self):
        employees = get_list_or_404(Employee)
        for employee in employees:
            if employee.yesterday_given > config.MAX_STARS_GIVEN_DAY:
                employee.is_blocked = True
            if employee.yesterday_received > config.MAX_STARS_RECEIVED_DAY:
                employee.is_blocked = True
            if employee.current_month_given > config.MAX_STARS_GIVEN_MONTHLY:
                employee.is_blocked = True
            if employee.current_month_score > config.MAX_STARS_RECEIVED_MONTHLY:
                employee.is_blocked = True
            employee.save()
            try:
                if employee.is_blocked:
                    self.send_blocked_notification_email(employee)
            except Exception as e:
                print e

    def handle(self, *args, **options):
        today = datetime.now()
        self.change_day()
        self.evaluate_block_users()
        self.send_daily_email()
        if today.day == 1:
            self.change_month()
        if (today.day == 1 and today.month == 1):
            self.change_year()
apache-2.0
Python
8aac73fdc26fd838c3f91ffa9bc58e25777a5179
Add tests for mach angle
iwarobots/TunnelDesign
properties/tests/test_mach_angle.py
properties/tests/test_mach_angle.py
#!/usr/bin/env python

"""Test Mach angle functions.

Test data is obtained from
http://www.grc.nasa.gov/WWW/k-12/airplane/machang.html.

"""

import nose
import nose.tools as nt

from properties.prandtl_meyer_function import mu_in_deg


@nt.raises(ValueError)
def test_mach_lesser_than_one():
    m = 0.1
    mu_in_deg(m)


def test_normal_mach():
    m1 = 1.5
    nt.assert_almost_equal(mu_in_deg(m1), 41.762, places=3)

    m2 = 2.6
    nt.assert_almost_equal(mu_in_deg(m2), 22.594, places=3)


if __name__ == '__main__':
    nose.main()
mit
Python
49dfd690abe794e3b393b8bcac3e0ab1427c41b3
Define riot_open.
soasme/riotpy
riot/app.py
riot/app.py
# -*- coding: utf-8 -*-

import urwid


def run_tag(tag, *args, **kwargs):
    loop = urwid.MainLoop(tag, *args, **kwargs)
    loop.run()


def quit_app():
    raise urwid.ExitMainLoop()
mit
Python
04021db907109a5291833eb5ae96c45fb8d1802c
Add flask app mocking the EC API
okfn/ckanext-glasgow,okfn/ckanext-glasgow
ckanext/glasgow/tests/mock_ec.py
ckanext/glasgow/tests/mock_ec.py
import uuid

import flask
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException


def make_json_app(import_name, **kwargs):
    """
    Creates a JSON-oriented Flask app.

    All error responses that you don't specifically
    manage yourself will have application/json content
    type, and will contain JSON like this (just an example):

    { "message": "405: Method Not Allowed" }
    """
    def make_json_error(ex):
        response = flask.jsonify(Message=str(ex))
        response.status_code = (ex.code
                                if isinstance(ex, HTTPException)
                                else 500)
        return response

    app = flask.Flask(import_name, **kwargs)

    for code in default_exceptions.iterkeys():
        app.error_handler_spec[None][code] = make_json_error

    return app

app = make_json_app(__name__)

dataset_all_fields = [
    'Category',
    'Description',
    'License',
    'MaintainerContact',
    'MaintainerName',
    'OpennessRating',
    'PublishedOnBehalfOf',
    'Quality',
    'StandardName',
    'StandardRating',
    'StandardVersion',
    'Tags',
    'Theme',
    'Title',
    'UsageGuidance',
]

dataset_mandatory_fields = [
    'Title',
    'Description',
    'MaintainerName',
    'MaintainerContact',
    'License',
    'OpennessRating',
    'Quality',
]

dataset_fields_under_255_characters = [
    'Title',
    'MaintainerName',
    'MaintainerContact',
    'License',
    'Category',
    'PublishedOnBehalfOf',
    'StandardName',
    'StandardVersion',
    'Theme',
    'UsageGuidance',
]


@app.route('/datasets', methods=['POST'])
def request_dataset_create():
    return handle_dataset_request()


@app.route('/datasets', methods=['PUT'])
def request_dataset_update():
    return handle_dataset_request()


def handle_dataset_request():
    data = flask.request.json

    if not data:
        response = flask.jsonify(
            Message='No data received'
        )
        response.status_code = 400
        return response

    # Authorization
    if (not 'Authorization' in flask.request.headers or
            flask.request.headers['Authorization'] == 'unknown_token'):
        response = flask.jsonify(
            Message='Not Authorized'
        )
        response.status_code = 401
        return response

    # Basic Validation
    for field in dataset_mandatory_fields:
        if not data.get(field):
            response = flask.jsonify(
                Message='Missing fields',
                ModelState={
                    'model.' + field:
                    ["The {0} field is required.".format(field)]
                })
            response.status_code = 400
            return response

    for field in dataset_fields_under_255_characters:
        if len(data.get(field, '')) > 255:
            response = flask.jsonify(
                Message='Field too long',
                ModelState={
                    'model.' + field:
                    ["{0} field must be shorter than 255 characters."
                     .format(field)]
                })
            response.status_code = 400
            return response

    # All good, return a request id
    return flask.jsonify(
        RequestId=unicode(uuid.uuid4())
    )


@app.route('/')
def api_description():
    api_desc = {
        'Request dataset creation': 'POST /datasets',
        'Request dataset update': 'PUT /datasets',
    }
    return flask.jsonify(**api_desc)


def run(**kwargs):
    app.run(**kwargs)


if __name__ == '__main__':
    run(port=7070, debug=True)
agpl-3.0
Python
f1cc40c716f1e4f598e0a9230cd188fc897ac117
add config
graycarl/moon
moon/config.py
moon/config.py
# -*- coding: utf-8 -*-
"""
Utilities for implementing a simple project configuration system.
"""
import logging

_confdata = {}


def setconf(prjname, confile, confdict={}):
    _confdata[prjname] = (confile, confdict)


def exportconf(prjname, globals):
    """Export configuration from a file and a dict.

    >>> open("/tmp/testmoonconf.py", "w").write("OSOS = 10")
    >>> setconf("hongbo", "/tmp/testmoonconf.py", {"OSOSOS": 321})
    >>> d = {}
    >>> exportconf("hongbo", d)
    >>> print d["OSOS"]
    10
    >>> print d["OSOSOS"]
    321
    """
    try:
        filename, confdict = _confdata[prjname]
    except KeyError as e:
        e.strerror = "Unable to find confdata for '%s', " \
                     "you must `setconf` first" % prjname
        raise

    try:
        with open(filename) as config_file:
            exec(compile(config_file.read(), filename, "exec"), globals)
        logging.info("Load config from %s", filename)
    except IOError as e:
        e.strerror = 'Unable to load configuration file (%s)' % e.strerror
        raise

    if confdict:
        globals.update(confdict)


if __name__ == "__main__":
    import sys, os
    sys.path.remove(os.path.abspath(os.path.dirname(__file__)))
    import doctest
    doctest.testmod()
mit
Python
106a339561f5b79e0cd9508246d2f8da227c4fdc
move file to folder
adwardlee/tf_utils,adwardlee/tf_utils
move_hmdb51.py
move_hmdb51.py
import argparse
import os
import sys
import math
import cv2
import numpy as np
import multiprocessing
import re
import shutil

parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, help="video image list",
                    default='/media/llj/storage/tvcj/hmdbcnn3_test')
parser.add_argument('--origin_file_dir', type=str,
                    default='/media/llj/storage/hmdb51')

args = parser.parse_args()

txt_files = []

for root, folders, filenames in os.walk(args.data_dir):
    for filename in filenames:
        txt_files.append(str(filename))

print ' 1 ', txt_files[0]

class_name = os.listdir(args.origin_file_dir)
for name in class_name:
    if not os.path.exists(args.data_dir + '/' + name):
        os.makedirs(args.data_dir + '/' + name)

for root, folders, filename in os.walk(args.origin_file_dir):
    for folder in folders:
        folder_dir = os.path.join(root, folder)
        avi_files = os.listdir(folder_dir)
        #print ' avi 1', avi_files[0]
        for txt in txt_files:
            if txt[:-4] in str(avi_files):
                shutil.move(args.data_dir + '/' + txt,
                            args.data_dir + '/' + folder + '/' + txt)
mit
Python
6349d8acfd76fc893dfdb6a7c12aebfe9ec1bac9
add plexpy/Plex.tv
pannal/Subliminal.bundle,pannal/Subliminal.bundle,pannal/Subliminal.bundle
Contents/Libraries/Shared/subzero/lib/auth.py
Contents/Libraries/Shared/subzero/lib/auth.py
# coding=utf-8

# thanks, https://github.com/drzoidberg33/plexpy/blob/master/plexpy/plextv.py

import base64

# NOTE: these modules come from the plexpy project referenced above
import plexpy
from plexpy import http_handler, logger


class PlexTV(object):
    """
    Plex.tv authentication
    """

    def __init__(self, username=None, password=None):
        self.protocol = 'HTTPS'
        self.username = username
        self.password = password
        self.ssl_verify = plexpy.CONFIG.VERIFY_SSL_CERT

        self.request_handler = http_handler.HTTPHandler(host='plex.tv',
                                                        port=443,
                                                        token=plexpy.CONFIG.PMS_TOKEN,
                                                        ssl_verify=self.ssl_verify)

    def get_plex_auth(self, output_format='raw'):
        uri = '/users/sign_in.xml'
        base64string = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '')
        headers = {'Content-Type': 'application/xml; charset=utf-8',
                   'Content-Length': '0',
                   'X-Plex-Device-Name': 'PlexPy',
                   'X-Plex-Product': 'PlexPy',
                   'X-Plex-Version': 'v0.1 dev',
                   'X-Plex-Client-Identifier': plexpy.CONFIG.PMS_UUID,
                   'Authorization': 'Basic %s' % base64string + ":"
                   }

        request = self.request_handler.make_request(uri=uri,
                                                    proto=self.protocol,
                                                    request_type='POST',
                                                    headers=headers,
                                                    output_format=output_format)

        return request

    def get_token(self):
        plextv_response = self.get_plex_auth(output_format='xml')

        if plextv_response:
            xml_head = plextv_response.getElementsByTagName('user')
            if not xml_head:
                logger.warn("Error parsing XML for Plex.tv token")
                return []

            auth_token = xml_head[0].getAttribute('authenticationToken')

            return auth_token
        else:
            return []
mit
Python
d0c2ee2e0d848a586cc03ba5ac5da697b333ef32
Create list of random num
JLJTECH/TutorialTesting
Misc/listOfRandomNum.py
Misc/listOfRandomNum.py
# List of randoms
import random
import math

numList = []

for i in range(10):
    numList.append(random.randrange(1, 20))

for i in numList:
    print("Rand num = " + str(i))
mit
Python
9f508a429949d59f9969cc1e17a9094fa7c2441d
Create routines.py
evuez/mutations
routines.py
routines.py
mit
Python
85abbe29c7c764deac75b6e7b95e1ccec645d84b
Add icmp_ping ansible module
coolsvap/clapper,coolsvap/clapper,rthallisey/clapper,coolsvap/clapper,rthallisey/clapper
ansible-tests/validations/library/icmp_ping.py
ansible-tests/validations/library/icmp_ping.py
#!/usr/bin/env python

DOCUMENTATION = '''
---
module: icmp_ping
short_description: ICMP ping remote hosts
requirements: [ ping ]
description:
    - Check host connectivity with ICMP ping.
options:
    host:
        required: true
        description:
            - IP address or hostname of host to ping
        type: str
author: "Martin Andre (@mandre)"
'''

EXAMPLES = '''
# Ping host:
- icmp: name=somegroup state=present

- hosts: webservers
  tasks:
    - name: Check Internet connectivity
      ping: host="www.ansible.com"
'''


def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
        )
    )

    failed = False
    host = module.params.pop('host')
    result = module.run_command('ping -c 1 {}'.format(host))[0]
    if result != 0:
        failed = True

    module.exit_json(failed=failed, changed=False)


from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
apache-2.0
Python
d3937b803baf036d5bd96dfcb1e10e51b29bab1e
Create migration
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
fellowms/migrations/0023_event_ad_status.py
fellowms/migrations/0023_event_ad_status.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-06 13:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('fellowms', '0022_fellow_user'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='ad_status',
            field=models.CharField(choices=[('U', 'Unprocessed'), ('V', 'Visible'), ('H', 'Hide'), ('A', 'Archived')], default='U', max_length=1),
        ),
    ]
bsd-3-clause
Python
0781070ee0c17a34a3cc9521e8a6b67c401aa692
Add WGAN Tests
googleinterns/audio_synthesis
models/wgan_test.py
models/wgan_test.py
# Lint as: python3
"""Tests for WGAN model."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import os

import wgan


class SpectralTest(tf.test.TestCase):

    def test_interpolation_2d(self):
        x1 = np.random.normal(size=(10, 256))
        x2 = np.random.normal(size=(10, 256))

        interpolation = wgan._get_interpolation(x1, x2)
        self.assertShapeEqual(x1, interpolation)

    def test_interpolation_3d(self):
        x1 = np.random.normal(size=(10, 256, 32))
        x2 = np.random.normal(size=(10, 256, 32))

        interpolation = wgan._get_interpolation(x1, x2)
        self.assertShapeEqual(x1, interpolation)


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = ''
    tf.test.main()
apache-2.0
Python
e90d12802ff62738cbe4094e8db079f6519f47a5
Create BDayGift.py
tejasnikumbh/Algorithms,tejasnikumbh/Algorithms,tejasnikumbh/Algorithms
Probability/BDayGift.py
Probability/BDayGift.py
import sys

n = int(sys.stdin.readline())
S = 0
for i in range(n):
    S += int(sys.stdin.readline())
print(S/2.0)
bsd-2-clause
Python
45f91a92fd3ae08dd7403707f3981f306122eb6c
test task creation
ScorpionResponse/freelancefinder,ScorpionResponse/freelancefinder,ScorpionResponse/freelancefinder
freelancefinder/remotes/tests/test_tasks.py
freelancefinder/remotes/tests/test_tasks.py
"""Tests related to the remotes.tasks functions.""" from django_celery_beat.models import IntervalSchedule, PeriodicTask from ..tasks import setup_periodic_tasks def test_make_tasks(): """Ensure that setup makes some tasks/schedules.""" setup_periodic_tasks(None) intervals = IntervalSchedule.objects.all().count() tasks = PeriodicTask.objects.all().count() assert intervals > 0 assert tasks > 0
bsd-3-clause
Python
cd3f59026b9026d62537b38d4e9d70a740e88018
Add tests for java mode
jpfxgood/ped
tests/test_java_mode.py
tests/test_java_mode.py
import editor_manager
import editor_common
import curses
import curses.ascii
import keytab
from ped_test_util import read_str, validate_screen, editor_test_suite, play_macro, screen_size, match_attr


def test_java_mode(testdir, capsys):
    with capsys.disabled():
        def main(stdscr):
            lines_to_test = [
                '// This is a simple Java program.',
                '// FileName : "HelloWorld.java"',
                'class HelloWorld',
                '{',
                '    // Your program begins with a call to main()',
                '    // Prints "Hello, World" to the terminal window',
                '    public static void main(String args[])',
                '    {',
                '        System.out.println("Hello, World");',
                '    }',
                '}'
                ]
            args = {"java_test": "\n".join(lines_to_test)}
            testfile = testdir.makefile(".java", **args)
            green = curses.color_pair(1)
            red = curses.color_pair(2)
            cyan = curses.color_pair(3)
            white = curses.color_pair(4)
            ed = editor_common.Editor(stdscr, None, str(testfile))
            ed.setWin(stdscr.subwin(ed.max_y, ed.max_x, 0, 0))
            ed.main(False)
            ed.main(False)
            validate_screen(ed)
            assert(ed.mode and ed.mode.name() == "java_mode")
            match_list = [(0, 0, 32, red), (2, 0, 5, cyan), (4, 4, 44, red), (8, 27, 14, green)]
            for line, pos, width, attr in match_list:
                assert(match_attr(ed.scr, line+1, pos, 1, width, attr))
            ed.goto(7, 5)
            ed.endln()
            ed.main(False, 10)
            assert(ed.getLine() == 8 and ed.getPos() == 4)
            ed.insert('if (20 > 18) {')
            ed.main(False, 10)
            ed.insert('System.out.println("20 greater than 18");')
            ed.main(False, 10)
            ed.insert('}')
            ed.main(False, 10)
            ed.main(False)
            ed.main(False)
            assert(match_attr(ed.scr, 9, 4, 1, 2, cyan))
            assert(match_attr(ed.scr, 10, 27, 1, 20, green))
            assert(ed.getLine() == 11 and ed.getPos() == 4)

        curses.wrapper(main)
mit
Python
f03f976696077db4146ea78e0d0b1ef5767f00ca
Add high level signing capabilities
cachedout/libnacl,saltstack/libnacl,johnttan/libnacl,mindw/libnacl,coinkite/libnacl,RaetProtocol/libnacl
tests/unit/test_sign.py
tests/unit/test_sign.py
# Import libnacl libs
import libnacl.sign

# Import pythonlibs
import unittest


class TestSigning(unittest.TestCase):
    '''
    '''
    def test_sign(self):
        msg = ('Well, that\'s no ordinary rabbit. That\'s the most foul, '
               'cruel, and bad-tempered rodent you ever set eyes on.')
        signer = libnacl.sign.Signer()
        signed = signer.sign(msg)
        self.assertNotEqual(msg, signed)
        veri = libnacl.sign.Verifier(signer.hex_vk())
        verified = veri.verify(signed)
        self.assertEqual(verified, msg)
apache-2.0
Python
f6609763f832cd5672e40d1dfe8f7dc7c58ca7c5
Create diarygui.py
bambooom/OMOOC2py,bambooom/OMOOC2py
_src/om2py2w/2wex0/diarygui.py
_src/om2py2w/2wex0/diarygui.py
# -*- coding: utf-8 -*-
# ------------2w task:simple diary GUI-----------
# --------------created by bambooom--------------

from Tkinter import *  # import Tkinter module
from ScrolledText import *  # ScrolledText module = Text Widget + scrollbar

global newlog


class Application(Frame):  # basic frame
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()

    def createWidgets(self):  # widgets
        newlog = StringVar()

        l = Label(self, text="Input here: ")  # Label widget prompting for input
        l.grid(row=0, column=0, sticky=W)

        e = Entry(self, textvariable=newlog, width=80)  # Entry box for input
        e.grid(row=0, column=1, sticky=W)

        t = ScrolledText(self)  # ScrolledText box that prints out the diary
        t.grid(columnspan=2, sticky=W)

        b = Button(self, text="QUIT", fg="red", command=self.quit)  # quit button
        b.grid(row=2, column=0, sticky=W)


root = Tk()
root.title('MyDiary Application')
app = Application(root)
# main event loop:
app.mainloop()
mit
Python
c43c7d523ddbb5b914748a20d55971fbf1c12496
Create oauth2token.py
leydaniel/barcode-attendance
oauth2token.py
oauth2token.py
#!/usr/bin/python
'''
This script will attempt to open your webbrowser,
perform OAuth 2 authentication and print your access token.

It depends on two libraries: oauth2client and gflags.

To install dependencies from PyPI:

$ pip install python-gflags oauth2client

Then run this script:

$ python get_oauth2_token.py

This is a combination of snippets from:
https://developers.google.com/api-client-library/python/guide/aaa_oauth
'''

import sys
sys.path.append('/usr/lib/python2.7/dist-packages')

from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from oauth2client.file import Storage

CLIENT_ID = '411103951529-nf611s2285n12mmqrkigq3ckgkac1gmv.apps.googleusercontent.com'
CLIENT_SECRET = 'uDKCenlmvo1desQfylHIUnYr'

flow = OAuth2WebServerFlow(client_id=CLIENT_ID,
                           client_secret=CLIENT_SECRET,
                           scope='https://spreadsheets.google.com/feeds https://docs.google.com/feeds',
                           redirect_uri='http://example.com/auth_return')

storage = Storage('creds.data')

credentials = run(flow, storage)

print ("access_token: %s") % credentials.access_token
mit
Python
31cdb65a8d370c6f309ad610aa3b969d5bfb8706
Add follow_bot.py
ismailsunni/TweetJaran,ismailsunni/TweetJaran
follow_bot.py
follow_bot.py
"""Follow bot, to follow some followers from an account """ __date__ = '08/01/2014' __author__ = '@ismailsunni' import tweepy import constants # constants consumer_key = constants.consumer_key consumer_secret = constants.consumer_secret access_key = constants.access_key access_secret = constants.access_secret def need_to_follow(user): statuses_count = user.statuses_count followers_count = user.followers_count friends_count = user.friends_count created_at = user.created_at # last_status_time = user.status.created_at if followers_count > friends_count: return True else: return False def main(): auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_key, access_secret) api = tweepy.API(auth) # accounts = ['sarapanhaticom'] accounts = ['rischanmafrur'] for account in accounts: followers = api.followers(account) print followers for follower in followers: if need_to_follow(follower): print follower.screen_name try: friend = api.create_friendship(follower.screen_name) if friend.screen_name == follower.screen_name: print 'Follow ' + follower.name + ' success' else: print 'Follow ' + follower.name + ' failed' except tweepy.TweepError, e: print e print 'benar' if __name__ == '__main__': main()
mit
Python
30f18a4be667b02f8d0f6c2f2bf97146992d3208
Add first version of OpenCV core
sparsebase/stromx,sparsebase/stromx,uboot/stromx,uboot/stromx
opencv/core.py
opencv/core.py
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 18:19:38 2013

@author: matz
"""

import cvtype
import datatype
import document
import generator
import package
import test

# abbreviations
dt = test.Default()

# utility functions
dcl = document.Document()
dcl.line("void checkEnumValue(const stromx::runtime::Enum & value, "
         "const stromx::runtime::EnumParameter* param, "
         "const stromx::runtime::OperatorKernel& op);")
dclIncludes = ["<stromx/runtime/Enum.h>",
               "<stromx/runtime/EnumParameter.h>",
               "<stromx/runtime/OperatorKernel.h>"]
dtn = document.Document()
dtn.line("void checkEnumValue(const stromx::runtime::Enum & value, "
         "const stromx::runtime::EnumParameter* param, "
         "const stromx::runtime::OperatorKernel& op)")
dtn.scopeEnter()
dtn.line("using namespace runtime;")
dtn.blank()
dtn.line("for(std::vector<EnumDescription>::const_iterator "
         "iter = param->descriptions().begin(); iter != "
         "param->descriptions().end(); ++iter)")
dtn.scopeEnter()
dtn.line(" if(value == iter->value())")
dtn.line("return;")
dtn.scopeExit()
dtn.line("throw stromx::runtime::WrongParameterValue(*param, op);")
dtn.scopeExit()
dtnIncludes = ["<stromx/runtime/OperatorException.h>"]
checkEnumValue = package.Function(dcl, dclIncludes, dtn, dtnIncludes)

dcl = document.Document()
dclIncludes = ["<stromx/runtime/NumericParameter.h>",
               "<stromx/runtime/OperatorException.h>"]
dcl.line("template<class T>")
dcl.line("void checkNumericValue(const T & value, const "
         "runtime::NumericParameter<T>* param, "
         "const stromx::runtime::OperatorKernel& op)")
dcl.scopeEnter()
dcl.line("if(value < runtime::data_cast<T>(param->min()))")
dcl.increaseIndent()
dcl.line("throw runtime::WrongParameterValue(*param, op);")
dcl.decreaseIndent()
dcl.line("if(value > runtime::data_cast<T>(param->max()))")
dcl.increaseIndent()
dcl.line("throw runtime::WrongParameterValue(*param, op);")
dcl.decreaseIndent()
dcl.scopeExit()
checkNumericValue = package.Function(dcl, dclIncludes)

# initializations
initInCopy = document.Document(
    ("{1}->initializeImage({0}->width(), {0}->height(), {0}->stride(), "
     "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData"))
initOutCopy = document.Document(
    ("{1}->initializeImage({1}->width(), {1}->height(), {1}->stride(), "
     "{1}->data(), {0}->pixelType());").format("srcCastedData", "dstCastedData"))

# arguments
srcImg1 = package.Argument(
    "src1", "Source 1", cvtype.Mat(), datatype.Image()
)
srcImg2 = package.Argument(
    "src2", "Source 2", cvtype.Mat(), datatype.Image()
)
dstImg = package.Argument(
    "dst", "Destination", cvtype.Mat(), datatype.Image(),
    initIn=initInCopy, initOut=initOutCopy
)

# test data
lenna = test.ImageFile("lenna.jpg")
memory = test.ImageBuffer(1000000)

# add
manual = package.Option(
    "manual", "Manual",
    [package.Input(srcImg1), package.Input(srcImg2), package.Output(dstImg)],
    tests=[
        [lenna, lenna, memory]
    ]
)
allocate = package.Option(
    "allocate", "Allocate",
    [package.Input(srcImg1), package.Input(srcImg2), package.Allocation(dstImg)],
    tests=[
        [lenna, lenna, dt]
    ]
)
add = package.Method(
    "add", options=[manual, allocate]
)

core = package.Package(
    "core", 0, 0, 1,
    methods=[
        add
    ],
    functions=[
        checkEnumValue,
        checkNumericValue
    ],
    testFiles=[
        "lenna.jpg"
    ]
)

generator.generatePackageFiles(core)
apache-2.0
Python
e26be1cdee6b40896e7ee5c2a894fba05fc58480
Add traceview directory.
danriti/python-traceview
traceview/__init__.py
traceview/__init__.py
# -*- coding: utf-8 -*-

"""
TraceView API library

:copyright: (c) 2014 by Daniel Riti.
:license: MIT, see LICENSE for more details.

"""

__title__ = 'traceview'
__version__ = '0.1.0'
__author__ = 'Daniel Riti'
__license__ = 'MIT'
mit
Python
b28d2933ac1b5c6375f9dd5142f467a06bd69463
add a simple plot script to visualize the distribution
BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH
Utils/py/BallDetection/Evaluation/plot_csv.py
Utils/py/BallDetection/Evaluation/plot_csv.py
import matplotlib.pyplot as plt
import sys

import numpy as np

scores = np.genfromtxt(sys.argv[1], usecols=(1), skip_header=1, delimiter=",")
scores = np.sort(scores)

plt.style.use('seaborn')
plt.plot(scores)
plt.show()
apache-2.0
Python
6d910181758008d05de3917fdac5b35b34188a8e
add RebootNodeWithPCU call. fails gracefully if dependencies are not met.
dreibh/planetlab-lxc-plcapi,dreibh/planetlab-lxc-plcapi,dreibh/planetlab-lxc-plcapi,dreibh/planetlab-lxc-plcapi
PLC/Methods/RebootNodeWithPCU.py
PLC/Methods/RebootNodeWithPCU.py
import socket

from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Nodes import Node, Nodes
from PLC.NodeNetworks import NodeNetwork, NodeNetworks
from PLC.PCUs import PCU, PCUs
from PLC.Auth import Auth
from PLC.POD import udp_pod

try:
    from pcucontrol import reboot
    external_dependency = True
except:
    external_dependency = False


class RebootNodeWithPCU(Method):
    """
    Uses the associated PCU to attempt to reboot the given Node.

    Admins can reboot any node. Techs and PIs can only reboot nodes at
    their site.

    Returns 1 if the reboot proceeded without error (Note: this does not
    guarantee that the reboot is successful).
    Returns -1 if external dependencies for this call are not available.
    Returns "error string" if the reboot failed with a specific message.
    """

    roles = ['admin', 'pi', 'tech']

    accepts = [
        Auth(),
        Mixed(Node.fields['node_id'],
              Node.fields['hostname'])
        ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, node_id_or_hostname):
        # Get account information
        nodes = Nodes(self.api, [node_id_or_hostname])
        if not nodes:
            raise PLCInvalidArgument, "No such node"

        node = nodes[0]

        # Authenticated function
        assert self.caller is not None

        # If we are not an admin, make sure that the caller is a
        # member of the site at which the node is located.
        if 'admin' not in self.caller['roles']:
            if node['site_id'] not in self.caller['site_ids']:
                raise PLCPermissionDenied, "Not allowed to reboot nodes from specified site"

        # Verify that the node has pcus associated with it.
        pcus = PCUs(self.api, {'pcu_id': node['pcu_ids']})
        if not pcus:
            raise PLCInvalidArgument, "No PCUs associated with Node"

        pcu = pcus[0]

        if not external_dependency:
            raise PLCNotImplemented, "Could not load external module to attempt reboot"

        # model, hostname, port,
        # i = pcu['node_ids'].index(node['node_id'])
        # p = pcu['ports'][i]
        ret = reboot.reboot_api(node, pcu)

        self.event_objects = {'Node': [node['node_id']]}
        self.message = "RebootNodeWithPCU called"

        return ret
bsd-3-clause
Python
f75e1397735adcbd39dbc90a0446b9efd9532be4
add initial python script to handle button events that trigger the node process
FrankZZ/awesome-selfie-machine,FrankZZ/awesome-selfie-machine,FrankZZ/awesome-selfie-machine
bin/selfie.py
bin/selfie.py
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
from subprocess import call

GPIO.setmode(GPIO.BCM)

BUTTON = 18

GPIO.setup(BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)

while True:
    input_state = GPIO.input(BUTTON)
    if input_state == False:
        print('Button Pressed')
        call(["node", "./index.js"])
        time.sleep(1)
    time.sleep(0.05)  # short pause so the polling loop does not peg the CPU
isc
Python
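If the polling loop above proves too coarse, RPi.GPIO also supports blocking edge detection; a sketch assuming the same wiring (BCM pin 18 with pull-up):

import RPi.GPIO as GPIO
from subprocess import call

GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
while True:
    # sleeps in the kernel until the button pulls the pin low
    GPIO.wait_for_edge(18, GPIO.FALLING)
    call(["node", "./index.js"])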
fb83969c6467e288ff16661aec2eafc174bdf124
correct fieldsight form issue fix
awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat
onadata/apps/fsforms/management/commands/set_correct_fxf_in_finstance.py
onadata/apps/fsforms/management/commands/set_correct_fxf_in_finstance.py
from django.db import transaction
from django.core.management.base import BaseCommand

from onadata.apps.fieldsight.models import Site
from onadata.apps.fsforms.models import FieldSightXF, FInstance
from onadata.apps.viewer.models.parsed_instance import update_mongo_instance


class Command(BaseCommand):
    help = 'Deploy Stages'

    def handle(self, *args, **options):
        organization_id = 13
        # project_id = 30
        sites = Site.objects.filter(project__organization__id=organization_id).values_list('id', flat=True)
        for site_id in sites:
            # self.stdout.write('Operating in site ' + str(site_id))
            with transaction.atomic():
                finstances = FInstance.objects.filter(site_id=site_id, site_fxf_id__isnull=False)
                for fi in finstances:
                    site_fsxf = fi.site_fxf
                    if site_fsxf.site.id != site_id:
                        correct_form = FieldSightXF.objects.get(site__id=site_id, is_staged=True, fsform=fi.project_fxf)
                        fi.site_fxf = correct_form
                        fi.save()
                        parsed_instance = fi.instance.parsed_instance
                        d = parsed_instance.to_dict_for_mongo()
                        d.update({'fs_uuid': correct_form.id})
                        update_mongo_instance(d)
        self.stdout.write('Successfully corrected form')
bsd-2-clause
Python
0f76875400ea1a03a23a4b266eb0ca9bf574922d
implement 9 (9) Sort the lines in reverse lexicographic order, using column 2 as the primary key and column 1 as the secondary key (note: do not modify the contents of the lines). Verify with the sort command (the result does not have to match exactly).
mihyaeru21/nlp100
set01/09.py
set01/09.py
# -*- coding: utf-8 -*-
# (9) Sort the lines in reverse lexicographic order, using column 2 as the
#     primary key and column 1 as the secondary key (note: do not modify the
#     contents of the lines). Verify with the sort command (the result does
#     not have to match exactly).

import sys

lines = [line.decode('utf-8').rstrip(u'\r\n') for line in sys.stdin.readlines()]
# Two stable sorts: secondary key (column 1) first, then primary key (column 2),
# both in reverse order.
lines = sorted(lines, key = lambda l: l.split(u'\t')[0], reverse = True)
lines = sorted(lines, key = lambda l: l.split(u'\t')[1], reverse = True)
for line in lines:
    print line.encode('utf-8')
unlicense
Python
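The two stable sorts above can also be collapsed into a single call with a tuple key; an equivalent shell check is `sort -t$'\t' -k2,2r -k1,1r`. A one-pass sketch of the same ordering:

lines = sorted(lines, key=lambda l: (l.split(u'\t')[1], l.split(u'\t')[0]), reverse=True)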
d654bf0fb0c5e3fc7a11029a216c109b5f04d37b
Add __init__ file
OpenSourcePolicyCenter/taxdata
taxdata/cps/__init__.py
taxdata/cps/__init__.py
# flake8: noqa
from taxdata.cps import benefits
from taxdata.cps import cps_meta
from taxdata.cps import cpsmar
from taxdata.cps.create import create
from taxdata.cps.finalprep import finalprep
from taxdata.cps import helpers
from taxdata.cps import impute
from taxdata.cps import pycps
from taxdata.cps import splitincome
from taxdata.cps import targeting
from taxdata.cps import taxunit
from taxdata.cps import validation
from taxdata.cps import constants
mit
Python
1fddb845ad99bb65aa7b86155d899043a64ebdcf
Update app/views/main/views.py
apipanda/openssl,apipanda/openssl,apipanda/openssl,apipanda/openssl
app/views/main/views.py
app/views/main/views.py
from flask import current_app as app
from flask import flash
from flask import redirect
from flask import render_template
from flask import url_for
from flask_login import current_user
from flask_login import login_required

from . import main
from .forms import SearchForm
from ..api.utils import _search


@main.route('/', methods=['GET', 'POST'])
def index():
    return render_template('index.html')


@main.route('/dashboard', methods=['GET'])
@login_required
def dashboard():
    bio = app.db.bios.count()
    payroll = app.db.payrolls.count()
    work = app.db.work_histories.count()
    context = {
        'counter': {
            'Bio': bio,
            'Payrolls': payroll,
            'Work Histories': work,
            'Mortgages': 0,
            'Rents': 0,
            'Utilities': 0,
            'Loans': 0,
            'Education Histories': 0
        },
        'total_records': bio + payroll + work
    }
    context.update(labels=list(context['counter'].keys()),
                   values=list(context['counter'].values()))
    return render_template('main/dashboard.html', **context)


@main.route('/search', methods=['GET', 'POST'])
@login_required
def search():
    context = {}
    form = SearchForm()
    if form.validate_on_submit():
        bvn = form.bvn.data
        context.update(bvn=form.bvn.data)
        result = _search(bvn, app)
        if result.get('status') == 'error':
            flash(result.get('message'), 'error')
        context.update(enrollee=result)
    else:
        for error in form.errors.values():
            if isinstance(error, list):
                for e in error:
                    flash(e, 'error')
            else:
                flash(error, 'error')
    return render_template('search/results.html', **context)
mit
Python
938a9548b6503136b82fd248258df5f4e0523f8a
add sorting_algorithms.py
Nethermaker/school-projects
adv/sorting_algorithms.py
adv/sorting_algorithms.py
# Sorting Algorithms
import random
import time

my_list = range(10000)
random.shuffle(my_list)
#print sorted(my_list)

#We have a way to sort information.
# But how did it do that?

###################################################################

# What does "efficiency" mean in terms of a program?
# 1. Running time. Does it take a really long time to run?
# 2. Resources. (Memory, Power)
# 3. Lines of code
# 4. Manpower

def is_sorted(lst):
    # Note: recursion depth grows with len(lst), so Python's default
    # recursion limit (~1000) makes this unsafe for long lists.
    if len(lst) <= 1:
        return True
    else:
        return lst[0] <= lst[1] and is_sorted(lst[1:])

def stupid_sort(lst):
    while not is_sorted(lst):
        random.shuffle(lst)
    return lst

def dumb_sort(lst):
    number_list = [None] * 10000000
    for number in lst:
        number_list[number] = number
    sorted_list = []
    for thing in number_list:
        if thing is not None:  # 'if thing:' would silently drop the value 0
            sorted_list.append(thing)
    return sorted_list

def insertion_sort(lst):
    new_list = [lst[0]]
    for element in lst[1:]:
        for index, new_element in enumerate(new_list):
            if element <= new_element:
                new_list.insert(index, element)
                break
        else:
            new_list.append(element)
    return new_list

def selection_sort(lst):
    new_list = []
    length = len(lst)
    while len(new_list) != length:
        element = min(lst)
        lst.remove(element)
        new_list.append(element)
    return new_list

def merge(left, right):
    new_list = []
    while len(left) > 0 and len(right) > 0:
        if left[0] <= right[0]:
            new_list.append(left.pop(0))
        else:
            new_list.append(right.pop(0))
    return new_list + left + right

def merge_sort(lst):
    if len(lst) <= 1:
        return lst
    else:
        middle = len(lst) / 2
        return merge(merge_sort(lst[:middle]), merge_sort(lst[middle:]))

start = time.time()
answer = merge_sort(my_list)
end = time.time()
print 'It took {} seconds!'.format(end-start)
mit
Python
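A small sketch of a timing harness for comparing the implementations above on identical input (stupid_sort is omitted because its expected running time is factorial in the list length):

import random
import time

def benchmark(sort_fn, n=2000):
    data = range(n)          # Python 2: range returns a list
    random.shuffle(data)
    start = time.time()
    sort_fn(list(data))      # each function gets its own copy
    return time.time() - start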
bb7031385af7931f9e12a8987375f929bcfb6b5a
Create script that checks for dev and docs dependencies.
justacec/bokeh,schoolie/bokeh,aiguofer/bokeh,ChinaQuants/bokeh,lukebarnard1/bokeh,roxyboy/bokeh,jakirkham/bokeh,khkaminska/bokeh,srinathv/bokeh,msarahan/bokeh,Karel-van-de-Plassche/bokeh,CrazyGuo/bokeh,rothnic/bokeh,clairetang6/bokeh,quasiben/bokeh,birdsarah/bokeh,azjps/bokeh,mindriot101/bokeh,khkaminska/bokeh,timothydmorton/bokeh,evidation-health/bokeh,msarahan/bokeh,paultcochrane/bokeh,ericmjl/bokeh,ChinaQuants/bokeh,bokeh/bokeh,laurent-george/bokeh,ericdill/bokeh,timsnyder/bokeh,tacaswell/bokeh,abele/bokeh,rhiever/bokeh,percyfal/bokeh,tacaswell/bokeh,stonebig/bokeh,aiguofer/bokeh,daodaoliang/bokeh,dennisobrien/bokeh,Karel-van-de-Plassche/bokeh,clairetang6/bokeh,xguse/bokeh,draperjames/bokeh,barberscore-like,dennisobrien/bokeh,barberscore-like,aavanian/bokeh,bsipocz/bokeh,tacaswell/bokeh,lukebarnard1/bokeh,maxalbert/bokeh,ChristosChristofidis/bokeh,paultcochrane/bokeh,ptitjano/bokeh,bsipocz/bokeh,awanke/bokeh,roxyboy/bokeh,CrazyGuo/bokeh,azjps/bokeh,josherick/bokeh,stonebig/bokeh,carlvlewis/bokeh,xguse/bokeh,percyfal/bokeh,stuart-knock/bokeh,bokeh/bokeh,stuart-knock/bokeh,caseyclements/bokeh,ericmjl/bokeh,ptitjano/bokeh,gpfreitas/bokeh,DuCorey/bokeh,daodaoliang/bokeh,schoolie/bokeh,dennisobrien/bokeh,schoolie/bokeh,birdsarah/bokeh,ChinaQuants/bokeh,bokeh/bokeh,abele/bokeh,stonebig/bokeh,htygithub/bokeh,rothnic/bokeh,alan-unravel/bokeh,percyfal/bokeh,aavanian/bokeh,awanke/bokeh,abele/bokeh,paultcochrane/bokeh,ericmjl/bokeh,schoolie/bokeh,PythonCharmers/bokeh,msarahan/bokeh,phobson/bokeh,phobson/bokeh,draperjames/bokeh,muku42/bokeh,philippjfr/bokeh,canavandl/bokeh,schoolie/bokeh,muku42/bokeh,akloster/bokeh,ChristosChristofidis/bokeh,htygithub/bokeh,jakirkham/bokeh,srinathv/bokeh,DuCorey/bokeh,akloster/bokeh,xguse/bokeh,roxyboy/bokeh,rs2/bokeh,clairetang6/bokeh,carlvlewis/bokeh,saifrahmed/bokeh,aavanian/bokeh,justacec/bokeh,rs2/bokeh,mutirri/bokeh,aavanian/bokeh,caseyclements/bokeh,timsnyder/bokeh,timothydmorton/bokeh,eteq/bokeh
scripts/devdeps.py
scripts/devdeps.py
from __future__ import print_function

import sys

try:
    import colorama

    def blue(text):
        return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)

    def red(text):
        return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
except ImportError:
    def blue(text):
        return text

    def red(text):
        return text


def depend_check(deps_name, *args):
    """Check for missing dependencies
    """
    found = True
    missing = []
    for dependency in args:
        try:
            __import__(dependency)
        except ImportError as e:
            missing.append(dependency)
            found = False
    print('-'*80)
    if not found:
        print(red("You are missing the following %s dependencies:") % deps_name)
        for dep in missing:
            name = pkg_info_dict.get(dep, dep)
            print(" * ", name)
        print()
        return False
    else:
        print(blue("All %s dependencies installed! You are good to go!\n") % deps_name)
        return True


if __name__ == '__main__':

    #Dictionary maps module names to package names
    pkg_info_dict = {'bs4' : 'beautiful-soup',
                     'websocket' : 'websocket-client',
                     'sphinx_bootstrap_theme' : 'sphinx-bootstrap-theme',
                     'sphinxcontrib.httpdomain' : 'sphinxcontrib-httpdomain',
                     'pdiffer' : 'pdiff'
                     }

    dev_deps = ['bs4', 'colorama', 'pdiffer', 'boto', 'nose', 'mock',
                'coverage', 'websocket']
    depend_check('Dev', *dev_deps)

    docs_deps = ['graphviz', 'sphinx', 'pygments', 'sphinx_bootstrap_theme',
                 'sphinxcontrib.httpdomain']
    depend_check('Docs', *docs_deps)
bsd-3-clause
Python
a23e08275652f7356863edada51e7dee345a2dfc
Add functools from Python trunk r65615
python-mechanize/mechanize,python-mechanize/mechanize
test-tools/functools.py
test-tools/functools.py
"""functools.py - Tools for working with functions and callable objects """ # Python module wrapper for _functools C module # to allow utilities written in Python to be added # to the functools module. # Written by Nick Coghlan <ncoghlan at gmail.com> # Copyright (C) 2006 Python Software Foundation. # See C source code for _functools credits/copyright from _functools import partial, reduce # update_wrapper() and wraps() are tools to help write # wrapper functions that can handle naive introspection WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__') WRAPPER_UPDATES = ('__dict__',) def update_wrapper(wrapper, wrapped, assigned = WRAPPER_ASSIGNMENTS, updated = WRAPPER_UPDATES): """Update a wrapper function to look like the wrapped function wrapper is the function to be updated wrapped is the original function assigned is a tuple naming the attributes assigned directly from the wrapped function to the wrapper function (defaults to functools.WRAPPER_ASSIGNMENTS) updated is a tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function (defaults to functools.WRAPPER_UPDATES) """ for attr in assigned: setattr(wrapper, attr, getattr(wrapped, attr)) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) # Return the wrapper so this can be used as a decorator via partial() return wrapper def wraps(wrapped, assigned = WRAPPER_ASSIGNMENTS, updated = WRAPPER_UPDATES): """Decorator factory to apply update_wrapper() to a wrapper function Returns a decorator that invokes update_wrapper() with the decorated function as the wrapper argument and the arguments to wraps() as the remaining arguments. Default arguments are as for update_wrapper(). This is a convenience function to simplify applying partial() to update_wrapper(). """ return partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated)
bsd-3-clause
Python
d3a652111aa7df0a5ecc429db6aa639f9a667ff9
Create imogen.py
mduckles/CodeClub
imogen.py
imogen.py
mit
Python
ca098b540b171460f41ea66c01d2b0d039feb073
Add arrange combination algorithm
tobegit3hub/tobe-algorithm-manual
arrange_combination/arrange.py
arrange_combination/arrange.py
#!/usr/bin/env python


def arrange(input_list, step):
    # Generate all arrangements (permutations) by swapping each remaining
    # element into position `step`, recursing, then swapping back.
    if step == len(input_list):
        print(input_list)
        return
    for i in range(step, len(input_list)):
        input_list[step], input_list[i] = input_list[i], input_list[step]
        arrange(input_list, step + 1)
        input_list[step], input_list[i] = input_list[i], input_list[step]


def main():
    input_list = ["a", "b", "c"]
    arrange(input_list, 0)


if __name__ == "__main__":
    main()
apache-2.0
Python
f0da1774514c839b4b97fa92d2202437932dc99a
Add a small driver for plotting skeletons.
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
analysis/plot-skeleton.py
analysis/plot-skeleton.py
#!/usr/bin/env python

import climate

import database
import plots


@climate.annotate(
    root='plot data rooted at this path',
    pattern=('plot data from files matching this pattern', 'option'),
)
def main(root, pattern='*/*block02/*trial00*.csv.gz'):
    with plots.space() as ax:
        for trial in database.Experiment(root).trials_matching(pattern):
            plots.skeleton(ax, trial, 100)
            break


if __name__ == '__main__':
    climate.call(main)
mit
Python
060c8a4379aef14459929a47bf62a80a3e7eef67
Create af_setJoints.py
aaronfang/personal_scripts
af_scripts/tmp/af_setJoints.py
af_scripts/tmp/af_setJoints.py
import pymel.core as pm

curSel = pm.ls(sl=True, type='transform')[0]
bBox = pm.xform(curSel, ws=1, q=1, bb=1)
sizeX = abs(bBox[0]-bBox[3])
sizeY = abs(bBox[1]-bBox[4])
sizeZ = abs(bBox[2]-bBox[5])
curPvt = [(bBox[0]+sizeX/2), (bBox[1]+sizeY/2), (bBox[2]+sizeZ/2)]

ccUD = pm.circle(n='circle_rotUpDown', r=sizeY/2, nr=(1, 0, 0))
pm.move(ccUD[0], curPvt)
ccLR = pm.circle(n='circle_rotLeftRight', r=sizeX/2, nr=(0, 1, 0))
pm.move(ccLR[0], curPvt)

pm.select(d=1)
pm.jointDisplayScale(0.1)
pm.joint(p=(0, bBox[1], bBox[2]), n='joint_base')
pm.joint(p=(pm.xform(ccUD, ws=1, q=1, rp=1)), n='joint_rotUpDown')
pm.joint(p=(pm.xform(ccLR, ws=1, q=1, rp=1)), n='joint_rotLeftRight')
mit
Python
bde8b61f419dd6e66a85cc92f3661de6aaadeb94
ADD CHECK FOR YELLING
amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint
proselint/checks/misc/yelling.py
proselint/checks/misc/yelling.py
# -*- coding: utf-8 -*-
"""EES: Too much yelling.

---
layout: post
error_code: SCH
source: ???
source_url: ???
title: yelling
date: 2014-06-10 12:31:19
categories: writing
---

Too much yelling.

"""
from proselint.tools import blacklist

err = "MAU103"
msg = u"Too much yelling."

check = blacklist(["[A-Z]+ [A-Z]+ [A-Z]+"], err, msg, ignore_case=False)
bsd-3-clause
Python
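A quick standalone check of the pattern itself, using plain re rather than proselint's blacklist wrapper:

import re

assert re.search(r"[A-Z]+ [A-Z]+ [A-Z]+", "WHY ARE WE SHOUTING")
assert re.search(r"[A-Z]+ [A-Z]+ [A-Z]+", "Only One CAP word") is None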
26e7e7b270bfd5e08cf871f7d89b5a92b07df230
add migration file
adandan01/contmon,adandan01/contmon,adandan01/contmon,adandan01/contmon
contmon/scraper/migrations/0001_initial.py
contmon/scraper/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.utils.timezone
import model_utils.fields


class Migration(migrations.Migration):

    replaces = [('scraper', '0001_initial'), ('scraper', '0002_auto_20150706_2105'), ('scraper', '0003_auto_20150706_2108'), ('scraper', '0004_auto_20150706_2110'), ('scraper', '0005_auto_20150706_2116')]

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='WebsiteScraperConfig',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('domain', models.CharField(max_length=400, db_index=True)),
                ('selector_style', models.CharField(blank=True, max_length=100, choices=[(b'css', b'css'), (b'xpath', b'xpath')])),
                ('name_selector', models.CharField(max_length=100, blank=True)),
                ('image_selector', models.CharField(max_length=100, blank=True)),
                ('content_selector', models.CharField(max_length=100)),
                ('next_page_selector', models.CharField(max_length=100, blank=True)),
                ('tabs_selector', models.CharField(max_length=100, blank=True)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
bsd-3-clause
Python
7fb2b02c7c08912f54ef3cc0f22c53daa34ec639
Add accelerometer and crash analysis
bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc
analysis/plot_accelerometer.py
analysis/plot_accelerometer.py
"""Plots the accelerometer readings for x, y, and z.""" from dateutil import parser as dateparser from matplotlib import pyplot import json import sys def main(): if sys.version_info.major <= 2: print('Please use Python 3') sys.exit(1) if len(sys.argv) != 2: print('Usage: plot_accelerometer.py <log file>') sys.exit(1) with open(sys.argv[1]) as file_: lines = file_.readlines() first_stamp = timestamp(lines[0]) acceleration_g_x = [] acceleration_g_y = [] acceleration_g_z = [] acceleration_times = [] not_moving_times = [] run_times = [] stop_times = [] for line in lines: if 'acceleration_g_x' in line: data = json.loads(line[line.find('{'):]) acceleration_g_x.append(data['acceleration_g_x']) acceleration_g_y.append(data['acceleration_g_y']) acceleration_g_z.append(data['acceleration_g_z']) acceleration_times.append(timestamp(line) - first_stamp) elif 'not moving according' in line: not_moving_times.append(timestamp(line) - first_stamp) elif 'Received run command' in line: run_times.append(timestamp(line) - first_stamp) elif 'Received stop command' in line or 'No waypoints, stopping' in line: stop_times.append(timestamp(line) - first_stamp) pyplot.plot(acceleration_times, acceleration_g_x) pyplot.scatter(not_moving_times, [0.25] * len(not_moving_times), color='blue') pyplot.scatter(run_times, [0.3] * len(run_times), color='green') pyplot.scatter(stop_times, [0.35] * len(stop_times), color='red') pyplot.draw() pyplot.show() pyplot.plot(acceleration_times, acceleration_g_y) pyplot.scatter(not_moving_times, [-0.25] * len(not_moving_times), color='blue') pyplot.scatter(run_times, [-0.3] * len(run_times), color='green') pyplot.scatter(stop_times, [-0.35] * len(stop_times), color='red') pyplot.draw() pyplot.show() pyplot.plot(acceleration_times, acceleration_g_z) pyplot.scatter(not_moving_times, [-0.75] * len(not_moving_times), color='blue') pyplot.scatter(run_times, [-0.7] * len(run_times), color='green') pyplot.scatter(stop_times, [-0.65] * len(stop_times), color='red') pyplot.draw() pyplot.show() pyplot.plot(acceleration_times, acceleration_g_x) pyplot.plot(acceleration_times, [i + 0.05 for i in acceleration_g_y]) pyplot.plot(acceleration_times, [i - 0.93 for i in acceleration_g_z]) pyplot.scatter(not_moving_times, [0.25] * len(not_moving_times), color='blue') pyplot.scatter(run_times, [0.3] * len(run_times), color='green') pyplot.scatter(stop_times, [0.35] * len(stop_times), color='red') pyplot.draw() pyplot.show() def timestamp(line): """Returns the timestamp of a log line.""" dt = dateparser.parse(line[:line.find(',')]) comma = line.find(',') millis = float(line[comma + 1:line.find(':', comma)]) return dt.timestamp() + millis / 1000. if __name__ == '__main__': main()
mit
Python
1b0c33c01b179831edc29b0b13a3f60e96b54321
Create joyent.py
ahelal/ansible-joyent-inventory
joyent.py
joyent.py
#!/usr/bin/env python
import os
import sys
import cPickle as pickle
from datetime import datetime

from smartdc import DataCenter

try:
    import json
except ImportError:
    import simplejson as json

debug = False
CACHE_EXPIRATION_IN_SECONDS = 300
SERVER_FILENAME = "joyent_server_cache.txt"

##
PATH_TO_FILE = os.getenv('HELPER')

joyent_key_id = "/" + os.environ['JOYENT_USERNAME'] + "/keys/" + os.environ['JOYENT_KEYNAME']
joyent_secret = os.environ['HOME'] + "/.ssh/id_rsa"
joyent_api = os.environ.get('JOYENT_API_URL')
joyent_location = "eu-ams-1.api.joyentcloud.com"

if PATH_TO_FILE and os.path.isdir(PATH_TO_FILE):
    SERVER_FILENAME = PATH_TO_FILE + "/" + SERVER_FILENAME

if debug:
    print SERVER_FILENAME


def getInventory():
    servers = getServers()
    inventory = {}
    for server in servers:
        group = server.type
        if group is None:
            group = 'ungrouped'
        if not group in inventory:
            inventory[group] = []
        inventory[group].append(server.name)
    return inventory


def getHost(hostname):
    servers = getServers()
    allhosts = {}
    for server in servers:
        ## How to connect
        if server.public_ips:
            ssh_connection = server.public_ips[0]
        elif server.private_ips:
            ssh_connection = server.private_ips[0]
        else:
            ssh_connection = server.name
        allhosts[server.name] = {
            "joyent_id": server.id,
            "joyent_public_ip": server.public_ips,
            "joyent_private_ip": server.private_ips,
            "ansible_ssh_host": ssh_connection
        }
        ##SmartOS python
        if server.type == "smartmachine":
            allhosts[server.name]["ansible_python_interpreter"] = "/opt/local/bin/python"
            allhosts[server.name]["ansible_ssh_user"] = "root"
    return allhosts.get(hostname)


def getServers():
    """ Check cache period and either read from cache or call the API """
    if not os.path.isfile(SERVER_FILENAME):
        return retrieveServerList()
    stats = os.stat(SERVER_FILENAME)
    modification_time = stats.st_mtime
    seconds_since_last_modified = (datetime.now() - datetime.fromtimestamp(modification_time)).total_seconds()
    if debug:
        print seconds_since_last_modified
    if seconds_since_last_modified < CACHE_EXPIRATION_IN_SECONDS:
        if debug:
            print "retrieving servers from cache..."
        return fetchServersFromCache()
    else:
        return retrieveServerList()


def retrieveServerList():
    if debug:
        print "retrieving servers from the API..."
    sdc = DataCenter(location=joyent_location, key_id=joyent_key_id,
                     secret=joyent_secret, verbose=debug)
    servers = sdc.machines()
    storeServersToCache(servers)
    return servers


class MyServer(object):
    def __init__(self, name, type, public_ips, private_ips, id):
        self.name = name
        self.type = type
        self.id = id
        self.private_ips = private_ips
        self.public_ips = public_ips


def fetchServersFromCache():
    return pickle.load(open(SERVER_FILENAME, "rb"))


def storeServersToCache(servers):
    myservers = [MyServer(server.name, server.type, server.public_ips,
                          server.private_ips, server.id) for server in servers]
    pickle.dump(myservers, open(SERVER_FILENAME, "wb"))


if __name__ == '__main__':
    if debug:
        print "using id_rsa" + joyent_secret + " with '" + joyent_key_id + "'"
    if len(sys.argv) == 2 and (sys.argv[1] == '--list'):
        print json.dumps(getInventory(), indent=4)
    elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
        print json.dumps(getHost(sys.argv[2]), indent=4)
    else:
        print "Usage: %s --list or --host <hostname>" % sys.argv[0]
        sys.exit(1)
apache-2.0
Python
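The inventory script above hand-rolls an mtime-based cache; the same pattern in a stripped-down sketch with illustrative names (`cached` is not part of the Joyent SDK or Ansible):

import os
import time
import cPickle as pickle

def cached(path, max_age, compute):
    # reuse the pickle on disk if it is younger than max_age seconds
    if os.path.isfile(path) and time.time() - os.stat(path).st_mtime < max_age:
        return pickle.load(open(path, "rb"))
    value = compute()
    pickle.dump(value, open(path, "wb"))
    return value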
872dd45173e889db06e9b16105492c241f7badae
Add an example for dynamic RPC lookup.
claws/aiozmq,aio-libs/aiozmq,asteven/aiozmq,MetaMemoryT/aiozmq
examples/rpc_dynamic.py
examples/rpc_dynamic.py
import asyncio
import aiozmq
import aiozmq.rpc


class DynamicHandler(aiozmq.rpc.AttrHandler):

    def __init__(self, namespace=()):
        self.namespace = namespace

    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError:
            return DynamicHandler(self.namespace + (key,))

    @aiozmq.rpc.method
    def func(self):
        return (self.namespace, 'val')


@asyncio.coroutine
def go():
    server = yield from aiozmq.rpc.start_server(
        DynamicHandler(), bind='tcp://*:*')
    server_addr = next(iter(server.transport.bindings()))

    client = yield from aiozmq.rpc.open_client(
        connect=server_addr)

    ret = yield from client.rpc.func()
    assert ((), 'val') == ret, ret

    ret = yield from client.rpc.a.func()
    assert (('a',), 'val') == ret, ret

    ret = yield from client.rpc.a.b.func()
    assert (('a', 'b'), 'val') == ret, ret

    server.close()
    client.close()


def main():
    asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
    asyncio.get_event_loop().run_until_complete(go())
    print("DONE")


if __name__ == '__main__':
    main()
bsd-2-clause
Python
e0b1bea00c56657ef9fb4456203a522920375cc2
add testLCMSpy.py script
openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro
software/ddapp/src/python/tests/testLCMSpy.py
software/ddapp/src/python/tests/testLCMSpy.py
from ddapp.consoleapp import ConsoleApp
from ddapp import lcmspy
from ddapp import lcmUtils
from ddapp import simpletimer as st


app = ConsoleApp()
app.setupGlobals(globals())
if app.getTestingInteractiveEnabled():
    app.showPythonConsole()

lcmspy.findLCMModulesInSysPath()

timer = st.SimpleTimer()
stats = {}
channelToMsg = {}
items = {}


def item(r, c):
    rowDict = items.setdefault(r, {})
    try:
        return rowDict[c]
    except KeyError:
        i = QtGui.QTableWidgetItem('')
        table.setItem(r, c, i)
        rowDict[c] = i
        return i


def printStats():
    print '\n------------------------\n'

    averages = [(channel, stat.getAverage()) for channel, stat in stats.iteritems()]
    averages.sort(key=lambda x: x[1])

    table.setRowCount(len(averages))
    i = 0
    for channel, bytesPerSecond in reversed(averages):
        print channel, '%.3f kbps' % (bytesPerSecond/1024.0)
        item(i, 0).setText(channel)
        item(i, 1).setText(channelToMsg[channel])
        item(i, 2).setText('%.3f kbps' % (bytesPerSecond/1024.0))
        i += 1


def onMessage(messageData, channel):
    messageData = str(messageData)

    msgType = lcmspy.getMessageClass(messageData)
    if not msgType:
        #print 'failed decode:', channel
        pass
    else:
        name = lcmspy.getMessageTypeFullName(msgType)

    stat = stats.get(channel)
    if not stat:
        stat = st.AverageComputer()
        stats[channel] = stat

    stat.update(len(messageData))

    if channel not in channelToMsg:
        channelToMsg[channel] = lcmspy.getMessageTypeFullName(msgType) if msgType else '<unknown msg type>'

    if timer.elapsed() > 3:
        printStats()
        timer.reset()
        for stat in stats.values():
            stat.reset()

    #msg = lcmspy.decodeMessage(messageData)


sub = lcmUtils.addSubscriber(channel='.+', callback=onMessage)
sub.setNotifyAllMessagesEnabled(True)


from PythonQt import QtGui, QtCore

table = QtGui.QTableWidget()
table.setColumnCount(3)
table.setHorizontalHeaderLabels(['channel', 'type', 'bandwidth'])
table.verticalHeader().setVisible(False)
table.show()

app.start()
bsd-3-clause
Python
cb2cc713c29c20ba239a60b6151c5e5c001c8e0b
Add joinkb.py
Metaleer/hexchat-scripts
joinkb.py
joinkb.py
from __future__ import print_function

__module_name__ = 'Join Kickban'
__module_version__ = '0.1'
__module_description__ = 'Kickbans clients from specified channels on regex match against their nickname on join'
__author__ = 'Daniel A. J.'

import hexchat
import re

# renamed from `re` so the compiled pattern does not shadow the re module
pattern = re.compile(r'\bfoo\b')  # regex pattern to be matched against in user's nickname
check_channels = ['#test', '#fooness']  # channel(s) where script is active
net = 'freenode'  # network where script is active

def join_search(word, word_eol, userdata):
    channel = word[2]
    user_nickname = ''.join(word[0][1:word[0].index('!')])
    user_host = ''.join(word[0][word[0].index('@'):])
    for x in check_channels:
        if pattern.search(user_nickname) is not None and channel == x and hexchat.get_info("network") == net:
            hexchat.command("mode %s +b *!*%s" % (channel, user_host))
            hexchat.command("kick %s regex pattern detected" % user_nickname)
            return hexchat.EAT_ALL

def unload_joinkb(userdata):
    print(__module_name__, 'version', __module_version__, 'unloaded.')

hexchat.hook_server("JOIN", join_search)
hexchat.hook_unload(unload_joinkb)
print(__module_name__, 'version', __module_version__, 'loaded.')
mit
Python
f6864179a2dc1c531afc2c3ba6be300006e01fab
Create consecZero.py
NendoTaka/CodeForReference,NendoTaka/CodeForReference,NendoTaka/CodeForReference
Codingame/Python/Clash/consecZero.py
Codingame/Python/Clash/consecZero.py
import sys
import math

# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.

n = input()

# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)

c = 0
t = 0
for x in n:
    if x == '0':
        t += 1
    else:
        if t > c:
            c = t
        t = 0
if t > c:
    c = t
print(c)
mit
Python
6a4fb74befd22c2bc814dbe51a1fa884a077be9d
Create django_audit_snippets.py
stephenbradshaw/pentesting_stuff,stephenbradshaw/pentesting_stuff,stephenbradshaw/pentesting_stuff,stephenbradshaw/pentesting_stuff
example_code/django_audit_snippets.py
example_code/django_audit_snippets.py
from django.conf import settings
from urls import urlpatterns

'''
Access shell via ./manage.py shell (or shell_plus if you have django-extensions)

Dont forget you may need to set environment variables:
- DJANGO_SETTINGS_MODULE to the settings file (python module load syntax like settings.filename) and
- PYTHONPATH to include the path where the Django code sits

Install ipython and django-extensions to get a better shell (shell_plus)
pip install django-extensions
This also has show_urls command which will do something similar to get_urls_friendly below

urls will not contain urlpatterns in later django releases
'''

# all the configured apps settings are now in here
settings

# this prints out mapped urls and associated views
def get_urls_friendly(raw_urls, nice_urls=None, urlbase=''):
    '''Recursively builds a list of all the urls in the current project and
    the name of their associated view'''
    if nice_urls is None:  # avoid the shared-mutable-default-argument pitfall
        nice_urls = []
    for entry in raw_urls:
        fullurl = (urlbase + entry.regex.pattern).replace('^', '')
        if entry.callback:
            viewname = entry.callback.func_name
            nice_urls.append('%s - %s' % (fullurl, viewname))
        else:
            get_urls_friendly(entry.url_patterns, nice_urls, fullurl)
    nice_urls = sorted(list(set(nice_urls)))
    return nice_urls
bsd-3-clause
Python
73819cea7150e15212a014f9c3a42a69d0351ab8
Create cutrope.py
vikramraman/algorithms,vikramraman/algorithms
cutrope.py
cutrope.py
# Author: Vikram Raman
# Date: 08-15-2015

import time

# Given a rope with length n, how to cut the rope into m parts with length n[0], n[1], ..., n[m-1],
# in order to get the maximal product of n[0]*n[1]* ... *n[m-1]?
# We have to cut at least once. Additionally, the length of the whole rope,
# as well as the length of each part, must be an integer.

# For example, if the length of the rope is 8,
# the maximal product of the part lengths is 18.
# In order to get the maximal product,
# the rope is cut into three parts with lengths 2, 3, and 3 respectively.

# immediate thoughts: this is a dynamic programming knapsack kind of problem
def cutrope(l):
    d = [0, 1]
    for i in range(2, l+1):
        maxVal = 0
        for j in range(1, i):
            maxVal = max(j * d[i-j], j * (i-j), maxVal)
        d.append(maxVal)
    print d

l = 8
start_time = time.clock()
cutrope(l)
print("--- %s seconds ---" % (time.clock() - start_time))
mit
Python
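The DP above can be cross-checked against the known closed form: cut off as many 3s as possible, but never leave a remainder of 1 (a sketch, assuming l >= 2):

def cutrope_closed_form(l):
    if l == 2: return 1
    if l == 3: return 2
    product = 1
    while l > 4:
        product *= 3
        l -= 3
    return product * l   # cutrope_closed_form(8) == 18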
6b95af9822b9d94793eef503609b48d83066f594
add test that causes KeyError for disabled text
WojciechMula/canvas2svg
test/test-text-diabled.py
test/test-text-diabled.py
from framework import *

root.title("Disabled text")

canv.create_text(
    200, 200,
    text = "Test disabled text",
    font = ("Times", 20),
    state = DISABLED
)

thread.start_new_thread(test, (canv, __file__, True))
root.mainloop()
bsd-3-clause
Python
06efe8a8be913fb63f27016268d86f1ad0a5bcdf
Add test_engine_seed.py
probcomp/cgpm,probcomp/cgpm
tests/test_engine_seed.py
tests/test_engine_seed.py
# -*- coding: utf-8 -*-

# Copyright (c) 2015-2016 MIT Probabilistic Computing Project

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cgpm.crosscat.engine import Engine
from cgpm.utils import general as gu


def test_engine_simulate_no_repeat():
    """Generate 3 samples from 2 states 10 times, and ensure uniqueness."""
    rng = gu.gen_rng(1)
    engine = Engine(X=[[1]], cctypes=['normal'], num_states=2, rng=rng)
    samples_list = [
        [s[0] for s in engine.simulate(rowid=-i, query=[0], N=3)[0]]
        for i in xrange(10)
    ]
    samples_set = set([frozenset(s) for s in samples_list])
    assert len(samples_set) == len(samples_list)
apache-2.0
Python
3de2b08133f6f721a3a30120a93b81be0eacefb6
add tests for the scuba.filecleanup sub-module
JonathonReinhart/scuba,JonathonReinhart/scuba,JonathonReinhart/scuba
tests/test_filecleanup.py
tests/test_filecleanup.py
from __future__ import print_function
from nose.tools import *
from unittest import TestCase
try:
    from unittest import mock
except ImportError:
    import mock

from scuba.filecleanup import FileCleanup


def assert_set_equal(a, b):
    assert_equal(set(a), set(b))


class TestFilecleanup(TestCase):

    @mock.patch('os.remove')
    def test_files_tracked(self, os_remove_mock):
        '''FileCleanup.files works'''
        fc = FileCleanup()
        fc.register('foo.txt')
        fc.register('bar.bin')
        assert_set_equal(fc.files, ['foo.txt', 'bar.bin'])

    @mock.patch('os.remove')
    def test_basic_usage(self, os_remove_mock):
        '''FileCleanup removes one file'''
        fc = FileCleanup()
        fc.register('foo.txt')
        fc.cleanup()
        os_remove_mock.assert_any_call('foo.txt')

    @mock.patch('os.remove')
    def test_multiple_files(self, os_remove_mock):
        '''FileCleanup removes multiple files'''
        fc = FileCleanup()
        fc.register('foo.txt')
        fc.register('bar.bin')
        fc.register('/something/snap.crackle')
        fc.cleanup()
        os_remove_mock.assert_any_call('bar.bin')
        os_remove_mock.assert_any_call('foo.txt')
        os_remove_mock.assert_any_call('/something/snap.crackle')

    @mock.patch('os.remove')
    def test_ignores_remove_errors(self, os_remove_mock):
        '''FileCleanup ignores os.remove() errors'''
        def os_remove_se(path):
            if path == 'INVALID':
                raise OSError('path not found')
        os_remove_mock.side_effect = os_remove_se

        fc = FileCleanup()
        fc.register('foo.txt')
        fc.register('bar.bin')
        fc.register('INVALID')
        fc.cleanup()
        os_remove_mock.assert_any_call('bar.bin')
        os_remove_mock.assert_any_call('foo.txt')
        assert_set_equal(fc.files, [])
mit
Python
d41005d14239a93237fb839084f029208b94539d
Use the custom.js as served from the CDN for try
dietmarw/jupyter-docker-images,iamjakob/docker-demo-images,Zsailer/docker-jupyter-teaching,odewahn/docker-demo-images,jupyter/docker-demo-images,tanyaschlusser/docker-demo-images,iamjakob/docker-demo-images,Zsailer/docker-demo-images,CognitiveScale/docker-demo-images,Zsailer/docker-jupyter-teaching,ericdill/docker-demo-images,willjharmer/docker-demo-images,Zsailer/docker-jupyter-teaching,philipz/docker-demo-images,parente/docker-demo-images,philipz/docker-demo-images,vanceb/docker-demo-images,parente/docker-demo-images,willjharmer/docker-demo-images,modulexcite/docker-demo-images,parente/docker-demo-images,willjharmer/docker-demo-images,vanceb/docker-demo-images,vanceb/docker-demo-images,CognitiveScale/docker-demo-images,Zsailer/docker-demo-images,tanyaschlusser/docker-demo-images,Zsailer/docker-jupyter-teaching,pelucid/docker-demo-images,mjbright/docker-demo-images,modulexcite/docker-demo-images,mjbright/docker-demo-images,CognitiveScale/docker-demo-images,dietmarw/jupyter-docker-images,rgbkrk/docker-demo-images,philipz/docker-demo-images,danielballan/docker-demo-images,rgbkrk/docker-demo-images,rgbkrk/docker-demo-images,dietmarw/jupyter-docker-images,mjbright/docker-demo-images,modulexcite/docker-demo-images,pelucid/docker-demo-images,jupyter/docker-demo-images,iamjakob/docker-demo-images,Zsailer/docker-demo-images,pelucid/docker-demo-images,danielballan/docker-demo-images,jupyter/docker-demo-images,danielballan/docker-demo-images,ericdill/docker-demo-images,ericdill/docker-demo-images,odewahn/docker-demo-images,odewahn/docker-demo-images,CognitiveScale/docker-demo-images,tanyaschlusser/docker-demo-images
common/profile_default/ipython_notebook_config.py
common/profile_default/ipython_notebook_config.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.

c = get_config()

c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888

# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True

# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']

# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
    'headers': {
        'Content-Security-Policy': "frame-ancestors 'self' https://*.jupyter.org https://jupyter.github.io https://*.tmpnb.org"
    },
    'static_url_prefix': 'https://cdn.jupyter.org/notebook/try/'
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.

c = get_config()

c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888

# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True

# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']

# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
    'headers': {
        'Content-Security-Policy': "frame-ancestors 'self' https://*.jupyter.org https://jupyter.github.io https://*.tmpnb.org"
    },
    'static_url_prefix': 'https://cdn.jupyter.org/notebook/3.1.0/'
}
bsd-3-clause
Python
2b380d501b80afad8c7c5ec27537bcc682ed2775
Fix some scope mistakes. This fix was part of the reverted commit.
TiberiumPY/puremine,Armored-Dragon/pymineserver
commands/handle.py
commands/handle.py
import commands.cmds as cmds

def handle(self, chat_raw):
    self.logger.info("Handling command: " + chat_raw + " (for player " + self.fquid + ")")
    _atmp1 = chat_raw.split(" ")
    # command name minus the leading character, kept as a str so it can be
    # looked up in cmds.baseList (a list of characters is unhashable)
    _atmp2 = _atmp1[0][1:]
    del _atmp1[0]
    cmdobj = {
        "base": _atmp2,
        "args_raw": _atmp1,
        "scope": self,
        "chat_raw": chat_raw
    }
    cmds.InvalidCommand.begin(self, cmdobj) if _atmp2 not in cmds.baseList else cmds.baseList[_atmp2].begin(self, cmdobj)
import commands.cmds as cmds

def handle(self, chat_raw):
    self.logger.info("Handling command: " + chat_raw + " (for player" + self.fquid + ")")
    _atmp1 = chat_raw.split(" ")
    _atmp2 = list(_atmp1[0])
    del _atmp2[0]
    del _atmp1[0]
    cmdobj = {
        "base": _atmp2,
        "args_raw": _atmp1,
        "scope": self,
        "chat_raw": chat_raw
    }
    commands.cmds.InvalidCommand.begin(self, cmdobj) if _atmp2 not in commands.cmds.baseList else commands.cmds.baseList[_atmp2].begin(self, cmdobj)
mit
Python
b37f31b5adbdda3e5d40d2d8a9dde19b2e305c2c
Add tests for the controller module
conwetlab/ckanext-wirecloud_view,conwetlab/ckanext-wirecloud_view,conwetlab/ckanext-wirecloud_view,conwetlab/ckanext-wirecloud_view
ckanext/wirecloudview/tests/test_controller.py
ckanext/wirecloudview/tests/test_controller.py
# -*- coding: utf-8 -*-

# Copyright (c) 2018 Future Internet Consulting and Development Solutions S.L.

# This file is part of CKAN WireCloud View Extension.

# CKAN WireCloud View Extension is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# CKAN WireCloud View Extension is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with CKAN WireCloud View Extension. If not, see <http://www.gnu.org/licenses/>.

import json
import unittest

from mock import DEFAULT, patch

from ckanext.wirecloudview.controller import WireCloudViewController


class WirecloudViewControllerTest(unittest.TestCase):

    @patch.multiple("ckanext.wirecloudview.controller", request=DEFAULT, get_plugin=DEFAULT, toolkit=DEFAULT, OAuth2Session=DEFAULT, response=DEFAULT)
    def test_get_workspaces(self, request, get_plugin, toolkit, OAuth2Session, response):
        self.controller = WireCloudViewController()
        self.controller.client_id = "aclientid"

        request.params = {
            'incomplete': 'key words',
            'limit': '20',
        }
        get_plugin().wirecloud_url = "https://dashboards.example.org"
        oauth = OAuth2Session()
        OAuth2Session.reset_mock()
        oauth.get().json.return_value = {
            "results": [
                {"owner": "user1", "name": "dashboard1"},
                {"owner": "user2", "name": "other-dashboard"},
            ]
        }
        oauth.get.reset_mock()
        response.headers = {}

        result = self.controller.get_workspaces()

        self.assertEqual(
            json.loads(result.decode('utf-8')),
            {
                "ResultSet": {
                    "Result": [
                        {"Name": "user1/dashboard1"},
                        {"Name": "user2/other-dashboard"},
                    ]
                }
            }
        )
        self.assertEqual(response.headers[b'Content-Type'], b"application/json")
        OAuth2Session.assert_called_once_with(self.controller.client_id, token=toolkit.c.usertoken)
        oauth.get.assert_called_once_with("https://dashboards.example.org/api/search?namespace=workspace&q=key+words&maxresults=20")
agpl-3.0
Python
d1d1892551d805b5a73aaef07932c65fd375e342
Add Rules unit test
desihub/desisurvey,desihub/desisurvey
py/desisurvey/test/test_rules.py
py/desisurvey/test/test_rules.py
import unittest

import numpy as np

import desisurvey.tiles
from desisurvey.rules import Rules


class TestRules(unittest.TestCase):

    def setUp(self):
        pass

    def test_rules(self):
        rules = Rules()
        tiles = desisurvey.tiles.get_tiles()
        completed = np.ones(tiles.ntiles, bool)
        rules.apply(completed)
        completed[:] = False
        rules.apply(completed)
        gen = np.random.RandomState(123)
        for i in range(10):
            completed[gen.choice(tiles.ntiles, tiles.ntiles // 10, replace=False)] = True
            rules.apply(completed)


def test_suite():
    """Allows testing of only this module with the command::

        python setup.py test -m <modulename>
    """
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
bsd-3-clause
Python
6350092030d267621d2430d4505c01455d1de2d3
Create Misha_rungaKutta.py
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
math/runge-kutta_method/Misha_rungaKutta.py
math/runge-kutta_method/Misha_rungaKutta.py
import matplotlib.pyplot as plt

# Python program to implement Runge Kutta method
def dydx(x, y):
    return (18 * x + 1.33 * y) / (1.33 * x + 18 * y)

# Finds value of y for a given x using step size h
# and initial value y0 at x0.
def rungeKutta(x0, y0, x, h, Q1=0.5, Q2=0.5, w1=0.5, w2=0.5, c1=1, c2=2, c3=2, c4=1):
    # Count number of iterations using step size or step height h
    yn = []
    n = (int)((x - x0) / h)
    # Iterate for number of iterations.
    y = y0
    for i in range(1, n + 1):
        # Apply Runge Kutta formulas to find the next value of y
        k1 = h * dydx(x0, y)
        k2 = h * dydx(x0 + Q1 * h, y + w1 * k1)
        k3 = h * dydx(x0 + Q2 * h, y + w2 * k2)
        k4 = h * dydx(x0 + h, y + k3)

        # Update next value of y
        y = y + (1.0 / (c1 + c2 + c3 + c4)) * \
            (c1 * k1 + c2 * k2 + c3 * k3 + c4 * k4)
        yn.append(y)

        # Update next value of x
        x0 = x0 + h
    # print("value of yn ", yn[-1])
    return yn

def rootMeanSquareError(standard, predicted):
    sum = 0
    for a, p in zip(standard, predicted):
        err = (p - a) ** 2
        sum += err
    mean = sum / len(standard)
    rmse = mean ** (1 / 2)
    return rmse

# Driver method
x0 = 0
y = 18 / 1.33
x = 18
h = 0.5

# reducing errors by modifying the values of w, Q and ci
def compare():
    errList = []
    y_standard = rungeKutta(x0, y, x, h)
    for i in range(1, 11):
        y_modified = rungeKutta(x0, y, x, h, 0.8, 0.5, 0.8, 0.4, 2, 3, 4, 0)
        err = rootMeanSquareError(y_modified, y_standard)
        errList.append(err)
    print(errList)
    print("minimum error", min(errList), errList.index(min(errList)) + 1)
    return errList

n = [h, h/2, h/4, h/8, h/16]

def calc():
    List = []
    for i in (n):
        a = rungeKutta(x0, y, x, i)
        b = rungeKutta(x0, y, x, i, 0.8, 0.5, 0.8, 0.4, 2, 3, 4, 0)
        c = rootMeanSquareError(b, a)
        print("A ", a[-1], " b ", b[-1], " c ", c)
        List.append(c)
        print("error", c)
    print("error list", List)
    return List

# u = compare()
# print("compare", u)
y = calc()

# plotting the points
plt.plot(n, y)

plt.xlabel('Interval step size (h)')
plt.ylabel('Root mean square error')
plt.title('Error Analysis Graph!')
plt.show()
cc0-1.0
Python
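The defaults above (Q1 = Q2 = 0.5, w1 = w2 = 0.5, c = (1, 2, 2, 1)) are the classical fourth-order tableau. A self-contained sanity check of the expected order on y' = y, y(0) = 1, whose exact solution at x = 1 is e; halving h should shrink the global error by roughly 2**4 = 16:

import math

def rk4_step(f, x, y, h):
    k1 = h * f(x, y)
    k2 = h * f(x + h / 2.0, y + k1 / 2.0)
    k3 = h * f(x + h / 2.0, y + k2 / 2.0)
    k4 = h * f(x + h, y + k3)
    return y + (k1 + 2 * k2 + 2 * k3 + k4) / 6.0

def solve(h):
    x, y = 0.0, 1.0
    while x < 1.0 - 1e-12:
        y = rk4_step(lambda x, y: y, x, y, h)
        x += h
    return y

print((solve(0.1) - math.e) / (solve(0.05) - math.e))  # roughly 16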
3efa20e0d93c922bec6ae0f41774fd406532257a
Allow manually graded code cells
alope107/nbgrader,MatKallada/nbgrader,jupyter/nbgrader,dementrock/nbgrader,EdwardJKim/nbgrader,jhamrick/nbgrader,modulexcite/nbgrader,ellisonbg/nbgrader,dementrock/nbgrader,EdwardJKim/nbgrader,jupyter/nbgrader,EdwardJKim/nbgrader,ellisonbg/nbgrader,jhamrick/nbgrader,jupyter/nbgrader,alope107/nbgrader,ellisonbg/nbgrader,jhamrick/nbgrader,ellisonbg/nbgrader,EdwardJKim/nbgrader,jhamrick/nbgrader,modulexcite/nbgrader,MatKallada/nbgrader,jupyter/nbgrader,jupyter/nbgrader
nbgrader/preprocessors/checkcellmetadata.py
nbgrader/preprocessors/checkcellmetadata.py
from nbgrader import utils
from nbgrader.preprocessors import NbGraderPreprocessor


class CheckCellMetadata(NbGraderPreprocessor):
    """A preprocessor for checking that grade ids are unique."""

    def preprocess(self, nb, resources):
        resources['grade_ids'] = ids = []
        nb, resources = super(CheckCellMetadata, self).preprocess(nb, resources)

        id_set = set([])
        for grade_id in ids:
            if grade_id in id_set:
                raise RuntimeError("Duplicate grade id: {}".format(grade_id))
            id_set.add(grade_id)

        return nb, resources

    def preprocess_cell(self, cell, resources, cell_index):
        if utils.is_grade(cell):
            # check for blank grade ids
            grade_id = cell.metadata.nbgrader.get("grade_id", "")
            if grade_id == "":
                raise RuntimeError("Blank grade id!")
            resources['grade_ids'].append(grade_id)

            # check for valid points
            points = cell.metadata.nbgrader.get("points", "")
            try:
                points = float(points)
            except ValueError:
                raise RuntimeError(
                    "Point value for grade cell {} is invalid: {}".format(
                        grade_id, points))

        # check that markdown cells are grade AND solution (not either/or)
        if cell.cell_type == "markdown" and utils.is_grade(cell) and not utils.is_solution(cell):
            raise RuntimeError(
                "Markdown grade cell '{}' is not marked as a solution cell".format(
                    grade_id))
        if cell.cell_type == "markdown" and not utils.is_grade(cell) and utils.is_solution(cell):
            raise RuntimeError(
                "Markdown solution cell (index {}) is not marked as a grade cell".format(
                    cell_index))

        return cell, resources
from nbgrader import utils
from nbgrader.preprocessors import NbGraderPreprocessor


class CheckCellMetadata(NbGraderPreprocessor):
    """A preprocessor for checking that grade ids are unique."""

    def preprocess(self, nb, resources):
        resources['grade_ids'] = ids = []
        nb, resources = super(CheckCellMetadata, self).preprocess(nb, resources)

        id_set = set([])
        for grade_id in ids:
            if grade_id in id_set:
                raise RuntimeError("Duplicate grade id: {}".format(grade_id))
            id_set.add(grade_id)

        return nb, resources

    def preprocess_cell(self, cell, resources, cell_index):
        if utils.is_grade(cell):
            # check for blank grade ids
            grade_id = cell.metadata.nbgrader.get("grade_id", "")
            if grade_id == "":
                raise RuntimeError("Blank grade id!")
            resources['grade_ids'].append(grade_id)

            # check for valid points
            points = cell.metadata.nbgrader.get("points", "")
            try:
                points = float(points)
            except ValueError:
                raise RuntimeError(
                    "Point value for grade cell {} is invalid: {}".format(
                        grade_id, points))

        # check that code cells are grade OR solution (not both)
        if cell.cell_type == "code" and utils.is_grade(cell) and utils.is_solution(cell):
            raise RuntimeError(
                "Code grade cell '{}' is also marked as a solution cell".format(
                    grade_id))

        # check that markdown cells are grade AND solution (not either/or)
        if cell.cell_type == "markdown" and utils.is_grade(cell) and not utils.is_solution(cell):
            raise RuntimeError(
                "Markdown grade cell '{}' is not marked as a solution cell".format(
                    grade_id))
        if cell.cell_type == "markdown" and not utils.is_grade(cell) and utils.is_solution(cell):
            raise RuntimeError(
                "Markdown solution cell (index {}) is not marked as a grade cell".format(
                    cell_index))

        return cell, resources
bsd-3-clause
Python
c6b9ef93b8d20589d454e2c63bba60fe383975b5
Add files via upload
stevelatif/erasure_coding
erasure.py
erasure.py
#!/usr/bin/env python
import numpy as np
import random
import hashlib

'''
Reed Solomon Encoding
data - column vector array
sz - integer length of data
Encodes data and returns a code that can be decoded
'''

class ErasureCoding():
    def __init__(self):
        pass

    def _encode(self, x_vector, xform):
        '''Do a Reed Solomon encoding of a vector of data
        Keyword Arguments:
        x_vector -- numpy vector of data
        xform -- numpy array to transform data
        returns transformed vector
        '''
        res = np.dot(xform, x_vector)
        return res

    def _decode(self, code, inv):
        '''Decode data that has been transformed by a Reed Solomon transformation
        Keyword Arguments:
        code -- encoded data in a numpy array
        inv -- inverse Reed Solomon transformation in a numpy matrix
        returns transformed vector
        '''
        return(np.dot(inv, code))

    def chunks(self, data, ch_sz):
        '''Convert an array of data into chunks
        Keyword arguments:
        data -- the data to be converted
        ch_sz -- chunk size
        returns a generator with the chunk
        '''
        for ii in xrange(0, len(data), ch_sz):
            yield data[ii:ii + ch_sz]

    def rs_read(self, _dd):
        _out = []
        _buf = []
        for ii in self.chunks(_dd, self.ndim):
            data = np.array(ii)
            _buf[:] = [chr(x) for x in data]
            _out += _buf
        output = "".join(_out)
        output = output[:-self.pad_len - 1 or None]
        return output

    def rs_write(self, _data):
        '''
        '''
        Id = np.identity(self.ndim)
        b = np.array([[0,0,1,0,0],[0,0,0,1,1],[0,0,0,0,1]])
        B = np.vstack((Id, b))
        bad_rows = [2,3,4]
        B_prime = np.delete(B, bad_rows, 0)
        B_prime_inv = np.linalg.inv(B_prime)
        m = hashlib.md5()
        m.update(_data)
        print m.hexdigest()
        _d_len = len(_data)
        self.pad_len = _d_len % self.ndim
        for ii in xrange(0, self.pad_len + 1):
            _data += '0'
        _dd = []
        _dd[:] = [ord(x) for x in _data]
        #self.dest_arr.node
        return _dd

    def rs(self, _data):
        '''
        '''
        self.ndim = 5
        self.mdim = 3
        dd = self.rs_write(_data)
        print self.rs_read(dd)


def main():
    ec = ErasureCoding()
    ec.dest_arr = []
    node_1 = {}
    node_2 = {}
    node_3 = {}
    node_4 = {}
    node_5 = {}
    ec.dest_arr.append(node_1)
    ec.dest_arr.append(node_2)
    ec.dest_arr.append(node_3)
    ec.dest_arr.append(node_4)
    ec.dest_arr.append(node_5)
    ec.rs("holy smokes bat man! would you look at that!")
    ec.rs("The ugly man rides a big motorcycle")
    ec.rs("There has also been White House conflict with Cabinet members such as Treasury Secretary Steven Mnuchin, who has vented to friends that Priebus has blocked his choice for deputy secretary, Goldman Sachs managing director Jim Donovan, according to one person familiar with the talks. Secretary of State Rex Tillerson, meanwhile, has complained that the chief of staff is picking who will get plum ambassador posts without always consulting others, said another person familiar with that situation.")

if __name__ == '__main__':
    main()
bsd-3-clause
Python
22e8cc6200cafd5cec386c35142cd742d4a2a735
add problem 34
smrmkt/project_euler
problem_034.py
problem_034.py
#!/usr/bin/env python #-*-coding:utf-8-*- ''' 145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145. Find the sum of all numbers which are equal to the sum of the factorial of their digits. Note: as 1! = 1 and 2! = 2 are not sums they are not included. ''' import math import timeit def calc(): eqs = [] for i in range(3, 2177280): if i == sum(map(lambda j: math.factorial(j), map(int, list(str(i))))): eqs.append(i) return eqs if __name__ == '__main__': print calc() print timeit.Timer('problem_034.calc()', 'import problem_034').timeit(1)
mit
Python
f64068b7b6e50f9280b51831715df8cb4c586daa
Update merge person tool
barberscore/barberscore-api,dbinetti/barberscore,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api,barberscore/barberscore-api
project/apps/api/management/commands/merge_persons.py
project/apps/api/management/commands/merge_persons.py
from optparse import make_option

from django.core.management.base import (
    BaseCommand,
    CommandError,
)

from apps.api.models import (
    Person,
    Singer,
    Director,
    Arranger,
)


class Command(BaseCommand):
    help = "Merge selected persons by name"

    option_list = BaseCommand.option_list + (
        make_option(
            "-o",
            "--old",
            dest="old",
            help="specify old name",
        ),
    )

    option_list = option_list + (
        make_option(
            "-n",
            "--new",
            dest="new",
            help="specify new name",
        ),
    )

    def handle(self, *args, **options):
        # make sure both name options are present
        if options['old'] is None:
            raise CommandError("Option `--old=...` must be specified.")

        if options['new'] is None:
            raise CommandError("Option `--new=...` must be specified.")

        # make sure both persons exist
        try:
            new_person = Person.objects.get(
                name__iexact=options['new'],
            )
        except Person.DoesNotExist:
            raise CommandError("New person does not exist.")

        try:
            old_person = Person.objects.get(
                name__iexact=options['old'],
            )
        except Person.DoesNotExist:
            raise CommandError("Old person does not exist.")

        # Move related records
        for director in old_person.choruses.all():
            Director.objects.create(
                person=new_person,
                contestant=director.contestant,
                part=director.part,
            )

        for singer in old_person.quartets.all():
            Singer.objects.create(
                person=new_person,
                contestant=singer.contestant,
                part=singer.part,
            )

        for arranger in old_person.arrangements.all():
            Arranger.objects.create(
                person=new_person,
                chart=arranger.chart,
                part=arranger.part,
            )

        # remove the redundant person
        try:
            old_person.delete()
        except Exception as e:
            raise CommandError("Error deleting old person: {0}".format(e))

        return "Merged {0} into {1}".format(old_person, new_person)
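# Hedged usage sketch -- invoked through a project's manage.py; the names
# below are purely illustrative:
#   ./manage.py merge_persons --old "Jon Smith" --new "John Smith"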
bsd-2-clause
Python
12bca37026ef4db41bd452dcb8cdc9022cdcf8c9
Create pythonhelloworld.py
msjones217/alrington-class
pythonhelloworld.py
pythonhelloworld.py
print "hello word"
unlicense
Python
8e4240cd9bc2c06264ef23fddfc93ccf76e5ff9b
Create progressbar.py
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
progressbar.py
progressbar.py
################################################################################
# Example usage:
#   $ python
#   >>> import progressbar
#   >>> import time
#   >>> total = 100
#   >>> message = 'Doing this task '
#   >>> with progressbar.Bar(total, message) as bar:
#   ...   for n in range(total):
#   ...     time.sleep(0.1)
#   ...     bar.update()
#   ...
#   Doing this task [------------------------------------------------------------]
################################################################################

import sys

################################################################################
class Bar:

    # A progress bar is drawn using 4 elements:
    #   1. A message
    #   2. The left (start) boundary
    #   3. The body of the progress bar
    #   4. The right (end) boundary
    template = '{msg}{start}{body}{end}'

    ##################################################
    def __init__(self, total, message='', max_width=80, marker='#',
                 placeholders='-', start='[', end=']'):
        # Assume zero width so that self.from_template() works
        self.width = 0

        # A bar measures progress towards a total
        self.total = total

        # A progress bar may have a message before it
        self.message = message

        # A progressbar.Bar is a series of markers
        self.marker = marker

        # drawn over the top of placeholders
        self.placeholders = placeholders

        # and delimited by start and end characters
        self.start=start
        self.end=end

        # calculate how much of the max_width will be consumed by the message
        # and the start/end delimiters.
        padding_width = len(self.from_template())

        # Calculate the width of the body of the bar
        self.width = max_width - padding_width

        # How many parts of the total go per marker in the body of the bar
        # (float division keeps the granularity non-zero when total < width)
        self.granularity = float(total) / self.width

    ##############################
    def from_template(self):
        '''
        Returns a string representation of the progressbar.Bar, including the
        message, the start and end markers and a series of placeholders.
        '''
        return self.template.format(msg   = self.message,
                                    start = self.start,
                                    end   = self.end,
                                    body  = self.placeholders * self.width)

    ##################################################
    def __enter__(self):
        # How much of the total has passed
        self.progress = 0

        # How much of the width has been drawn
        self.rendered = 0

        # Write out the progressbar.Bar with placeholders
        sys.stdout.write(self.from_template())

        # Write out backspaces until the cursor is at the start marker
        sys.stdout.write('\b' * (self.width + len(self.end)))

        sys.stdout.flush()

        # act as a proper context manager
        return self

    ##############################
    def __exit__(self, type, value, traceback):
        # always render a completed progressbar.Bar
        while not self.is_fully_rendered():
            self.render()

        # then finish on the next line
        print('')

    ##################################################
    def render(self):
        '''
        Outputs one marker over the top of a placeholder if the progress bar
        is still not fully rendered.
        '''
        self.rendered += 1
        if not self.is_fully_rendered():
            sys.stdout.write(self.marker)
            sys.stdout.flush()

    ##############################
    def is_fully_rendered(self):
        return self.rendered > self.width

    ##################################################
    def update(self, n=1):
        '''
        Update the progressbar.Bar n counts towards the total.
        '''
        if n > 0:
            self.progress += 1
            while self.progress / self.granularity > self.rendered:
                self.render()
            self.update(n-1)
mit
Python
465c2c92da5db91bcc1f9149fbfa5722d30e10f9
add some tests for the Basic Auth filter
livingbio/libsaas,ducksboard/libsaas,80vs90/libsaas,CptLemming/libsaas
test/test_basic_auth.py
test/test_basic_auth.py
import unittest from libsaas import http from libsaas.filters import auth class BasicAuthTestCase(unittest.TestCase): def test_simple(self): auth_filter = auth.BasicAuth('user', 'pass') req = http.Request('GET', 'http://example.net/') auth_filter(req) self.assertEqual(req.headers['Authorization'], 'Basic dXNlcjpwYXNz') def test_unicode(self): # try both a unicode and a bytes parameter _lambda = b'\xce\xbb' _ulambda = _lambda.decode('utf-8') auth_bytes = auth.BasicAuth('user', _lambda) auth_unicode = auth.BasicAuth('user', _ulambda) auth_mixed = auth.BasicAuth(_lambda, _ulambda) expected_bytes = 'Basic dXNlcjrOuw==' expected_unicode = expected_bytes expected_mixed = 'Basic zrs6zrs=' for auth_filter, expected in ((auth_bytes, expected_bytes), (auth_unicode, expected_unicode), (auth_mixed, expected_mixed)): req = http.Request('GET', 'http://example.net/') auth_filter(req) self.assertEqual(req.headers['Authorization'], expected)
mit
Python
4cac86aeb2d24a916fc5ae9ca98e3898f4729e1c
add protocol.py module
JasonLai256/plumbca
plumbca/protocol.py
plumbca/protocol.py
# -*- coding: utf-8 -*-
"""
    plumbca.protocol
    ~~~~~~~~~~~~~~~~

    Implements the protocol support for Plumbca.

    :copyright: (c) 2015 by Jason Lai.
    :license: BSD, see LICENSE for more details.
"""

import logging
import asyncio

from .message import Request
from .worker import Worker

actlog = logging.getLogger('activity')
errlog = logging.getLogger('errors')


class PlumbcaCmdProtocol:

    def __init__(self):
        self.handler = Worker()

    async def plumbca_cmd_handle(self, reader, writer):
        """Simple plumbca command protocol implementation.

        plumbca_cmd_handle handles incoming command request.
        """
        data = await reader.read()
        req = Request(data)
        addr = writer.get_extra_info('peername')
        actlog.info("<Server> Received %r from %r", req.command, addr)

        # drive the command process and reply with the handler's response
        # (rather than echoing the request arguments back to the client)
        resp = self.handler.run_command(req)
        writer.write(resp)
        await writer.drain()

        actlog.info("Close the client %r socket", addr)
        writer.close()
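# A minimal sketch of wiring the handler into an asyncio server; the host and
# port defaults are assumptions, not taken from the rest of the plumbca
# codebase, and this mirrors the standard asyncio.start_server() pattern.
def serve(host='127.0.0.1', port=4273):  # hypothetical defaults
    loop = asyncio.get_event_loop()
    proto = PlumbcaCmdProtocol()
    coro = asyncio.start_server(proto.plumbca_cmd_handle, host, port, loop=loop)
    server = loop.run_until_complete(coro)
    actlog.info('Serving on %r', server.sockets[0].getsockname())
    try:
        loop.run_forever()
    finally:
        server.close()
        loop.run_until_complete(server.wait_closed())
        loop.close()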
bsd-3-clause
Python
545af0493cf08cb15d262f3a5333df6d1fce6848
Add util convenience functions for accessing data without decorators
SilentCircle/django-brake,SilentCircle/django-brake,skorokithakis/django-brake,skorokithakis/django-brake
brake/utils.py
brake/utils.py
"""Access limits and increment counts without using a decorator."""

from decorators import _backend


def get_limits(request, label, field, periods):
    limits = []
    count = 10
    for period in periods:
        limits.extend(_backend.limit(
            label,
            request,
            field=field,
            count=count,
            period=period
        ))
        count += 10

    return limits


def inc_counts(request, label, field, periods):
    for period in periods:
        _backend.count(label, request, field=field, period=period)
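# Hedged usage sketch inside a Django view; the label, field name and periods
# are illustrative, and the 429 response stands in for real handling:
#
#   def login_view(request):
#       if get_limits(request, 'login', 'ip', [60, 3600]):
#           return HttpResponse('Too many attempts', status=429)
#       inc_counts(request, 'login', 'ip', [60, 3600])
#       ...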
bsd-3-clause
Python
e0c3a46d1c3c13b5c956bf3cc6f30ad495f87ccd
put the logger config in a separate file for cleanliness
darrenwee/voglbot
voglogger.py
voglogger.py
#!/usr/bin/python
"""
logger management for VOGLbot
writes out to both the console and a file 'voglbot.log'
"""
import sys
import logging
import time

# file logging; `stream` must not be combined with `filename` here, so the
# console gets its own handler below
logging.basicConfig(
    filename = 'voglbot.log',
    filemode = 'w',
    level=logging.DEBUG,
    format='%(asctime)s: %(message)s',
    datefmt = '%d-%m %H:%M:%S',
)

# for console logging
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)-12s : %(levelname)-8s %(message)s')
console.setFormatter(formatter)

logger = logging.getLogger()
logger.addHandler(console)
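# Hedged usage sketch from another VOGLbot module:
#   from voglogger import logger
#   logger.info('VOGLbot starting up')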
mit
Python
a984120bdb6c67a3dc2ca89ce9ae5498230015ea
Add initial runner
origingod/hug,philiptzou/hug,yasoob/hug,philiptzou/hug,shaunstanislaus/hug,janusnic/hug,jean/hug,alisaifee/hug,alisaifee/hug,STANAPO/hug,timothycrosley/hug,MuhammadAlkarouri/hug,origingod/hug,giserh/hug,janusnic/hug,shaunstanislaus/hug,jean/hug,gbn972/hug,gbn972/hug,giserh/hug,yasoob/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,STANAPO/hug,timothycrosley/hug
hug/run.py
hug/run.py
"""hug/run.py Contains logic to enable execution of hug APIS from the command line """ from wsgiref.simple_server import make_server import falcon import sys import importlib def server(module): api = falcon.API() for url, method_handlers in module.HUG_API_CALLS: api.add_route(url, namedtuple('Router', method_handlers.keys())(**method_handlers)) return api def terminal(): if len(sys.argv) < 2: print("Please specify a hug API file to start the server with", file=sys.stderr) api = server(importlib.machinery.SourceFileLoader(sys.argv[1].split(".")[0], sys.argv[1]).load_module()) httpd = make_server('', 8000, api) print("Serving on port 8000...") httpd.serve_forever()
mit
Python
1578e1a129d91605148cf48f8793ac098ad0de7e
add command group
ojengwa/ibu,ojengwa/migrate
ibu/cli.py
ibu/cli.py
# -*- coding: utf-8 -*-
from __future__ import print_function

import click

CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])


@click.group()
def ibu():
    pass


# register the command on the `ibu` group so it is reachable from the CLI
@ibu.command(context_settings=CONTEXT_SETTINGS)
def test():
    print("hello")
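# A minimal sketch exercising the group with click's public test runner;
# nothing here is ibu-specific beyond the objects defined above.
if __name__ == '__main__':
    from click.testing import CliRunner
    result = CliRunner().invoke(ibu, ['test'])
    print(result.output)  # expected: "hello"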
mit
Python
33dc091a43d3868324631fdb420721ab35d1f6ce
Create dis_q.py
ionutvilie/ibm_mq_tools,ionutvilie/ibm_mq_tools,ionutvilie/ibm_mq_tools,ionutvilie/ibm_mq_tools
dis_q.py
dis_q.py
#!/usr/bin/python import pymqi queue_manager = "MQSD.TEST" channel = "SYSTEM.DEF.SVRCONN" host = "10.21.218.15" port = "14123" conn_info = "%s(%s)" % (host, port) prefix = "*" queue_type = pymqi.CMQC.MQQT_ALL # queue_type = pymqi.CMQC.MQQT_LOCAL excluded_prefix = ['SYSTEM', 'MSB', 'AMQ' , 'MQAI'] # excluded_prefix = [ ] args = {pymqi.CMQC.MQCA_Q_NAME: prefix, pymqi.CMQC.MQIA_Q_TYPE: queue_type} qmgr = pymqi.connect(queue_manager, channel, conn_info) pcf = pymqi.PCFExecute(qmgr) try: response = pcf.MQCMD_INQUIRE_Q(args) except pymqi.MQMIError, e: if e.comp == pymqi.CMQC.MQCC_FAILED and e.reason == pymqi.CMQC.MQRC_UNKNOWN_OBJECT_NAME: print "No queues matched given arguments." else: raise else: for queue_info in response: # Queue Name QueueDepth MaxDepth XMITQ Type # https://www-01.ibm.com/support/knowledgecenter/SSFKSJ_7.1.0/com.ibm.mq.javadoc.doc/WMQJavaClasses/com/ibm/mq/pcf/CMQC.html queue_name = queue_info[pymqi.CMQC.MQCA_Q_NAME] if not any(queue_name.startswith(prefix) for prefix in excluded_prefix): queue_type = queue_info[pymqi.CMQC.MQIA_Q_TYPE] if queue_type == 1: #LOCAL queue_type = "LOCAL" queue_depth = queue_info[pymqi.CMQC.MQIA_CURRENT_Q_DEPTH] queue_mdepth = queue_info[pymqi.CMQC.MQIA_MAX_Q_DEPTH] print "%s \t %s \t %s \t %s" % (queue_name, queue_depth, queue_mdepth, queue_type) # elif queue_type == 2: #MODEL elif queue_type == 3: #ALIAS queue_type = "ALIAS" queue_depth = "-" queue_mdepth = "------" print "%s \t %s \t %s \t %s" % (queue_name, queue_depth, queue_mdepth, queue_type) elif queue_type == 6: #REMOTE queue_type = "REMOTE" queue_depth = "-" queue_mdepth = "------" print "%s \t %s \t %s \t %s" % (queue_name, queue_depth, queue_mdepth, queue_type) # print "%s \t %s" % (queue_name, queue_type) else: print "%s \t %s" % (queue_name, queue_type) # print "%s \t %s" % (queue_name, queue_type) qmgr.disconnect()
mit
Python
b7541c063b6fc10fdd622cbd680ea4418c679f6b
Add NodeList iterator
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
d1_libclient_python/src/d1_client/iter/node.py
d1_libclient_python/src/d1_client/iter/node.py
# -*- coding: utf-8 -*-

# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
#   Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iterate over the nodes that are registered in a DataONE environment

For each Node in the environment, returns a PyXB representation of a DataONE
Node document.

https://releases.dataone.org/online/api-documentation-v2.0/apis/Types.html#Types.Node
"""

import logging

import d1_client.mnclient_1_1
import d1_client.mnclient_2_0
import d1_common.types.dataoneTypes_v1_1 as v1
import d1_common.types.dataoneTypes_v2_0 as v2

MAJOR_VERSION = 2


class NodeListIterator(object):
  def __init__(
      self,
      base_url,
      major_version=MAJOR_VERSION,
      client_dict=None,
      listNodes_dict=None,
  ):
    self._base_url = base_url
    self._major_version = major_version
    self._client_dict = client_dict or {}
    self._listNodes_dict = listNodes_dict or {}

  def __iter__(self):
    client = d1_client.mnclient_2_0.MemberNodeClient_2_0(
      self._base_url, **self._client_dict
    )
    node_list_pyxb = client.listNodes()
    logging.debug(
      'Retrieved {} Node documents'.format(len(node_list_pyxb.node))
    )
    for node_pyxb in sorted(
        node_list_pyxb.node, key=lambda x: x.identifier.value()
    ):
      yield node_pyxb
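# Hedged usage sketch; the Coordinating Node base URL below is illustrative.
if __name__ == '__main__':
  logging.basicConfig(level=logging.DEBUG)
  for node_pyxb in NodeListIterator('https://cn.dataone.org/cn'):
    print(node_pyxb.identifier.value())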
apache-2.0
Python
2c900f8bddc9efb40d900bf28f8c6b3188add71e
Disable trix parser tests with Jython
RDFLib/rdflib,avorio/rdflib,yingerj/rdflib,ssssam/rdflib,ssssam/rdflib,marma/rdflib,armandobs14/rdflib,armandobs14/rdflib,dbs/rdflib,dbs/rdflib,RDFLib/rdflib,marma/rdflib,RDFLib/rdflib,yingerj/rdflib,avorio/rdflib,ssssam/rdflib,armandobs14/rdflib,RDFLib/rdflib,dbs/rdflib,marma/rdflib,marma/rdflib,ssssam/rdflib,armandobs14/rdflib,dbs/rdflib,avorio/rdflib,yingerj/rdflib,yingerj/rdflib,avorio/rdflib
test/test_trix_parse.py
test/test_trix_parse.py
#!/usr/bin/env python from rdflib.graph import ConjunctiveGraph import unittest class TestTrixParse(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def testAperture(self): g=ConjunctiveGraph() g.parse("test/trix/aperture.trix",format="trix") c=list(g.contexts()) #print list(g.contexts()) t=sum(map(len, g.contexts())) self.assertEquals(t,24) self.assertEquals(len(c),4) #print "Parsed %d triples"%t def testSpec(self): g=ConjunctiveGraph() g.parse("test/trix/nokia_example.trix",format="trix") #print "Parsed %d triples"%len(g) def testNG4j(self): g=ConjunctiveGraph() g.parse("test/trix/ng4jtest.trix",format="trix") #print "Parsed %d triples"%len(g) import platform if platform.system() == 'Java': from nose import SkipTest raise SkipTest('Jython issues - "JavaSAXParser" object has no attribute "start_namespace_decl"') if __name__=='__main__': unittest.main()
#!/usr/bin/env python from rdflib.graph import ConjunctiveGraph import unittest class TestTrixParse(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def testAperture(self): g=ConjunctiveGraph() g.parse("test/trix/aperture.trix",format="trix") c=list(g.contexts()) #print list(g.contexts()) t=sum(map(len, g.contexts())) self.assertEquals(t,24) self.assertEquals(len(c),4) #print "Parsed %d triples"%t def testSpec(self): g=ConjunctiveGraph() g.parse("test/trix/nokia_example.trix",format="trix") #print "Parsed %d triples"%len(g) if __name__=='__main__': unittest.main()
bsd-3-clause
Python
0cb6474b8c02f2cb7af54f8321f82a53175e8345
check for globals in the lib that are not prefixed with toku. addresses #74
ollie314/server,natsys/mariadb_10.2,natsys/mariadb_10.2,ollie314/server,davidl-zend/zenddbi,natsys/mariadb_10.2,davidl-zend/zenddbi,davidl-zend/zenddbi,flynn1973/mariadb-aix,flynn1973/mariadb-aix,natsys/mariadb_10.2,natsys/mariadb_10.2,flynn1973/mariadb-aix,flynn1973/mariadb-aix,flynn1973/mariadb-aix,davidl-zend/zenddbi,davidl-zend/zenddbi,natsys/mariadb_10.2,ollie314/server,flynn1973/mariadb-aix,ollie314/server,davidl-zend/zenddbi,flynn1973/mariadb-aix,slanterns/server,ollie314/server,natsys/mariadb_10.2,davidl-zend/zenddbi,davidl-zend/zenddbi,davidl-zend/zenddbi,ollie314/server,flynn1973/mariadb-aix,ollie314/server,flynn1973/mariadb-aix,davidl-zend/zenddbi,ollie314/server,davidl-zend/zenddbi,ollie314/server,natsys/mariadb_10.2,natsys/mariadb_10.2,ollie314/server,ollie314/server,flynn1973/mariadb-aix,flynn1973/mariadb-aix,natsys/mariadb_10.2,natsys/mariadb_10.2
src/tokuglobals.py
src/tokuglobals.py
#!/usr/bin/python import sys import os import re def checkglobals(libname, exceptsymbols, verbose): badglobals = 0 nmcmd = "nm -g " + libname f = os.popen(nmcmd) b = f.readline() while b != "": match = re.match("^([0-9a-f]+)\s(.?)\s(.*)$", b) if match == None: match = re.match("^\s+(.*)$", b) if match == None: print "unknown", b badglobals = 1 else: type = match.group(2) symbol = match.group(3) if verbose: print type, symbol match = re.match("^toku_", symbol) if match == None and not exceptsymbols.has_key(symbol): print "non toku symbol=", symbol badglobals = 1 b = f.readline() f.close() return badglobals def main(): verbose = 0 for arg in sys.argv[1:]: if arg == "-v": verbose += 1 exceptsymbols = {} for n in [ "_init", "_fini", "_end", "_edata", "__bss_start" ]: exceptsymbols[n] = 1 for n in [ "db_env_create", "db_create", "db_strerror", "db_version", "log_compare" ]: exceptsymbols[n] = 1 return checkglobals("libdb.so", exceptsymbols, verbose) sys.exit(main())
lgpl-2.1
Python
24b8437003269ebd10c46d0fbdaa3e432d7535d6
Add VCF -> non-reference likelihood table script.
roryk/junkdrawer,roryk/junkdrawer
genotype-likelihoods.py
genotype-likelihoods.py
from __future__ import print_function
import sys

import cyvcf
from argparse import ArgumentParser, FileType

description = ("Create a table of probability of a non reference call for each "
               "genotype for each sample. This is PL[0]. -1 is output for samples "
               "with a missing PL call at a position.")
parser = ArgumentParser(description=description)
parser.add_argument("vcf", type=FileType('r'),
                    help="VCF file to convert, use '-' to read from stdin")
args = parser.parse_args()

vcf_reader = cyvcf.Reader(args.vcf)
samples = vcf_reader.samples

header = "\t".join([str(x) for x in ["CHROM", "POS", "ID", "REF", "ALT"] + samples])
print(header, file=sys.stdout)
for record in vcf_reader:
    line = [record.CHROM, record.POS, record.ID, record.REF, record.alleles[1]]
    pls = [x.data.get("PL", None) for x in record.samples]
    pls = [x[0] if x else "-1" for x in pls]
    print("\t".join([str(x) for x in line + pls]), file=sys.stdout)
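# Hedged usage sketch from a shell; file names are illustrative:
#   python genotype-likelihoods.py calls.vcf > non_ref_pl.tsv
#   zcat calls.vcf.gz | python genotype-likelihoods.py - > non_ref_pl.tsv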
mit
Python
6a9ddbf5d775df14c994c9af9e89195ca05a58f9
Add pyjokes CLI test
trojjer/pyjokes,ElectronicsGeek/pyjokes,pyjokes/pyjokes,martinohanlon/pyjokes,gmarkall/pyjokes,borjaayerdi/pyjokes,bennuttall/pyjokes
tests/test_cli_error.py
tests/test_cli_error.py
import pytest
import subprocess
from subprocess import Popen, PIPE


def test_pyjokes_call_exception():
    pytest.raises(subprocess.CalledProcessError,
                  "subprocess.check_call('pyjokes')")


def test_pyjokes_call_output():
    p = subprocess.Popen('pyjokes', stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    assert out == b'Did you mean pyjoke?'
    assert p.returncode == 1
bsd-3-clause
Python
83a4c9bfa64543ecda65ed4c916fad8ad0a9233d
Create markov.py
tenkisi/markovtweet
markov.py
markov.py
# -*- coding: utf-8 -*-
import random

# helpers: n-gram extraction, 2D flattening and uniform random choice
ngram = lambda text, n: [text[i:i+n] for i in xrange(len(text) - n + 1)]
flatten2D = lambda data: [flattened for inner in data for flattened in inner]
randelement = lambda x: x[random.randint(0, len(x) - 1)]

class Markov:
    def __init__(self, data, n):
        self.data = data  # a list of n-grams, e.g. the output of ngram()
        self.n = n

    def markov(self, limit, firstword, lastword, getlength, lengthlimit=None, result=None):
        # out of steps: flatten the collected n-grams into a word list
        if limit == 0:
            return [k for k in [i[0] for i in result[:-1]] + result[-1]]
        candidatelist = []
        if result != None:
            # candidates are n-grams whose prefix overlaps the last pick's suffix
            candidatelist = [candidate for candidate in self.data if result[-1][1:self.n] == candidate[0:self.n - 1]]
        else:
            # first step: candidates are n-grams starting with firstword
            result = []
            candidatelist = [candidate for candidate in self.data if candidate[0] == firstword]
        if candidatelist == []:
            result.append(randelement(self.data))
        else:
            result.append(randelement(candidatelist))
        wordcount = getlength([k for k in [i[0] for i in result[:-1]] + result[-1]])
        charlimitflag = lengthlimit == None or wordcount < lengthlimit
        if not charlimitflag:
            # over the length budget: drop the last pick and finish up
            result = result[:-1]
        mrkv = lambda li: self.markov(li, firstword, lastword, getlength, lengthlimit, result)
        # recurse until the budget is hit or the chain ends on lastword
        return mrkv(limit - 1) if charlimitflag and result[-1][-1] != lastword else mrkv(0)
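# Hedged usage sketch; the corpus, boundary tokens and the length function
# are all illustrative. `data` must be the n-gram list the constructor expects.
if __name__ == '__main__':
    tokens = u'the cat sat on the mat and the cat ran to the mat'.split()
    chain = Markov(ngram(tokens, 3), 3)
    sentence = chain.markov(10, u'the', u'mat', lambda ws: len(ws), lengthlimit=12)
    print u' '.join(sentence)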
mit
Python
0970115f9bc1bab019c23ab46e64b26d5e754313
Implement function for displaying tuning guidance on a DIY 8-segment LEDs display
Bastien-Brd/pi-tuner
led_display.py
led_display.py
from gpiozero import LED
from time import sleep

g0 = LED(12)
f0 = LED(16)
a0 = LED(20)
b0 = LED(21)
e0 = LED(17)
d0 = LED(27)
c0 = LED(22)

g1 = LED(25)
f1 = LED(24)
a1 = LED(23)
b1 = LED(18)
e1 = LED(5)
d1 = LED(6)
c1 = LED(13)

PITCHES = {
    'E2': ((a0, d0, e0, f0, g0), (b0, c0)),
    'A2': ((a0, b0, c0, e0, f0, g0), (d0, )),
    'D3': ((b0, c0, d0, e0, g0), (a0, f0,)),
    'G3': ((a0, b0, c0, d0, f0, g0), (e0, )),
    'B3': ((c0, d0, e0, f0, g0), (a0, b0,)),
    'E4': ((a0, d0, e0, f0, g0), (b0, c0)),
}

DIRECTIONS = {
    -1: ((a1, b1, f1, g1), (c1, d1, e1,)),
    0: ((g1, ), (a1, b1, c1, d1, e1, f1, )),
    1: ((c1, d1, e1, g1), (a1, b1, f1)),
}


def display_tuning_guidance(pitch, direction):
    leds_on = PITCHES[pitch][0] + DIRECTIONS[direction][0]
    leds_off = PITCHES[pitch][1] + DIRECTIONS[direction][1]

    # The segments appear to be wired active-low (common anode), so driving a
    # pin low with led.off() lights a segment and led.on() blanks it.
    for led in leds_on:
        led.off()
    for led in leds_off:
        led.on()
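# A brief hardware demo, hedged: assumes the GPIO wiring above on a Raspberry
# Pi with gpiozero. Cycles each open-string pitch with "in tune" (0) guidance.
if __name__ == '__main__':
    for pitch in ('E2', 'A2', 'D3', 'G3', 'B3', 'E4'):
        display_tuning_guidance(pitch, 0)
        sleep(1)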
mit
Python
550d8bcd49e5ec591286f3f42de7dd54ef853bb8
Add a utility script to print duplicates
mubaris/motivate,mubaris/motivate
find_dupes.py
find_dupes.py
#!/usr/bin/env python3 import json import os import random scriptpath = os.path.dirname(__file__) data_dir = os.path.join(scriptpath, 'data') all_json = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))] quotes = [] for f in all_json: filename = os.path.join(data_dir, f) with open(filename) as json_data: quotes += json.load(json_data)['data'] uniq_authors = { quote['author'] for quote in quotes} uniq_quotes = { quote['quote'] for quote in quotes} print('Unique quotes: {}, authors: {}'.format(len(uniq_quotes), len(uniq_authors))) seen = set() dupes = sorted([x for x in quotes if x['quote'] in seen or seen.add(x['quote'])], key=lambda x:x['quote']) print(*dupes, sep='\n')
mit
Python