commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
ed463c7ea52bea26d724ee372fbd7319bfee8e1f | add preprocessor | LeeTZ/Yo,LeeTZ/Yo,LeeTZ/Yo | src/preprocessor.py | src/preprocessor.py | #! /usr/bin/python
import os
import re
import sys

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO


def process(input_file):
    invalidchar = ('\t')
    blockcomment = ['#{', '}#']
    stack = [0]
    output = StringIO()
    newindent = False
    commented = False
    linejoin = False
    debug = False
    for i, line in enumerate(input_file):
        lineout = remove_inline(line)
        if lineout:
            for x in invalidchar:
                if x in lineout:
                    error("SyntaxError: Invalid character {} found on line {}".format(x, i))
            # Check if the first statement is a block comment
            lstripline = lineout.lstrip()
            if len(lstripline) > 1 and blockcomment[0] == lstripline[:2]:
                commented = True
            # Check if the line gets uncommented
            if commented:
                if len(lineout) > 1 and blockcomment[1] == lineout[-2:]:
                    commented = False
            else:
                if not linejoin:
                    wcount = len(lineout) - len(lineout.lstrip(' '))
                    # If the previous line began an indentation, add the new
                    # indentation level to the block (so long as the new
                    # indentation level is greater than the previous one)
                    if newindent == True:
                        if wcount > stack[-1]:
                            stack.append(wcount)
                            newindent = False
                        else:
                            error("IndentationError on line {}".format(i))
                    # If the indentation level is greater than expected, throw an error
                    if wcount > stack[-1]:
                        if debug:
                            print "=== ERROR 1 ==="
                            print "proc. line: '{}'".format(lineout)
                            print "wcount: {}".format(wcount)
                            print "stack[-1]: {}".format(stack[-1])
                            print "newindent: {}".format(newindent)
                        error("IndentationError on line {}".format(i))
                    else:
                        # If the indentation level is less than the current level, return
                        # to a previous indentation block. Throw an error if you return
                        # to an indentation level that doesn't exist.
                        while (wcount < stack[-1]):
                            lineout = "}" + lineout
                            stack.pop()
                        if wcount != stack[-1]:
                            if debug:
                                print "=== ERROR 2 ==="
                                print "proc. line: '{}'".format(lineout)
                                print "wcount: {}".format(wcount)
                                print "stack[-1]: {}".format(stack[-1])
                                print "newindent: {}".format(newindent)
                            error("IndentationError on line {}".format(i))
                # Given that the indentation level is correct, check for the start of a
                # new code block (where a line ends with a ':') and insert a '{'. At the
                # end of a line, add a newline unless there is a linejoin character '\'.
                if lineout[-1] == ':':
                    lineout = lineout + '{\n'
                    newindent = True
                elif lineout[-1] == '\\':
                    linejoin = True
                    lineout = lineout[:-1]
                else:
                    lineout = lineout + '\n'
                    linejoin = False
                output.write(lineout)
    # Close any indentation blocks still open at end of file
    while 0 < stack[-1]:
        output.write("}")
        stack.pop()
    if debug:
        print output.getvalue()
    return output


def error(msg):
    sys.stderr.write(msg + "\n")
    sys.exit(2)


def remove_inline(line):
    if "##" in line:
        regex = re.compile("^(.*?)#.*|.*")
        m = regex.match(line)
        comments_removed = m.group(1)
    else:
        comments_removed = line
    return comments_removed.rstrip()


def usage():
    print """
    python preprocessor.py [input.yo]
    """


if __name__ == "__main__":
    if len(sys.argv) != 2:
        usage()
        sys.exit(2)
    try:
        f_in = open(sys.argv[1], "r")
    except IOError:
        error("IOError: Cannot read input file %s.\n" % sys.argv[1])
    name_ext = os.path.basename(f_in.name)
    dir_ext = os.path.dirname(f_in.name) + "/"
    if name_ext.lower().endswith(".yo"):
        fname = os.path.splitext(name_ext)[0]
    else:
        error('NameError: Input must have yo file extension')
    out_str = process(f_in)
    f_out = open(dir_ext + fname + ".processed.yo", 'w')
    f_out.write(out_str.getvalue())
| mit | Python |
|
8fe5e768f20abfdd790870075950b6537c5cad6a | Add class containing test state and report + print methods | Mikko-Finell/ptest | ptest.py | ptest.py | #!/usr/bin/python3
from sys import exit


class Ptest(object):
    def __init__(self, module_name):
        self.module_name = module_name
        self.passed = 0
        self.failed = 0
        print('\nRunning tests for module "', module_name, '"', sep='')

    def report(self, test_name, test_result):
        if test_result not in (True, False):
            print('Invalid report argument for test "', test_name, '"', sep='')
            exit(1)
        NORMAL = '\x1B[0m'
        RED = '\x1B[31m'
        GREEN = '\x1B[32m'
        if test_result:
            self.passed += 1
            print('[', GREEN, 'PASSED', NORMAL, '] ', test_name, sep='')
        else:
            self.failed += 1
            print('[', RED, 'FAILED', NORMAL, '] ', test_name, sep='')

    def print_statistics(self):
        test_count = self.passed + self.failed
        if test_count == 0:
            print('No tests yet...')
            return
        pass_rate = 0
        if self.passed != 0:
            pass_rate = round(float(self.passed) / float(test_count), 3) * 100
        print('Passed: ', self.passed, '/', test_count,
              ' (', pass_rate, '%)', sep='', end='\n\n')
| unlicense | Python |
|
20375ca41cce0ee6a9a22bfe6faa766ab6db53fc | add tests for coordinate rounding and basic pen commands | googlefonts/fonttools,fonttools/fonttools | Lib/fontTools/pens/t2CharStringPen_test.py | Lib/fontTools/pens/t2CharStringPen_test.py | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.pens.t2CharStringPen import T2CharStringPen
import unittest


class T2CharStringPenTest(unittest.TestCase):

    def assertAlmostEqualProgram(self, expected, actual):
        self.assertEqual(len(expected), len(actual))
        for i1, i2 in zip(expected, actual):
            if isinstance(i1, basestring):
                self.assertIsInstance(i2, basestring)
                self.assertEqual(i1, i2)
            else:
                self.assertAlmostEqual(i1, i2)

    def test_draw_lines(self):
        pen = T2CharStringPen(100, {})
        pen.moveTo((0, 0))
        pen.lineTo((10, 0))
        pen.lineTo((10, 10))
        pen.lineTo((0, 10))
        pen.closePath()  # no-op
        charstring = pen.getCharString(None, None)

        self.assertEqual(
            [100,
             0, 0, 'rmoveto',
             10, 0, 'rlineto',
             0, 10, 'rlineto',
             -10, 0, 'rlineto',
             'endchar'],
            charstring.program)

    def test_draw_curves(self):
        pen = T2CharStringPen(100, {})
        pen.moveTo((0, 0))
        pen.curveTo((10, 0), (20, 10), (20, 20))
        pen.curveTo((20, 30), (10, 40), (0, 40))
        pen.endPath()  # no-op
        charstring = pen.getCharString(None, None)

        self.assertEqual(
            [100,
             0, 0, 'rmoveto',
             10, 0, 10, 10, 0, 10, 'rrcurveto',
             0, 10, -10, 10, -10, 0, 'rrcurveto',
             'endchar'],
            charstring.program)

    def test_default_width(self):
        pen = T2CharStringPen(None, {})
        charstring = pen.getCharString(None, None)
        self.assertEqual(['endchar'], charstring.program)

    def test_no_round(self):
        # no rounding is the default
        pen = T2CharStringPen(100.1, {}, roundTolerance=0.0)
        pen.moveTo((0, 0))
        pen.curveTo((10.1, 0.1), (19.9, 9.9), (20.49, 20.49))
        pen.curveTo((20.49, 30.49), (9.9, 39.9), (0.1, 40.1))
        pen.closePath()
        charstring = pen.getCharString(None, None)

        self.assertAlmostEqualProgram(
            [100.1,
             0, 0, 'rmoveto',
             10.1, 0.1, 9.8, 9.8, 0.59, 10.59, 'rrcurveto',
             0, 10, -10.59, 9.41, -9.8, 0.2, 'rrcurveto',
             'endchar'],
            charstring.program)

    def test_round_all(self):
        # 1.0 rounds everything
        pen = T2CharStringPen(100.1, {}, roundTolerance=1.0)
        pen.moveTo((0, 0))
        pen.curveTo((10.1, 0.1), (19.9, 9.9), (20.49, 20.49))
        pen.curveTo((20.49, 30.49), (9.9, 39.9), (0.1, 40.1))
        pen.closePath()
        charstring = pen.getCharString(None, None)

        self.assertEqual(
            [100,
             0, 0, 'rmoveto',
             10, 0, 10, 10, 0, 10, 'rrcurveto',
             0, 10, -10, 10, -10, 0, 'rrcurveto',
             'endchar'],
            charstring.program)

    def test_round_some(self):
        pen = T2CharStringPen(100, {}, roundTolerance=0.2)
        pen.moveTo((0, 0))
        # the following two are rounded as within the tolerance
        pen.lineTo((10.1, 0.1))
        pen.lineTo((19.9, 9.9))
        # this one is not rounded as it exceeds the tolerance
        pen.lineTo((20.49, 20.49))
        pen.closePath()
        charstring = pen.getCharString(None, None)

        self.assertAlmostEqualProgram(
            [100,
             0, 0, 'rmoveto',
             10, 0, 'rlineto',
             10, 10, 'rlineto',
             0.49, 10.49, 'rlineto',
             'endchar'],
            charstring.program)


if __name__ == '__main__':
    import sys
    sys.exit(unittest.main())
| mit | Python |
|
1669f9a3a9fabc2ded8fa92542dca65036c201e5 | Create sizes.py | danforthcenter/plantcv,stiphyMT/plantcv,stiphyMT/plantcv,danforthcenter/plantcv,danforthcenter/plantcv,stiphyMT/plantcv | plantcv/plantcv/visualize/sizes.py | plantcv/plantcv/visualize/sizes.py | # Visualize an annotated image with object sizes
import os
import cv2
import random
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import find_objects
from plantcv.plantcv import color_palette


def sizes(img, mask, num_objects=100):
    """Visualize an image annotated with the size of each object.

    Inputs:
    img         = RGB or grayscale image data
    mask        = Binary mask made from selected contours
    num_objects = Optional parameter to limit the number of objects that will get annotated.

    Returns:
    plotting_img = Plotting image containing the original image with object sizes annotated

    :param img: numpy.ndarray
    :param mask: numpy.ndarray
    :param num_objects: int
    :return plotting_img: numpy.ndarray
    """
    plotting_img = np.copy(img)

    # Store debug
    debug = params.debug
    params.debug = None

    id_objects, obj_hierarchy = find_objects(img=img, mask=mask)
    rand_color = color_palette(num=len(id_objects), saved=False)
    random.shuffle(rand_color)

    label_coord_x = []
    label_coord_y = []
    area_vals = []

    for i, cnt in enumerate(id_objects):
        # Calculate the area of each contour
        area_vals.append(cv2.contourArea(cnt))
        cv2.drawContours(plotting_img, id_objects, i, rand_color[i], thickness=-1)
        # Store coordinates for labels
        label_coord_x.append(id_objects[i][0][0][0])
        label_coord_y.append(id_objects[i][0][0][1])

    segment_ids = []
    # Put labels of area
    for c, value in enumerate(area_vals):
        text = "{:.0f}".format(value)
        w = label_coord_x[c]
        h = label_coord_y[c]
        if c < int(num_objects):
            cv2.putText(img=plotting_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
        else:
            print("There were " + str(len(area_vals) - num_objects) + " objects not annotated.")
            break

    # Auto-increment device
    params.device += 1
    # Reset debug mode
    params.debug = debug

    if params.debug == 'print':
        print_image(plotting_img, os.path.join(params.debug_outdir, str(params.device) + '_object_sizes.png'))
    elif params.debug == 'plot':
        plot_image(plotting_img)

    return plotting_img
| mit | Python |
|
e03ecf68055e820106172413967713f98f7905ac | copy api_util to client to make it self-contained | olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net | net/client/api_util.py | net/client/api_util.py | import simplejson
def json2python(json):
    try:
        return simplejson.loads(json)
    except:
        pass
    return None

python2json = simplejson.dumps
| bsd-3-clause | Python |
|
addc7f33af75070333369a01c71e8acd231376ba | Add FilterNotifier for keyword based notification filtering | flakas/reconbot | reconbot/notifiers/filter.py | reconbot/notifiers/filter.py | class FilterNotifier:
""" Filters notifications based on their type or keywords """
def __init__(self, notifier, keywords=[], ignore=[]):
self.notifier = notifier
self.keywords = keywords
self.ignore = ignore
def notify(self, text, options={}):
if len(self.ignore) > 0 and any(keyword in text for keyword in self.ignore):
return False
if len(self.keywords) == 0 or any(keyword in text for keyword in self.keywords):
self.notifier.notify(text, options)
| mit | Python |
|
b09b11de1a025196cceb1c8fd71bda5515437a10 | Add max31855 example driver | alvarop/silta,alvarop/silta,alvarop/silta | sw/examples/drivers/max31855.py | sw/examples/drivers/max31855.py | #!/usr/bin/env python
#
# SPI example (using the STM32F407 discovery board)
#
import sys
import time
import ctypes

from silta import stm32f407


def bytes_to_int(byte_list):
    num = 0
    for byte in range(len(byte_list)):
        num += byte_list[byte] << ((len(byte_list) - 1 - byte) * 8)
    return num


class MAX31855(object):
    def __init__(self, bridge, cs_pin):
        self.bridge = bridge
        self.cs_pin = cs_pin
        self.last_fault = 0

        # Set the CS line as an output
        self.bridge.gpiocfg(self.cs_pin, 'output')

        # Configure ~1.05MHz clock with CPOL=0,CPHA=0
        self.bridge.spicfg(10500000, 0, 0)

        # CS is active low in this case
        self.bridge.gpio(self.cs_pin, 1)

    def read(self):
        # Read 32 bits
        txbuff = [0x00, 0x00, 0x00, 0x00]
        rval = self.bridge.spi(self.cs_pin, txbuff)

        if isinstance(rval, list):
            reg = bytes_to_int(rval)
            fault = ((reg >> 16) & 1) == 1
            if fault:
                temperature = None
                self.last_fault = reg & 0x7
            else:
                temperature = ctypes.c_int16((reg >> 16) & 0xFFFC).value >> 2
                temperature = temperature * 0.25
            return temperature
        else:
            print('SPI Error: ' + str(rval))
            return None

    def get_last_fault(self):
        return self.last_fault
| bsd-2-clause | Python |
|
a15e363718ab41c5e02b9eaa919fb689cd266af6 | Add common module for our tests | ptthiem/nose2,ojengwa/nose2,ezigman/nose2,ojengwa/nose2,leth/nose2,ezigman/nose2,ptthiem/nose2,little-dude/nose2,leth/nose2,little-dude/nose2 | nose2/tests/_common.py | nose2/tests/_common.py | """Common functionality."""
import os.path
import tempfile
import shutil
import sys

import unittest2


class TestCase(unittest2.TestCase):
    """TestCase extension.

    If the class variable _RUN_IN_TEMP is True (default: False), tests will be
    performed in a temporary directory, which is deleted afterwards.
    """
    _RUN_IN_TEMP = False

    def setUp(self):
        super(TestCase, self).setUp()
        if self._RUN_IN_TEMP:
            self.__orig_dir = os.getcwd()
            work_dir = self.__work_dir = tempfile.mkdtemp()
            os.chdir(self.__work_dir)
            # Make sure it's possible to import modules from current directory
            sys.path.insert(0, work_dir)

    def tearDown(self):
        super(TestCase, self).tearDown()
        if self._RUN_IN_TEMP:
            os.chdir(self.__orig_dir)
            shutil.rmtree(self.__work_dir, ignore_errors=True)


class _FakeEventBase(object):
    """Baseclass for fake Events."""
| bsd-2-clause | Python |
|
b802f1d5453840ea4b16113d5d03f6c27224ce0c | Add try/except example. | honeybadger-io/honeybadger-python,honeybadger-io/honeybadger-python | examples/try.py | examples/try.py | # Honeybadger for Python
# https://github.com/honeybadger-io/honeybadger-python
#
# This file is an example of how to catch an exception in Python and report it
# to Honeybadger without re-raising. To run this example:
#   $ pip install honeybadger
#   $ HONEYBADGER_API_KEY=your-api-key python try.py
from honeybadger import honeybadger

# Uncomment the following line or use the HONEYBADGER_API_KEY environment
# variable to configure the API key for your Honeybadger project:
# honeybadger.configure(api_key='your api key')

import logging
logging.getLogger('honeybadger').addHandler(logging.StreamHandler())


def method_two():
    mydict = dict(a=1)
    try:
        print mydict['b']
    except KeyError, exc:
        honeybadger.notify(exc, context={'foo': 'bar'})


def method_one():
    method_two()


if __name__ == '__main__':
    honeybadger.set_context(user_email="user@example.com")
    method_one()
| mit | Python |
|
6bbef11c982ddee4981318e6bca9fa85610f1cc8 | Increase revision content lenght | VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core | src/ggrc/migrations/versions/20170112112254_177a979b230a_update_revision_content_field.py | src/ggrc/migrations/versions/20170112112254_177a979b230a_update_revision_content_field.py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""Update revision content field.

Create Date: 2017-01-12 11:22:54.998164
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name

import sqlalchemy as sa
from sqlalchemy.dialects import mysql

from alembic import op

# revision identifiers, used by Alembic.
revision = '177a979b230a'
down_revision = '275cd0dcaea'


def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    op.alter_column(
        "revisions",
        "content",
        existing_type=sa.Text(),
        type_=mysql.LONGTEXT,
        nullable=False
    )


def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    op.alter_column(
        "revisions",
        "content",
        existing_type=mysql.LONGTEXT,
        type_=sa.Text(),
        nullable=False
    )
| apache-2.0 | Python |
|
eb4fbb28ed06b223282b02bb31f5f91e1eeb3f9f | Add RenormalizeWeight callback | EderSantana/seya,berleon/seya | seya/callbacks.py | seya/callbacks.py | import numpy as np
from keras.callbacks import Callback


class RenormalizeWeight(Callback):
    def __init__(self, W):
        Callback.__init__(self)
        self.W = W
        self.W_shape = self.W.get_value().shape

    def on_batch_start(self, batch, logs={}):
        W = self.W.get_value()
        # Flatten 4-D convolution kernels to 2-D before normalizing rows
        if len(self.W_shape) == 4:
            W = W.reshape((self.W_shape[0], -1))
        norm = np.sqrt((W**2).sum(axis=-1))
        W /= norm[:, None]
        W = W.reshape(self.W_shape)
        self.W.set_value(W)
| bsd-3-clause | Python |
|
c575f030feb90d3c6383d11265fcf7f80414ce34 | Add an example hook script for checking valid commits | gerrit-review/gerrit,keerath/gerrit_newssh,TonyChai24/test,WANdisco/gerrit,sudosurootdev/gerrit,bootstraponline-archive/gerrit-mirror,quyixia/gerrit,jeblair/gerrit,anminhsu/gerrit,sudosurootdev/gerrit,quyixia/gerrit,austinchic/Gerrit,anminhsu/gerrit,qtproject/qtqa-gerrit,dwhipstock/gerrit,ckamm/gerrit,teamblueridge/gerrit,joshuawilson/merrit,1yvT0s/gerrit,dwhipstock/gerrit,ckamm/gerrit,gerrit-review/gerrit,keerath/gerrit_newssh,TonyChai24/test,thesamet/gerrit,midnightradio/gerrit,gcoders/gerrit,teamblueridge/gerrit,catrope/gerrit,thesamet/gerrit,dwhipstock/gerrit,Overruler/gerrit,skurfuerst/gerrit,qtproject/qtqa-gerrit,gcoders/gerrit,Saulis/gerrit,bpollack/gerrit,GerritCodeReview/gerrit,m1kah/gerrit-contributions,TonyChai24/test,gerrit-review/gerrit,basilgor/gerrit,quyixia/gerrit,supriyantomaftuh/gerrit,GerritCodeReview/gerrit,hdost/gerrit,gracefullife/gerrit,CandyShop/gerrit,thinkernel/gerrit,basilgor/gerrit,pkdevbox/gerrit,1yvT0s/gerrit,jackminicloud/test,skurfuerst/gerrit,renchaorevee/gerrit,keerath/gerrit_newssh,quyixia/gerrit,jeblair/gerrit,Seinlin/gerrit,GerritCodeReview/gerrit,jackminicloud/test,atdt/gerrit,renchaorevee/gerrit,gcoders/gerrit,duboisf/gerrit,pkdevbox/gerrit,joshuawilson/merrit,CandyShop/gerrit,thesamet/gerrit,Seinlin/gerrit,cjh1/gerrit,WANdisco/gerrit,anminhsu/gerrit,atdt/gerrit,GerritCodeReview/gerrit,hdost/gerrit,supriyantomaftuh/gerrit,renchaorevee/gerrit,zommarin/gerrit,Team-OctOS/host_gerrit,jackminicloud/test,evanchueng/gerrit,supriyantomaftuh/gerrit,makholm/gerrit-ceremony,basilgor/gerrit,Seinlin/gerrit,bpollack/gerrit,jackminicloud/test,ckamm/gerrit,Team-OctOS/host_gerrit,thinkernel/gerrit,dwhipstock/gerrit,austinchic/Gerrit,ashang/aaron-gerrit,Distrotech/gerrit,dwhipstock/gerrit,hdost/gerrit,Distrotech/gerrit,thinkernel/gerrit,TonyChai24/test,anminhsu/gerrit,Team-OctOS/host_gerrit,evanchueng/gerrit,anminhsu/gerrit,netroby/gerrit,Seinlin/gerrit,joshuawilson/merrit,CandyShop/gerrit,supriyantomaftuh/gerrit,midnightradio/gerrit,thesamet/gerrit,cjh1/gerrit,Team-OctOS/host_gerrit,jackminicloud/test,supriyantomaftuh/gerrit,WANdisco/gerrit,gracefullife/gerrit,TonyChai24/test,anminhsu/gerrit,bootstraponline-archive/gerrit-mirror,qtproject/qtqa-gerrit,Seinlin/gerrit,supriyantomaftuh/gerrit,jeblair/gerrit,midnightradio/gerrit,qtproject/qtqa-gerrit,sudosurootdev/gerrit,WANdisco/gerrit,bpollack/gerrit,austinchic/Gerrit,makholm/gerrit-ceremony,catrope/gerrit,Saulis/gerrit,MerritCR/merrit,hdost/gerrit,joshuawilson/merrit,ashang/aaron-gerrit,atdt/gerrit,teamblueridge/gerrit,bpollack/gerrit,thinkernel/gerrit,TonyChai24/test,m1kah/gerrit-contributions,Overruler/gerrit,thesamet/gerrit,midnightradio/gerrit,Seinlin/gerrit,bpollack/gerrit,supriyantomaftuh/gerrit,ashang/aaron-gerrit,hdost/gerrit,Overruler/gerrit,pkdevbox/gerrit,rtyley/mini-git-server,netroby/gerrit,gcoders/gerrit,thesamet/gerrit,bootstraponline-archive/gerrit-mirror,pkdevbox/gerrit,gcoders/gerrit,netroby/gerrit,bootstraponline-archive/gerrit-mirror,midnightradio/gerrit,gracefullife/gerrit,gcoders/gerrit,pkdevbox/gerrit,makholm/gerrit-ceremony,evanchueng/gerrit,GerritCodeReview/gerrit,ashang/aaron-gerrit,CandyShop/gerrit,sudosurootdev/gerrit,Saulis/gerrit,quyixia/gerrit,Distrotech/gerrit,MerritCR/merrit,zommarin/gerrit,quyixia/gerrit,Overruler/gerrit,hdost/gerrit,netroby/gerrit,MerritCR/merrit,skurfuerst/gerrit,duboisf/gerrit,Overruler/gerrit,MerritCR/merrit,Team-OctOS/host_gerrit,evanchueng/gerrit,sudosurootdev/gerrit,
duboisf/gerrit,renchaorevee/gerrit,qtproject/qtqa-gerrit,joshuawilson/merrit,atdt/gerrit,GerritCodeReview/gerrit,duboisf/gerrit,evanchueng/gerrit,keerath/gerrit_newssh,Team-OctOS/host_gerrit,hdost/gerrit,gerrit-review/gerrit,thinkernel/gerrit,keerath/gerrit_newssh,ckamm/gerrit,dwhipstock/gerrit,cjh1/gerrit,makholm/gerrit-ceremony,m1kah/gerrit-contributions,1yvT0s/gerrit,zommarin/gerrit,thesamet/gerrit,joshuawilson/merrit,anminhsu/gerrit,ckamm/gerrit,gerrit-review/gerrit,netroby/gerrit,basilgor/gerrit,1yvT0s/gerrit,WANdisco/gerrit,midnightradio/gerrit,MerritCR/merrit,austinchic/Gerrit,atdt/gerrit,qtproject/qtqa-gerrit,Team-OctOS/host_gerrit,dwhipstock/gerrit,zommarin/gerrit,bootstraponline-archive/gerrit-mirror,MerritCR/merrit,skurfuerst/gerrit,gracefullife/gerrit,cjh1/gerrit,m1kah/gerrit-contributions,gcoders/gerrit,netroby/gerrit,basilgor/gerrit,ashang/aaron-gerrit,GerritCodeReview/gerrit,bpollack/gerrit,Distrotech/gerrit,Overruler/gerrit,jackminicloud/test,renchaorevee/gerrit,renchaorevee/gerrit,gerrit-review/gerrit,netroby/gerrit,Distrotech/gerrit,Saulis/gerrit,joshuawilson/merrit,renchaorevee/gerrit,gracefullife/gerrit,bootstraponline-archive/gerrit-mirror,jackminicloud/test,gerrit-review/gerrit,qtproject/qtqa-gerrit,WANdisco/gerrit,CandyShop/gerrit,1yvT0s/gerrit,Saulis/gerrit,catrope/gerrit,thinkernel/gerrit,zommarin/gerrit,rtyley/mini-git-server,teamblueridge/gerrit,quyixia/gerrit,Distrotech/gerrit,joshuawilson/merrit,Distrotech/gerrit,WANdisco/gerrit,MerritCR/merrit,pkdevbox/gerrit,teamblueridge/gerrit,Seinlin/gerrit,catrope/gerrit,MerritCR/merrit,Saulis/gerrit,jeblair/gerrit,pkdevbox/gerrit,thinkernel/gerrit,GerritCodeReview/gerrit,TonyChai24/test | contrib/check-valid-commit.py | contrib/check-valid-commit.py | #!/usr/bin/env python
import commands
import getopt
import sys

SSH_USER = 'bot'
SSH_HOST = 'localhost'
SSH_PORT = 29418
SSH_COMMAND = 'ssh %s@%s -p %d gerrit approve ' % (SSH_USER, SSH_HOST, SSH_PORT)
FAILURE_SCORE = '--code-review=-2'
FAILURE_MESSAGE = 'This commit message does not match the standard.' \
    + ' Please correct the commit message and upload a replacement patch.'
PASS_SCORE = '--code-review=0'
PASS_MESSAGE = ''


def main():
    change = None
    project = None
    branch = None
    commit = None
    patchset = None

    try:
        opts, args = getopt.getopt(sys.argv[1:], '', \
            ['change=', 'project=', 'branch=', 'commit=', 'patchset='])
    except getopt.GetoptError, err:
        print 'Error: %s' % (err)
        usage()
        sys.exit(-1)

    for arg, value in opts:
        if arg == '--change':
            change = value
        elif arg == '--project':
            project = value
        elif arg == '--branch':
            branch = value
        elif arg == '--commit':
            commit = value
        elif arg == '--patchset':
            patchset = value
        else:
            print 'Error: option %s not recognized' % (arg)
            usage()
            sys.exit(-1)

    if change == None or project == None or branch == None \
            or commit == None or patchset == None:
        usage()
        sys.exit(-1)

    command = 'git cat-file commit %s' % (commit)
    status, output = commands.getstatusoutput(command)

    if status != 0:
        print 'Error running \'%s\'. status: %s, output:\n\n%s' % \
            (command, status, output)
        sys.exit(-1)

    commitMessage = output[(output.find('\n\n') + 2):]
    commitLines = commitMessage.split('\n')

    if len(commitLines) > 1 and len(commitLines[1]) != 0:
        fail(commit, 'Invalid commit summary. The summary must be ' \
             + 'one line followed by a blank line.')

    i = 0
    for line in commitLines:
        i = i + 1
        if len(line) > 80:
            fail(commit, 'Line %d is over 80 characters.' % i)

    passes(commit)


def usage():
    print 'Usage:\n'
    print sys.argv[0] + ' --change <change id> --project <project name> ' \
        + '--branch <branch> --commit <sha1> --patchset <patchset id>'


def fail(commit, message):
    command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' \
        + _shell_escape(FAILURE_MESSAGE + '\n\n' + message) \
        + '\\\" ' + commit
    commands.getstatusoutput(command)
    sys.exit(1)


def passes(commit):
    command = SSH_COMMAND + PASS_SCORE + ' -m \\\"' \
        + _shell_escape(PASS_MESSAGE) + ' \\\" ' + commit
    commands.getstatusoutput(command)


def _shell_escape(x):
    s = ''
    for c in x:
        if c in '\n':
            s = s + '\\\"$\'\\n\'\\\"'
        else:
            s = s + c
    return s


if __name__ == '__main__':
    main()
| apache-2.0 | Python |
|
1f1d2df36a16b80c770974a9ac2bf48ccbebc3ab | add callable list | Cologler/py.jasily.cologler,Jasily/jasily-python | jasily/collection/funcs.py | jasily/collection/funcs.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------

from functools import partial


class CallableList(list):
    '''
    a simple callable list.
    '''

    def __call__(self):
        ret = None
        for func in self:
            ret = func()
        return ret

    def append_func(self, func, *args, **kwargs):
        '''
        append func with given arguments and keywords.
        '''
        wraped_func = partial(func, *args, **kwargs)
        self.append(wraped_func)

    def insert_func(self, index, func, *args, **kwargs):
        '''
        insert func with given arguments and keywords.
        '''
        wraped_func = partial(func, *args, **kwargs)
        self.insert(index, wraped_func)
| mit | Python |
|
467170482c97c3b586d58c4729d051c1b1b99f3d | Add sentence level classifier. | chiubaka/actionizer | actionizer_sentences.py | actionizer_sentences.py | #! /usr/bin/python
import numpy as np
import os
import re

from sklearn import datasets, cross_validation
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline

MESSAGES_DIR = "data/messages/"
MESSAGE_FILENAME_FORMAT = "msg-%d.txt"
JUDGMENTS_PATH = "data/judgments/judgments.txt"


def load_messages():
    messages = []
    # Read message files in numeric order. os.listdir() returns them sorted by string, not
    # message number.
    filenames = os.listdir(MESSAGES_DIR)
    num_messages = len(filenames)
    for i in range(num_messages):
        filename = MESSAGE_FILENAME_FORMAT % i
        with open(MESSAGES_DIR + filename) as message_file:
            messages.append(message_file.read())
    return messages


def load_sentence_judgments():
    judgments = []
    with open(JUDGMENTS_PATH) as judgments_file:
        for line in judgments_file:
            judgments.append([int(x) for x in line.split()[2:] if len(line.split()) > 2])
    return judgments


def load_sentences():
    messages = load_messages()
    judgments = load_sentence_judgments()
    action_sentences = []
    no_action_sentences = []
    for i in range(len(messages)):
        message = messages[i]
        sentences = parse_sentences(message)
        action_indices = judgments[i]
        if len(action_indices) > 0:
            for i in range(0, len(action_indices), 2):
                start_index = action_indices[i]
                length = action_indices[i+1]
                stop_index = start_index + length
                action_sentence = message[start_index:stop_index].strip().replace('\n', ' ')
                if action_sentence in sentences:
                    action_sentences.append(action_sentence)
                    sentences.remove(action_sentence)
        no_action_sentences.extend(sentences)
    target = [1 for _ in action_sentences]
    target.extend([0 for _ in no_action_sentences])
    action_sentences.extend(no_action_sentences)
    return action_sentences, target


def parse_sentences(message):
    # Split the sentence on periods, exclamation marks, and double newlines. Recombine
    # punctuation marks with their sentences.
    sentences = reduce(lambda acc, elem: acc[:-1] + [acc[-1] + elem] \
        if elem == '.' or elem == '?' or elem == '!' \
        else acc + [elem], re.split(r'([\.\!\?]|\n\n)', message), [])
    # Strip sentences of extra white space.
    # Replace internal newlines with spaces so that newlines don't trip up sklearn tokenizers.
    # Remove all sentences that have length 0 or are completely comprised of whitespace.
    # Remove any sentence starting with the 'From:' header, which should remove the From:, To:,
    # and Subject: headers.
    sentences = [s.strip().replace('\n', ' ') for s in sentences
                 if len(s) > 0 and not s.isspace() and not s.startswith('From:')]
    return sentences


# Transformer to transform a sparse matrix into a dense matrix for use in an sklearn pipeline.
class DenseTransformer(TransformerMixin):

    def transform(self, X, y=None, **fit_params):
        return X.todense()

    def fit_transform(self, X, y=None, **fit_params):
        self.fit(X, y, **fit_params)
        return self.transform(X)

    def fit(self, X, y=None, **fit_params):
        return self


def main():
    sentences, target = load_sentences()
    pipeline = Pipeline([('vect', CountVectorizer(ngram_range=(1, 3))),
                         ('to_dense', DenseTransformer()),
                         ('clf', GaussianNB())])
    pipeline.fit(sentences, target)
    scores = cross_validation.cross_val_score(pipeline, sentences, target, scoring='f1', cv=5)
    print "F1: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)


if __name__ == "__main__":
    main()
| mit | Python |
|
44b6b0ff5efc6d9fcda4f886640663b68e7d6c14 | Add initial code for getting batting stats over a specified timeframe | jldbc/pybaseball | pybaseball/league_batting_stats.py | pybaseball/league_batting_stats.py |
"""
TODO
pull batting stats over specified time period
allow option to get stats for full seasons instead of ranges
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
# get most recent standings if date not specified
if((start_dt is None) or (end_dt is None)):
print('Error: a date range needs to be specified')
return None
url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=b&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def batting_stats(start_dt=None, end_dt=None):
# retrieve html from baseball reference
soup = get_soup(start_dt, end_dt)
table = get_table(soup)
return table
| mit | Python |
|
072423365ad1c03dd593f5b8528a7b60c0c9bee9 | Add AuctionHouse table. | LegionXI/pydarkstar,AdamGagorik/pydarkstar | pydarkstar/tables/auction_house.py | pydarkstar/tables/auction_house.py | """
.. moduleauthor:: Adam Gagorik <adam.gagorik@gmail.com>
"""
from sqlalchemy import Column, Integer, SmallInteger, String, text
from pydarkstar.tables.base import Base


class AuctionHouse(Base):
    __tablename__ = 'auction_house'

    id = Column(Integer, primary_key=True)
    itemid = Column(SmallInteger, nullable=False, index=True, server_default=text("'0'"))
    stack = Column(Integer, nullable=False, server_default=text("'0'"))
    seller = Column(Integer, nullable=False, server_default=text("'0'"))
    seller_name = Column(String(15))
    date = Column(Integer, nullable=False, server_default=text("'0'"))
    price = Column(Integer, nullable=False, server_default=text("'0'"))
    buyer_name = Column(String(15))
    sale = Column(Integer, nullable=False, server_default=text("'0'"))
    sell_date = Column(Integer, nullable=False, server_default=text("'0'"))


if __name__ == '__main__':
    pass
| mit | Python |
|
f85f6ba07c47a6ccbd38a9e7bc2e9a2c69ebd09a | read senor values from rpi | yopzolo/ArduinoI2CMoistureSensor | pythonLib/ArduinoMoistureSensor.py | pythonLib/ArduinoMoistureSensor.py | import smbus
import time
import sys

bus = smbus.SMBus(1)
address = int(sys.argv[1])

data = bus.read_i2c_block_data(address, 0)
for i in range(0, 6):
    print (data[2*i] << 8) + data[2*i+1]
| apache-2.0 | Python |
|
47d7cfcd9db1a54e52532819895060527e1988b9 | update qlcoder | YcheLanguageStudio/PythonStudy | qlcoder/scheme_study/functional.py | qlcoder/scheme_study/functional.py | if __name__ == '__main__':
    my_arr = [None] * 7654321
    for i in range(0, 7654321):
        my_arr[i] = i
| mit | Python |
|
7618697cdb892388d7c5ddb731f5b9f138389ca4 | add A4 | rfdickerson/CS241,rfdickerson/cs241-data-structures,rfdickerson/CS241,rfdickerson/cs241-data-structures,rfdickerson/cs241-data-structures,rfdickerson/CS241 | A4/TestHashtable.py | A4/TestHashtable.py | #!/usr/bin/env python2
from hashtable import Hashtable, LinkedList, hashFunction
import unittest
import collections


class TestHashtable(unittest.TestCase):

    def setUp(self):
        self.buildings = {
            "CSCI": "McGlothlin-Street",
            "GSWS": "Tucker",
            "ENGL": "Tucker",
            "LING": "Tyler",
            "GERM": "Washington",
        }

    def testWithoutFunction(self):
        testingFunction = lambda key, numBuckets: sum(map(ord, key)) % numBuckets
        q = Hashtable(testingFunction, 1000)
        for key, value in self.buildings.items():
            q[key] = value
        for key, expected in self.buildings.items():
            observed = q[key]
            self.assertEquals(observed, expected, "small hashtable without your hash function: value changed after being added!\nkey:{}\nexpected value:{}\nobserved value:{}".format(key, expected, observed))

    def testWithFunction(self):
        q = Hashtable(hashFunction, 1000)
        for key, value in self.buildings.items():
            q[key] = value
        for key, expected in self.buildings.items():
            observed = q[key]
            self.assertEquals(observed, expected, "small hashtable with your hash function: value changed after being added! check __getitem__/__setitem__\nkey:{}\nexpected value:{}\nobserved value:{}".format(key, expected, observed))

    def testContains(self):
        q = Hashtable(hashFunction, 1000)
        for key, value in self.buildings.items():
            q[key] = value
        for key in self.buildings.keys():
            self.assertIn(key, q, "membership in small hashtable: `in` keyword didn't work! check __contains__.\nkey:{}".format(key,))

    def testLen(self):
        q = Hashtable(hashFunction, 1000)
        for key, value in self.buildings.items():
            q[key] = value
        self.assertLessEqual(len(q), len(self.buildings), "length: {} items is too many! check __len__.".format(len(q)))
        self.assertGreaterEqual(len(q), len(self.buildings), "length: {} items is not enough! check __len__.".format(len(q)))


if __name__ == "__main__":
    unittest.main()
| mit | Python |
|
c26b20a44c47474f88c8f155b36c8c6f0dcfd072 | Move packet processing into its own class | elliotta/hephaestus | innovate/packet.py | innovate/packet.py | """One data packet in Innovate Serial Protocol version 2 (ISP2).
For data format specifications, see
http://www.innovatemotorsports.com/support/downloads/Seriallog-2.pdf
"""

import struct


class InnovatePacket(object):
    """A packet in the Innovate Serial Protocol version 2 (ISP2).

    ISP2 packets are composed of 16 bit words.
    """

    # Define some bitmasks
    START_MARKER_MASK = 0b1000000000000000
    # In a header word, bits 13, 9, and 7 will be 1.
    HEADER_MASK = START_MARKER_MASK | 0b0010001010000000
    RECORDING_TO_FLASH_MASK = 0b0100000000000000  # In header. 1 if recording.
    SENSOR_DATA_MASK = 0b0001000000000000  # In header. 1 if data, 0 if reply to command.
    CAN_LOG_MASK = 0b0000100000000000  # In header. 1 if originating device can do internal logging.
    LC1_HIGH_MASK = 0b0100001000000000  # First of two words from an LC-1, bits always high
    LC1_LOW_MASK = 0b1010000010000000  # First of two words from an LC-1, bits always low

    def __init__(self, header=None, data=None):
        self.header = header
        self.data = data

    def _to_words(self, bytestring):
        """Convert a byte string to a list of words.

        Each word is an integer.
        """
        if bytestring is None:
            return None
        # Each word is two bytes long
        n_words = len(bytestring)/2
        # ISP2 words are big endian, indicated by ">"
        # ISP2 words are unsigned short, indicated by "H"
        return struct.unpack(">%dH" % n_words, bytestring)

    @property
    def header(self):
        return self._header

    @header.setter
    def header(self, header):
        """Input header as a bytestring.
        """
        if header is None:
            self._header = None
            return
        header = self._to_words(header)
        if len(header) != 1:
            raise Exception('Header must be exactly one word long.')
        header = header[0]
        if not header & self.HEADER_MASK == self.HEADER_MASK:
            raise Exception('Invalid header')
        self._header = header

    ## Data stored in the header ##

    @property
    def packet_length(self):
        """Get the packet length from the header.

        Packet length is the number of data words after the header.
        Note that each word is 2 bytes long.
        """
        if not self._header:
            return None
        # Packet length is encoded in bit 8 and bits 6-0
        # First, get bits 6-0
        packet_length = self._header & 0b0000000001111111
        # Bit 8 is the 7th (zero-indexed) bit in the length
        if self._header & 0b0000000100000000:
            packet_length += 0b10000000  # 128
        return packet_length

    @property
    def is_recording_to_flash(self):
        """Return boolean indicating whether the data is being recorded to flash.
        """
        if not self._header:
            return None
        return self.header & self.RECORDING_TO_FLASH_MASK == self.RECORDING_TO_FLASH_MASK

    @property
    def is_sensor_data(self):
        """Return True if the packet contains sensor data, False if it is a reply to a command.
        """
        if not self._header:
            return None
        return self.header & self.SENSOR_DATA_MASK == self.SENSOR_DATA_MASK

    @property
    def can_log(self):
        """Return boolean indicating whether the originating device can do internal logging.
        """
        if not self._header:
            return None
        return self.header & self.CAN_LOG_MASK == self.CAN_LOG_MASK

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data):
        """Input data as a bytestring.
        """
        self._data = self._to_words(data)
| mit | Python |
|
d635a60140c11c64db4ac887bc79396484bb55e3 | Add model_utils.print_graph_layer_shapes to handle Graph models. Also handle Merge layers | nehz/keras,ogrisel/keras,eulerreich/keras,DeepGnosis/keras,3dconv/keras,xurantju/keras,xiaoda99/keras,cheng6076/keras,JasonTam/keras,zhmz90/keras,keras-team/keras,nt/keras,Yingmin-Li/keras,jayhetee/keras,asampat3090/keras,zxytim/keras,dolaameng/keras,florentchandelier/keras,LIBOTAO/keras,zxsted/keras,navyjeff/keras,dxj19831029/keras,nebw/keras,untom/keras,vseledkin/keras,sjuvekar/keras,marchick209/keras,jbolinge/keras,iamtrask/keras,dribnet/keras,pjadzinsky/keras,llcao/keras,kuza55/keras,amy12xx/keras,yingzha/keras,ledbetdr/keras,keskarnitish/keras,relh/keras,DLlearn/keras,meanmee/keras,pthaike/keras,Cadene/keras,rodrigob/keras,OlafLee/keras,johmathe/keras,bboalimoe/keras,bottler/keras,printedheart/keras,MagicSen/keras,jasonyaw/keras,imcomking/Convolutional-GRU-keras-extension-,keras-team/keras,ml-lab/keras,jalexvig/keras,daviddiazvico/keras,EderSantana/keras,danielforsyth/keras,hhaoyan/keras,abayowbo/keras,zhangxujinsh/keras,fmacias64/keras,why11002526/keras,gamer13/keras,Aureliu/keras,saurav111/keras,wubr2000/keras,dhruvparamhans/keras,kemaswill/keras,mikekestemont/keras,tencrance/keras,ashhher3/keras,gavinmh/keras,Smerity/keras,jiumem/keras,nzer0/keras,cvfish/keras,kod3r/keras,wxs/keras,jimgoo/keras,ekamioka/keras,brainwater/keras,chenych11/keras,rudaoshi/keras,happyboy310/keras,iScienceLuvr/keras,stephenbalaban/keras,rlkelly/keras,harshhemani/keras | keras/utils/model_utils.py | keras/utils/model_utils.py | from __future__ import print_function
import numpy as np
import theano


def print_graph_layer_shapes(graph, input_shapes):
    """
    Utility function to print the shape of the output at each layer of a Graph

    Arguments:
        graph: An instance of models.Graph
        input_shapes: A dict that gives a shape for each input to the Graph
    """
    input_vars = [graph.inputs[name].input
                  for name in graph.input_order]
    output_vars = [graph.outputs[name].get_output()
                   for name in graph.output_order]
    input_dummy = [np.zeros(input_shapes[name], dtype=np.float32)
                   for name in graph.input_order]

    print("input shapes : ", input_shapes)
    for name, l in graph.nodes.items():
        shape_f = theano.function(input_vars,
                                  l.get_output(train=False).shape,
                                  on_unused_input='ignore')
        out_shape = shape_f(*input_dummy)
        print('shape after', l.get_config()['name'], "(", name, ") :", out_shape)


def print_model_layer_shapes(model, input_shapes):
    """
    Utility function that prints the shape of the output at each layer.

    Arguments:
        model: An instance of models.Model
        input_shape: The shape of the input you will provide to the model.
                     Either a tuple (for a single input) or a list of tuple
    """
    # This is to handle the case where a model has been connected to a previous
    # layer (and therefore get_input would recurse into previous layer's
    # output).
    if hasattr(model.layers[0], 'previous'):
        # TODO: If the model is used as a part of another model, get_input will
        # return the input of the whole model and this won't work. So this is
        # not handled yet
        raise Exception("This function doesn't work on model used as subparts "
                        " for other models")

    # We allow the shortcut input_shapes=(1, 1, 28) instead of
    # input_shapes=[(1, 1, 28)].
    if not isinstance(input_shapes[0], tuple):
        input_shapes = [input_shapes]

    input_vars = model.get_input(train=False)
    # theano.function excepts a list of variables
    if not isinstance(input_vars, list):
        input_vars = [input_vars]
    input_dummy = [np.zeros(shape, dtype=np.float32)
                   for shape in input_shapes]

    print("input shapes : ", input_shapes)
    for l in model.layers:
        shape_f = theano.function(input_vars,
                                  l.get_output(train=False).shape)
        out_shape = shape_f(*input_dummy)
        print('shape after', l.get_config()['name'], ":", out_shape)
| from __future__ import print_function
import numpy as np
import theano


def print_layer_shapes(model, input_shape):
    """
    Utility function that prints the shape of the output at each layer.

    Arguments:
        model: An instance of models.Model
        input_shape: The shape of the input you will provide to the model.
    """
    # This is to handle the case where a model has been connected to a previous
    # layer (and therefore get_input would recurse into previous layer's
    # output).
    if hasattr(model.layers[0], 'previous'):
        # TODO: If the model is used as a part of another model, get_input will
        # return the input of the whole model and this won't work. So this is
        # not handled yet
        raise Exception("This function doesn't work on model used as subparts "
                        " for other models")

    input_var = model.get_input(train=False)
    input_tmp = np.zeros(input_shape, dtype=np.float32)

    print("input shape : ", input_shape)
    for l in model.layers:
        shape_f = theano.function([input_var], l.get_output(train=False).shape)
        out_shape = shape_f(input_tmp)
        print('shape after', l.get_config()['name'], ":", out_shape)
| mit | Python |
f379160e56a94359d9571ea1b1db1f7544677a57 | Fix reference to `latestEvent` in tests. | gencer/sentry,jean/sentry,beeftornado/sentry,ifduyue/sentry,mvaled/sentry,looker/sentry,ifduyue/sentry,mvaled/sentry,beeftornado/sentry,looker/sentry,jean/sentry,gencer/sentry,beeftornado/sentry,gencer/sentry,gencer/sentry,looker/sentry,ifduyue/sentry,mvaled/sentry,jean/sentry,looker/sentry,gencer/sentry,mvaled/sentry,ifduyue/sentry,jean/sentry,mvaled/sentry,looker/sentry,jean/sentry,ifduyue/sentry,mvaled/sentry | tests/sentry/api/serializers/test_grouphash.py | tests/sentry/api/serializers/test_grouphash.py | from __future__ import absolute_import
from sentry.api.serializers import serialize
from sentry.models import Event, GroupHash
from sentry.testutils import TestCase


class GroupHashSerializerTest(TestCase):
    def test_no_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        result = serialize(hash, user=user)
        assert result['latestEvent'] is None

    def test_missing_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        GroupHash.record_last_processed_event_id(
            group.project_id,
            [hash.id],
            ['invalid'],
        )

        result = serialize(hash, user=user)
        assert result['latestEvent'] is None

    def test_mismatched_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        event = self.create_event(group=self.create_group())

        GroupHash.record_last_processed_event_id(
            group.project_id,
            [hash.id],
            event.event_id,
        )

        result = serialize(hash, user=user)
        assert result['latestEvent'] is None

    def test_valid_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        event = Event.objects.get(id=self.create_event(group=group).id)

        GroupHash.record_last_processed_event_id(
            group.project_id,
            [hash.id],
            event.event_id,
        )

        result = serialize(hash, user=user)
        assert result['latestEvent'] == serialize(event, user=user)
| from __future__ import absolute_import

from sentry.api.serializers import serialize
from sentry.models import Event, GroupHash
from sentry.testutils import TestCase


class GroupHashSerializerTest(TestCase):
    def test_no_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        result = serialize(hash, user=user)
        assert result['latest_event'] is None

    def test_missing_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        GroupHash.record_last_processed_event_id(
            group.project_id,
            [hash.id],
            ['invalid'],
        )

        result = serialize(hash, user=user)
        assert result['latest_event'] is None

    def test_mismatched_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        event = self.create_event(group=self.create_group())

        GroupHash.record_last_processed_event_id(
            group.project_id,
            [hash.id],
            event.event_id,
        )

        result = serialize(hash, user=user)
        assert result['latest_event'] is None

    def test_valid_latest_event(self):
        user = self.create_user()
        group = self.create_group()
        hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )

        event = Event.objects.get(id=self.create_event(group=group).id)

        GroupHash.record_last_processed_event_id(
            group.project_id,
            [hash.id],
            event.event_id,
        )

        result = serialize(hash, user=user)
        assert result['latest_event'] == serialize(event, user=user)
| bsd-3-clause | Python |
db13f88055d5ea2357ecc4b996f80d3392655516 | Create parse.py | abbacode/ciscoconfparser | parse.py | parse.py | __version__ = "1.0"
import os
from ciscoconfparse import CiscoConfParse

# -----------------------------------------------
# Create the db dictionary to store all records
# -----------------------------------------------
db = {}

# ----------------------------------------------------------------
# Update the dictionary below to search for new search parameters
# ----------------------------------------------------------------
data_to_search = {"NTP": r"ntp server",
                  "SNMP": r"snmp server",
                  "USERNAME": r"username",
                  "AAA": r"aaa",
                  "VERSION": r"System image file"}

print ("--------------------------------------------------------------------")
print (" Searching current directory and sub-directories for .txt files....")
print ("--------------------------------------------------------------------")
for path, dirs, files in os.walk("."):
    for f in files:
        if f.endswith('.txt'):
            hostname = f.replace(".txt", "")
            print ("Reading data from: {}".format(os.path.join(path, f)))
            # Create an entry for the devices based on the hostname
            db[hostname] = {}
            for search_parameter in data_to_search:
                db[hostname][search_parameter] = []
            # Read the configuration file
            parse = CiscoConfParse(os.path.join(path, f))
            # ----------------------------------------------------------
            # Search for all relevant items and store findings in the
            # db dictionary so that we can use them later on
            # ----------------------------------------------------------
            for search_parameter in data_to_search:
                for obj in parse.find_objects(data_to_search[search_parameter]):
                    db[hostname][search_parameter].append(obj.text)

print ("-----------------------")
print (" Configuration snapshot")
print ("-----------------------")
# Cycle through all the devices in the database
for device in sorted(db):
    print ("[{}]".format(device))
    # Cycle through each item in data_to_search
    for search_parameter in data_to_search:
        # If there is a value then print it
        if db[device][search_parameter]:
            for line in db[device][search_parameter]:
                print (" {}: {}".format(search_parameter.ljust(10), line))
        # Otherwise print that nothing was found
        else:
            print (" {}: NOT FOUND".format(search_parameter.ljust(10)))
    print ("")

print ("-------------------------------")
print (" Devices with missing entries ")
print ("-------------------------------")
for device in sorted(db):
    for entry in data_to_search:
        if not db[device][entry]:
            print ("[{}] has no entry defined for '{}'".format(device.ljust(25), entry))
| mit | Python |
|
bee35885bb845ea77aa4586bca33da3e54b92ed2 | Add `albumtypes` plugin | beetbox/beets,beetbox/beets,beetbox/beets,beetbox/beets | beetsplug/albumtypes.py | beetsplug/albumtypes.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2021, Edgars Supe.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Adds an album template field for formatted album types."""

from __future__ import division, absolute_import, print_function

from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.library import Album
from beets.plugins import BeetsPlugin


class AlbumTypesPlugin(BeetsPlugin):
    """Adds an album template field for formatted album types."""

    def __init__(self):
        """Init AlbumTypesPlugin."""
        super(AlbumTypesPlugin, self).__init__()
        self.album_template_fields['atypes'] = self._atypes

    def _atypes(self, item: Album):
        self.config.add({
            'types': [],
            'ignore_va': [],
            'bracket': '[]'
        })

        types = self.config['types'].as_pairs()
        ignore_va = self.config['ignore_va'].as_str_seq()
        bracket = self.config['bracket'].as_str()

        # Assign a left and right bracket or leave blank if argument is empty.
        if len(bracket) == 2:
            bracket_l = bracket[0]
            bracket_r = bracket[1]
        else:
            bracket_l = u''
            bracket_r = u''

        res = ''
        albumtypes = item.albumtypes.split('; ')
        is_va = item.mb_albumartistid == VARIOUS_ARTISTS_ID
        for type in types:
            if type[0] in albumtypes and type[1]:
                if not is_va or (not type[0] in ignore_va and is_va):
                    res += bracket_l + type[1] + bracket_r
        return res
| mit | Python |
|
f859eb67fdc66b930c3664a3586c454f5c9afe87 | Add files via upload | dionysius07/robot-vision | subunits/blink.py | subunits/blink.py | from nanpy import ArduinoApi
from nanpy import SerialManager
from time import sleep

link = SerialManager(device='/dev/ttyACM0')
A = ArduinoApi(connection=link)

led = 13

# SETUP:
A.pinMode(led, A.OUTPUT)

# LOOP:
while True:
    A.digitalWrite(led, A.HIGH)  # turn the LED on (HIGH is the voltage level)
    print "blink on"
    sleep(1)  # use Python sleep instead of arduino delay
    A.digitalWrite(led, A.LOW)  # turn the LED off by making the voltage LOW
    print "blink off"
    sleep(1)
| mit | Python |
|
a11cee952e1abc7e7310b760c8a4845c4f46fbae | add date_range.py | Impactstory/oadoi,Impactstory/oadoi,Impactstory/sherlockoa,Impactstory/oadoi,Impactstory/sherlockoa | date_range.py | date_range.py | from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import deferred
from sqlalchemy import or_
from sqlalchemy import sql
from sqlalchemy import text
from sqlalchemy import orm
import requests
from time import sleep
from time import time
import datetime
import shortuuid
from urllib import quote

from app import logger
from app import db
from util import elapsed
from util import safe_commit
from util import clean_doi


class DateRange(db.Model):
    id = db.Column(db.DateTime, primary_key=True)
    # end_date = db.Column(db.DateTime)

    @property
    def first(self):
        return self.id

    @property
    def first_day(self):
        return self.id.isoformat()[0:10]

    @property
    def last_day(self):
        return self.last.isoformat()[0:10]

    @property
    def last(self):
        return self.first + datetime.timedelta(days=1)

    def get_crossref_api_raw(self, rows=100):
        headers = {"Accept": "application/json", "User-Agent": "impactstory.org"}
        base_url_with_last = "http://api.crossref.org/works?filter=from-created-date:{first},until-created-date:{last}&rows={rows}&cursor={next_cursor}"
        # but if want all changes, use "indexed" not "created" as per https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#notes-on-incremental-metadata-updates

        next_cursor = "*"
        has_more_responses = True
        num_so_far = 0
        num_between_commits = 0

        while has_more_responses:
            start_time = time()
            url = base_url_with_last.format(
                first=self.first_day,
                last=self.last_day,
                rows=rows,
                next_cursor=next_cursor)
            logger.info(u"calling url: {}".format(url))

            resp = requests.get(url, headers=headers)
            logger.info(u"getting crossref response took {} seconds".format(elapsed(start_time, 2)))
            if resp.status_code != 200:
                logger.info(u"error in crossref call, status_code = {}".format(resp.status_code))
                return

            resp_data = resp.json()["message"]
            next_cursor = resp_data.get("next-cursor", None)
            if next_cursor:
                next_cursor = quote(next_cursor)

            if not resp_data["items"] or not next_cursor:
                has_more_responses = False

            for api_raw in resp_data["items"]:
                doi = clean_doi(api_raw["DOI"])
                crossref_api_obj = CrossrefApi(doi=doi, api_raw=api_raw)
                db.session.add(crossref_api_obj)
                num_between_commits += 1
                num_so_far += 1

                if num_between_commits > 100:
                    safe_commit(db)
                    num_between_commits = 0

            logger.info(u"at bottom of loop, got {} records".format(len(resp_data["items"])))

        # make sure to get the last ones
        logger.info(u"done everything, saving last ones")
        safe_commit(db)

        return num_so_far

    def __repr__(self):
        return u"<DateRange (starts: {})>".format(self.id)


class CrossrefApi(db.Model):
    id = db.Column(db.Text, primary_key=True)
    doi = db.Column(db.Text)
    updated = db.Column(db.DateTime)
    api_raw = db.Column(JSONB)

    def __init__(self, **kwargs):
        self.id = shortuuid.uuid()[0:10]
        self.updated = datetime.datetime.utcnow()
        super(CrossrefApi, self).__init__(**kwargs)
| mit | Python |
|
7d258bdb68119ad54a69e92ac7c7c1c2fc51e087 | Create scrap.py | shekhar-singh/scrap | scrap.py | scrap.py | #!/usr/bin/env python
import requests
from bs4 import BeautifulSoup

# Fetch the listing page and parse it
url = requests.get("http://video9.in/english/")
soup = BeautifulSoup(url.text)
for link in soup.find_all("div", {"class": "updates"}):
    print link.text
| mit | Python |
|
2aae4701fd98f560e7e112084f47f66515f6f574 | Add setup.py | matham/go_nogo,matham/sock_cond,matham/sniffer,matham/go_nogo,matham/forced_choice | setup.py | setup.py | from setuptools import setup, find_packages
import go_nogo_rig

setup(
    name='Go-NoGo',
    version=go_nogo_rig.__version__,
    packages=find_packages(),
    install_requires=['moa', 'pybarst', 'moadevs'],
    author='Matthew Einhorn',
    author_email='moiein2000@gmail.com',
    url='https://cpl.cornell.edu/',
    license='MIT',
    description='Go/NoGo experiment.',
    entry_points={'console_scripts': ['go_nogo=go_nogo_rig.main:run_app']},
)
| mit | Python |
|
aef67e19a3494880620fd87a68ff581edaa9ce81 | Add unittest for madx.evaluate | pymad/jpymad,pymad/cpymad,pymad/cpymad,pymad/jpymad,pymad/jpymad | test/test_madx.py | test/test_madx.py | import unittest
from cern.madx import madx
from math import pi


class TestMadX(unittest.TestCase):
    """Test methods of the madx class."""

    def setUp(self):
        self.madx = madx()

    def tearDown(self):
        del self.madx

    def testEvaluate(self):
        self.madx.command("FOO = PI*3;")
        val = self.madx.evaluate("1/FOO")
        self.assertAlmostEqual(val, 1/(3*pi))
| apache-2.0 | Python |
|
0a55f6f2bf49c679a422d44007df3f66c323e719 | mask unit test | bengranett/minimask | test/test_mask.py | test/test_mask.py | import numpy as np
from minimask.mask import Mask
from minimask.spherical_poly import spherical_polygon


def test_mask_sample():
    """ """
    vertices = [[0, 0], [10, 0], [10, 10], [0, 10]]
    S = spherical_polygon(vertices)
    M = Mask(polys=[S], fullsky=False)

    x, y = M.sample(100)
    assert len(x) == 1000
    assert len(y) == 1000
    assert np.abs(x.min()) < 1
    assert np.abs(y.min()) < 1
    assert np.abs(x.max() - 10) < 1
    assert np.abs(y.max() - 10) < 1

    r = M.contains(x, y)
    assert np.sum(r) == 0
| mit | Python |
25495d675c44a75d7dedfe123f30a858f9cd60be | Add minimal (no asserts) test for play plugin | MyTunesFreeMusic/privacy-policy,diego-plan9/beets,sampsyo/beets,sampsyo/beets,jcoady9/beets,swt30/beets,jcoady9/beets,LordSputnik/beets,jackwilsdon/beets,MyTunesFreeMusic/privacy-policy,parapente/beets,ibmibmibm/beets,ibmibmibm/beets,lengtche/beets,Freso/beets,xsteadfastx/beets,LordSputnik/beets,madmouser1/beets,lengtche/beets,shamangeorge/beets,mried/beets,madmouser1/beets,lengtche/beets,shamangeorge/beets,mosesfistos1/beetbox,SusannaMaria/beets,jcoady9/beets,ibmibmibm/beets,swt30/beets,Kraymer/beets,artemutin/beets,LordSputnik/beets,SusannaMaria/beets,jackwilsdon/beets,diego-plan9/beets,pkess/beets,Freso/beets,Kraymer/beets,diego-plan9/beets,sampsyo/beets,jackwilsdon/beets,mried/beets,pkess/beets,xsteadfastx/beets,shamangeorge/beets,madmouser1/beets,ibmibmibm/beets,mried/beets,artemutin/beets,Freso/beets,artemutin/beets,mosesfistos1/beetbox,SusannaMaria/beets,diego-plan9/beets,Freso/beets,xsteadfastx/beets,parapente/beets,sampsyo/beets,beetbox/beets,MyTunesFreeMusic/privacy-policy,jackwilsdon/beets,Kraymer/beets,pkess/beets,mried/beets,swt30/beets,beetbox/beets,artemutin/beets,pkess/beets,beetbox/beets,LordSputnik/beets,shamangeorge/beets,SusannaMaria/beets,swt30/beets,madmouser1/beets,beetbox/beets,lengtche/beets,Kraymer/beets,mosesfistos1/beetbox,xsteadfastx/beets,parapente/beets,parapente/beets,jcoady9/beets,MyTunesFreeMusic/privacy-policy,mosesfistos1/beetbox | test/test_play.py | test/test_play.py | # -*- coding: utf-8 -*-
"""Tests for the play plugin"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import patch, Mock
from test._common import unittest
from test.helper import TestHelper
from beetsplug.play import PlayPlugin
class PlayPluginTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('play')
self.add_item(title='aNiceTitle')
def tearDown(self):
self.teardown_beets()
self.unload_plugins()
@patch('beetsplug.play.util.interactive_open', Mock())
def test_basic(self):
self.run_command('play', 'title:aNiceTitle')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| mit | Python |
|
83d00fea8adf611984c3b56a63f080f144612c69 | Create data_tool.py | gu-yan/mlAlgorithms | data_tool.py | data_tool.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import pickle
import random
def load_data():
with open('dataset.pkl', 'r') as file:
data_set = pickle.load(file)
return data_set
def feature_format(data_set):
features = []
labels = []
for item in data_set:
features.append(item[:-1])
labels.append(item[-1])
return features, labels
def train_test_split(features, test_rate):
random.shuffle(features)
total_number = len(features)
test_number = int(round(len(features) * test_rate))
train_data = features[0:-test_number]
test_data = features[-test_number:total_number]
features_train, labels_train = feature_format(train_data)
features_test, labels_test = feature_format(test_data)
return features_train, labels_train, features_test, labels_test
| apache-2.0 | Python |
|
c488e446aee3d28fa84bb24d446ca22af20e461c | Add setup.py | bacher09/dynsupdate | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
def lt27():
import sys
v = sys.version_info
return (v[0], v[1]) < (2, 7)
tests_require = [
'nose>=1.0',
'mock',
]
if lt27():
tests_require.append('unittest2')
setup(
name='dynsupdate',
description='Dynamic DNS update like nsupdate',
install_requires=[
'dnspython',
],
tests_require=tests_require,
packages=find_packages(),
test_suite="nose.collector"
)
| bsd-3-clause | Python |
|
6d3a9f41bec03405fa648ce169b9565f937e4598 | add setup.py | trehn/timekeeper | setup.py | setup.py | from setuptools import setup
setup(
name="timekeeper",
version="0.1.0",
description="Send runtime measurements of your code to InfluxDB",
author="Torsten Rehn",
author_email="torsten@rehn.email",
license="ISC",
url="https://github.com/trehn/timekeeper",
keywords=["profiling", "profile", "metrics", "instrumentation", "measure", "influxdb"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Monitoring",
],
install_requires=[
"influxdb >= 2.0.0",
],
py_modules=['timekeeper'],
)
| isc | Python |
|
bc9401da60e8f10827f37772af937d4fb11ca248 | Add PyPI setup.py file | import/component.py | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='component',
author='Daniel Chatfield',
author_email='chatfielddaniel@gmail.com',
version='0.0.1',
url='http://github.com/import/component',
py_modules=['component'],
description='A python library that makes component(1) play nicely with python.',
zip_safe=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
) | mit | Python |
|
1d7fa31d9f4ce42586fb33bea98d5af87bd95f3a | Allow setup.py install | cdw/multifil | setup.py | setup.py | from setuptools import setup
setup(name='multifil',
version='0.2',
description='A spatial half-sarcomere model and the means to run it',
url='https://github.com/cdw/multifil',
author='C David Williams',
author_email='cdave@uw.edu',
license='MIT',
packages=['multifil'],
install_requires=['numpy', 'boto']
)
| mit | Python |
|
30220f57bc5052cb05ed5c7e3dc01c763152d175 | Add setup for python installation | jnez71/lqRRT | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='lqrrt',
version='1.0',
description='Kinodynamic RRT Implementation',
author='Jason Nezvadovitz',
packages=['lqrrt'],
)
| mit | Python |
|
0c7ec853c97a71eacc838be925c46ac0c26d1518 | Create setup.py | daTokenizer/ratio-merge-python | setup.py | setup.py | from distutils.core import setup
setup(
name = 'ratio-merge',
    packages = ['ratio_merge'],  # importable package names cannot contain hyphens
version = '0.1',
description = 'A small utility function for merging two lists by some ratio',
author = 'Adam Lev-Libfeld',
author_email = 'adam@tamarlabs.com',
url = 'https://github.com/daTokenizer/ratio-merge-python',
download_url = 'https://github.com/daTokenizer/ratio-merge-python/archive/0.1.tar.gz',
keywords = ['merge', 'ratio', 'lists'], # arbitrary keywords
classifiers = [],
)
| apache-2.0 | Python |
|
a29b7195af2550e5646f3aac581cbaf47244e8f4 | Create setup.py | david-hoffman/pyOTF | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# setup.py
"""
Setup files
Copyright (c) 2020, David Hoffman
"""
import setuptools
# read in long description
with open("README.md", "r") as fh:
long_description = fh.read()
# get requirements
with open("requirements.txt", "r") as fh:
requirements = [line.strip() for line in fh]
setuptools.setup(
name="py-otf",
version="0.0.1",
author="David Hoffman",
author_email="dave.p.hoffman@gmail.com",
description="A python library for simulating and analyzing microscope point spread functions (PSFs)",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
python_requires=">=3",
install_requires=requirements,
)
| apache-2.0 | Python |
|
45d734cb495e7f61c5cbbac2958e220868033a9d | Add setup.py for RTD | westernx/mayatools,westernx/mayatools | setup.py | setup.py | from distutils.core import setup
setup(
name='mayatools',
version='0.1-dev',
description='Collection of general tools and utilities for working in and with Maya.',
url='https://github.com/westernx/mayatools',
packages=['mayatools'],
author='Mike Boers',
author_email='mayatools@mikeboers.com',
license='BSD-3',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| bsd-3-clause | Python |
|
1ac147a2a9f627cccd917006f61cdda7b25ccc06 | Add setup.py | szabba/applied-sims | setup.py | setup.py | from distutils.core import setup
setup(
name='applied-sims',
version='0.1',
classifiers=[
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Physics',
'Intended Audience :: Other Audience',
],
packages=['polymer_states'],
url='http://github.com/szabba/applied-sims',
license='MPL-2.0',
author='Karol Marcjan',
author_email='karol.marcjan@gmail.com',
description=''
)
| mpl-2.0 | Python |
|
e6e96d9fa725ec28028b090c900086474e69cdb8 | Add basic setup.py | mikeboers/LiteMap,mikeboers/SerialView | setup.py | setup.py |
from distutils.core import setup
setup(
name='litemap',
version='1.0a',
description='Mapping class which stores in SQLite database.',
url='http://github.com/mikeboers/LiteMap',
py_modules=['litemap'],
author='Mike Boers',
author_email='litemap@mikeboers.com',
license='New BSD License',
)
| bsd-3-clause | Python |
|
479ff810c07ebe5c309bb4c9f712e689e831945e | Add setup.py | msabramo/ansible_role_apply,rajiteh/ansible_role_apply | setup.py | setup.py | import os
from setuptools import setup
this_dir = os.path.dirname(__file__)
long_description = "\n" + open(os.path.join(this_dir, 'README.rst')).read()
setup(
name='ansible_role_apply',
version='0.0.0',
description='Apply a single Ansible role to host(s) easily',
long_description=long_description,
keywords='ansible',
author='Marc Abramowitz',
author_email='mmsabramo@gmail.com',
url='https://github.com/msabramo/ansible-role-apply',
    py_modules=['ansible_role_apply'],
zip_safe=False,
install_requires=[
'ansible',
'click',
],
entry_points="""\
[console_scripts]
ansible-role-apply = ansible_role_apply:ansible_role_apply
""",
license='MIT',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Testing',
'Natural Language :: English',
'Intended Audience :: Developers',
],
)
| mit | Python |
|
9d12617170982fc1b6b01d109d986f5cd45e0552 | Update setup.py. | Bismarrck/pymatgen,ctoher/pymatgen,ctoher/pymatgen,Bismarrck/pymatgen,Dioptas/pymatgen,yanikou19/pymatgen,rousseab/pymatgen,migueldiascosta/pymatgen,yanikou19/pymatgen,Dioptas/pymatgen,Bismarrck/pymatgen,migueldiascosta/pymatgen,rousseab/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,sonium0/pymatgen,migueldiascosta/pymatgen,rousseab/pymatgen,sonium0/pymatgen,sonium0/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen | setup.py | setup.py | from setuptools import setup,find_packages
setup (
name = 'pymatgen',
version = '1.0.1',
packages = find_packages(),
# Declare your packages' dependencies here, for eg:
install_requires = ['numpy','scipy','matplotlib','PyCIFRW'],
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Dan Gunter',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, dkgunter@lbl.gov',
summary = 'The Materials Project Python Library',
url = 'www.materialsproject.org',
license = '',
long_description= 'pymatgen is a Python library for the Materials Project. It includes core structure definition and utilities, electronic structure objects, and convenient IO from VASP and CIF files.',
# could also include long_description, download_url, classifiers, etc.
)
| from setuptools import setup,find_packages
setup (
name = 'pymatgen',
version = '1.0.1',
packages = find_packages(),
# Declare your packages' dependencies here, for eg:
install_requires = ['numpy','matplotlib','pymongo','PyCIFRW','psycopg2'],
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Dan Gunter',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, dkgunter@lbl.gov',
summary = 'The Materials Project Python Library',
url = 'www.materialsproject.org',
license = '',
long_description= 'pymatgen is a Python library for the Materials Project. It includes core structure definition and utilities, electronic structure objects, database access APIs, and convenient IO from VASP and CIF files.',
# could also include long_description, download_url, classifiers, etc.
)
| mit | Python |
4a7234d4592166a1a13bc6b8e8b3b201019df23b | Create prims_minimum_spanning.py | keon/algorithms | algorithms/graph/prims_minimum_spanning.py | algorithms/graph/prims_minimum_spanning.py | import heapq # for priority queue
# input number of nodes and edges in graph
n, e = map (int,input().split())
# initializing empty graph as a dictionary (of the form {int:list})
g = dict (zip ([i for i in range(1,n+1)],[[] for i in range(n)]))
# input graph data
for i in range(e):
a, b, c = map (int,input().split())
g[a].append([c,b])
g[b].append([c,a])
vis = []
s = [[0,1]]
prim = []
mincost = 0
# prim's algo. to find weight of minimum spanning tree
while (len(s)>0):
v = heapq.heappop(s)
x = v[1]
if (x in vis):
continue
mincost += v[0]
prim.append(x)
vis.append(x)
for j in g[x]:
i = j[-1]
if(i not in vis):
heapq.heappush(s,j)
print(mincost)
| mit | Python |
|
24f6cbdcf2f4261a651d058934c65c3696988586 | add setup.py to document deps | lowerquality/gentle,lowerquality/gentle,lowerquality/gentle,lowerquality/gentle | setup.py | setup.py | from setuptools import setup
setup(
name='gentle',
version='0.1',
description='Robust yet lenient forced-aligner built on Kaldi.',
url='http://lowerquality.com/gentle',
author='Robert M Ochshorn',
license='MIT',
packages=['gentle'],
install_requires=['twisted'],
)
| mit | Python |
|
654bd46a8226ea97000a1263132a37f7bf130718 | ADD setup.py | jmetzen/kernel_regression | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='kernel_regression',
version='1.0',
description='Implementation of Nadaraya-Watson kernel regression with automatic bandwidth selection compatible with sklearn.',
author='Jan Hendrik Metzen',
author_email='jhm@informatik.uni-bremen.de',
url='https://github.com/jmetzen/kernel_regression',
py_modules = ['kernel_regression']
)
| bsd-3-clause | Python |
|
1707306cdee6442e78fe9eaee1d472a0248f75d5 | make license consistent | kislyuk/argcomplete,neizod/argcomplete,douglas-larocca/argcomplete,landonb/argcomplete,lisongmin/argcomplete,landonb/argcomplete,lisongmin/argcomplete,douglas-larocca/argcomplete,kislyuk/argcomplete,neizod/argcomplete | setup.py | setup.py | # -*- coding: utf-8 -*-
"""
argcomplete
~~~~
Argcomplete provides easy and extensible automatic tab completion of arguments and options for your Python script.
It makes two assumptions:
- You're using bash as your shell
- You're using argparse to manage your command line options
See AUTODOCS_LINK for more info.
"""
from setuptools import setup, find_packages
setup(
name='argcomplete',
version='0.1.0',
url='https://github.com/kislyuk/argcomplete',
license='GPL',
author='Andrey Kislyuk',
author_email='kislyuk@gmail.com',
description='Bash tab completion for argparse',
long_description=__doc__,
packages = find_packages(),
scripts = ['scripts/register-python-argcomplete'],
zip_safe=False,
include_package_data=True,
platforms=['MacOS X', 'Posix'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| # -*- coding: utf-8 -*-
"""
argcomplete
~~~~
Argcomplete provides easy and extensible automatic tab completion of arguments and options for your Python script.
It makes two assumptions:
- You're using bash as your shell
- You're using argparse to manage your command line options
See AUTODOCS_LINK for more info.
"""
from setuptools import setup, find_packages
setup(
name='argcomplete',
version='0.1.0',
url='https://github.com/kislyuk/argcomplete',
license='BSD',
author='Andrey Kislyuk',
author_email='kislyuk@gmail.com',
description='Bash tab completion for argparse',
long_description=__doc__,
packages = find_packages(),
scripts = ['scripts/register-python-argcomplete'],
zip_safe=False,
include_package_data=True,
platforms=['MacOS X', 'Posix'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| apache-2.0 | Python |
1f1096046e11067c4d42235d3b1aadbfec869bff | Remove setuptools from install_requires | ixc/django_polymorphic,chrisglass/django_polymorphic,pombredanne/django_polymorphic,skirsdeda/django_polymorphic,skirsdeda/django_polymorphic,chrisglass/django_polymorphic,pombredanne/django_polymorphic,ixc/django_polymorphic,pombredanne/django_polymorphic,skirsdeda/django_polymorphic,ixc/django_polymorphic | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-polymorphic',
version=find_version('polymorphic', '__init__.py'),
license='BSD',
description='Seamless Polymorphic Inheritance for Django Models',
long_description=read('README.rst'),
url='https://github.com/django-polymorphic/django-polymorphic',
author='Bert Constantin',
author_email='bert.constantin@gmx.de',
maintainer='Christopher Glass',
maintainer_email='tribaal@gmail.com',
packages=find_packages(),
package_data={
'polymorphic': [
'templates/admin/polymorphic/*.html',
],
},
test_suite='runtests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-polymorphic',
version=find_version('polymorphic', '__init__.py'),
license='BSD',
description='Seamless Polymorphic Inheritance for Django Models',
long_description=read('README.rst'),
url='https://github.com/django-polymorphic/django-polymorphic',
author='Bert Constantin',
author_email='bert.constantin@gmx.de',
maintainer='Christopher Glass',
maintainer_email='tribaal@gmail.com',
packages=find_packages(),
package_data={
'polymorphic': [
'templates/admin/polymorphic/*.html',
],
},
install_requires=['setuptools'],
test_suite='runtests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| bsd-3-clause | Python |
260911a0a46601092aa75882c806ca921a0cbf6d | Add setup.py file so we can install | ocefpaf/pyaxiom,axiom-data-science/pyaxiom,axiom-data-science/pyaxiom,ocefpaf/pyaxiom | setup.py | setup.py | from __future__ import with_statement
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
version = "0.0.1-dev"
def readme():
with open('README.md') as f:
return f.read()
reqs = [line.strip() for line in open('requirements.txt')]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name = "pyaxiom",
version = version,
description = "A library to manage various Axiom assets using Python",
long_description = readme(),
license = 'LGPLv3',
author = "Kyle Wilcox",
author_email = "kyle@axiomalaska.com",
url = "https://git.axiom/axiom/pyncml",
packages = find_packages(),
install_requires = reqs,
tests_require = ['pytest'],
cmdclass = {'test': PyTest},
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
include_package_data = True,
)
| mit | Python |
|
b1d87a8f96fb6a019bc7ebab71fe8e0c5921d80f | Include setup.py | emedvedev/attention-ocr | setup.py | setup.py | from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['distance', 'tensorflow', 'numpy', 'six']
setup(
name='attentionocr',
url='https://github.com/emedvedev/attention-ocr',
author_name='Ed Medvedev',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='''Optical character recognition model
for Tensorflow based on Visual Attention.'''
)
| mit | Python |
|
6ded510fa9c694e8a836302131157604859d40b1 | add setup settings | mrpatiwi/uc-numero-alumno-python | setup.py | setup.py | from setuptools import setup
setup(name='uc-numero-alumno',
version='0.1.0',
description='Valida un número de alumno de la UC ',
url='https://github.com/mrpatiwi/uc-numero-alumno-python',
author='Patricio López',
author_email='patricio@lopezjuri.com',
license='MIT',
packages=['ucnumber'],
zip_safe=False)
| mit | Python |
|
414c5d0f9e7e92772cf65be976791889e96e2799 | Package with setuptools | mtearle/npyscreenreactor | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Framework :: Twisted',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Natural Language :: English',
'Topic :: Software Development :: Libraries :: Python Modules'
]
setup(
name='npyscreenreactor',
version='1.1',
license='MIT',
classifiers=classifiers,
author='Mark Tearle',
author_email='mark@tearle.com',
description = 'Twisted reactor for npyscreen',
long_description = 'npyscreenreactor is a Twisted reactor for the npyscreen curses library',
url='https://github.com/mtearle/npyscreenreactor',
packages=find_packages(),
keywords=['npyscreen', 'twisted'],
install_requires=['twisted', 'npyscreen']
)
| mit | Python |
|
8aada38d951d039e11e03a6bae9445c784bb4cce | Write a brief demo using nltk | alexander-bauer/syllabus-summary | parse-demo.py | parse-demo.py | #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in nltk.sent_tokenize(data)]
# Define a grammar, and identify the noun phrases in the sentences.
chunk_parser = nltk.RegexpParser(r"NP: {<DT>?<JJ>*<NN>}")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for tree in trees:
print(tree)
#for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
#print(subtree)
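# A quick sketch of what the grammar above matches (Penn Treebank tags):
# DT = determiner, JJ = adjective, NN = singular noun, so "NP: {<DT>?<JJ>*<NN>}"
# chunks a phrase like "the/DT quick/JJ fox/NN" into a single NP subtree.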
| mit | Python |
|
89fa937d218bef113d2bcc681cb4dbd547940c45 | Add setup.py | koofr/python-koofr | setup.py | setup.py | from distutils.core import setup
setup(
name = 'koofr',
packages = ['koofr'], # this must be the same as the name above
install_requires=['requests'],
version = '0.1',
description = 'Python SDK for Koofr',
author = 'Andraz Vrhovec',
author_email = 'andraz@koofr.net',
url = 'https://github.com/koofr/python-koofr', # use the URL to the github repo
download_url = 'https://github.com/koofr/python-koofr/tarball/0.1', # I'll explain this in a second
keywords = ['api', 'koofr', 'cloud'], # arbitrary keywords
classifiers = [],
)
| mit | Python |
|
8ecfe73916fbca42b9a1b47fb2758bb561b76eec | Remove print. | Dioptas/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,Bismarrck/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,sonium0/pymatgen,yanikou19/pymatgen,sonium0/pymatgen,yanikou19/pymatgen,rousseab/pymatgen,Dioptas/pymatgen,Bismarrck/pymatgen,sonium0/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,rousseab/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,rousseab/pymatgen,migueldiascosta/pymatgen | setup.py | setup.py | import os
from setuptools import setup, find_packages
README = os.path.join(os.path.dirname(__file__), 'README.md')
long_description = open(README).read() + '\n\n'
setup (
name = 'pymatgen',
version = '1.2.4',
packages = find_packages(),
install_requires = ['numpy', 'scipy', 'matplotlib', 'PyCIFRW'],
package_data = {'pymatgen.core': ['*.json'], 'pymatgen.io': ['*.cfg']},
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier, Will Richards, Dan Gunter, Vincent L Chevrier, Rickard Armiento',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, geoffroy.hautier@uclouvain.be, wrichard@mit.edu, dkgunter@lbl.gov, vincentchevrier@gmail.com, armiento@mit.edu',
maintainer = 'Shyue Ping Ong',
url = 'https://github.com/CederGroupMIT/pymatgen_repo/',
license = 'MIT',
description = "pymatgen is the Python library powering the Materials Project (www.materialsproject.org).",
long_description = long_description,
keywords = ["vasp", "materials", "project", "electronic", "structure"],
classifiers = [
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules",
],
download_url = "https://github.com/CederGroupMIT/pymatgen_repo/tarball/master",
test_suite = 'nose.collector',
test_requires = ['nose']
)
| import os
from setuptools import setup, find_packages
README = os.path.join(os.path.dirname(__file__), 'README.md')
long_description = open(README).read() + '\n\n'
print find_packages()
setup (
name = 'pymatgen',
version = '1.2.4',
packages = find_packages(),
install_requires = ['numpy', 'scipy', 'matplotlib', 'PyCIFRW'],
package_data = {'pymatgen.core': ['*.json'], 'pymatgen.io': ['*.cfg']},
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier, Will Richards, Dan Gunter, Vincent L Chevrier, Rickard Armiento',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, geoffroy.hautier@uclouvain.be, wrichard@mit.edu, dkgunter@lbl.gov, vincentchevrier@gmail.com, armiento@mit.edu',
maintainer = 'Shyue Ping Ong',
url = 'https://github.com/CederGroupMIT/pymatgen_repo/',
license = 'MIT',
description = "pymatgen is the Python library powering the Materials Project (www.materialsproject.org).",
long_description = long_description,
keywords = ["vasp", "materials", "project", "electronic", "structure"],
classifiers = [
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules",
],
download_url = "https://github.com/CederGroupMIT/pymatgen_repo/tarball/master",
test_suite = 'nose.collector',
test_requires = ['nose']
)
| mit | Python |
fe8cc65832b389314ee6e83c76371809e40cc5d1 | Bump to 0.1.1 | kivy-garden/garden,kivy-garden/garden,kivy-garden/garden,mohammadj22/garden | setup.py | setup.py | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(
name='Kivy Garden',
version='0.1.1',
license='MIT',
packages=['garden'],
scripts=['bin/garden', 'bin/garden.bat'],
install_requires=['requests'],
)
| from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(
name='Kivy Garden',
version='0.1',
license='MIT',
packages=['garden'],
scripts=['bin/garden', 'bin/garden.bat'],
install_requires=['requests'],
)
| mit | Python |
ce7914dd35e66820248cb82760b50a31bc8a625b | Add setup.py script to install whip-neustar cli script | wbolster/whip-neustar | setup.py | setup.py | from setuptools import setup
setup(
name='whip-neustar',
version='0.1',
packages=['whip_neustar'],
entry_points={
'console_scripts': [
'whip-neustar = whip_neustar.cli:main',
],
}
)
| bsd-3-clause | Python |
|
a72bc73aab4b696113bee16f5f7f9da1540bc02f | Create playerlist.py | Myselfminer/N | playerlist.py | playerlist.py | import config
class players:
def __init__(self):
self.path=config.install_path+"reg\\N_NOW_RUNNING\\PLAYERS\\LIST.nreg"
def get_names_str(self,level):
a=open(self.path,"r")
b=a.readlines()
string=""
for i in b:
string=string+i
a.close()
return string
def get_names_list(self, level):
a=open(self.path,"r")
b=a.readlines()
string=[]
for i in b:
            string.append(i)
        a.close()
        return string
def add(self, name, uuid, level, entity_id):
a=open(self.path,"a")
        a.write(name+";"+uuid+";"+entity_id+";"+level+"\n")
a.close()
def remove(self, name, uuid, level, entity_id):
a=open(self.path, "r")
b=a.readlines()
        b.remove(name+";"+uuid+";"+str(entity_id)+";"+str(level)+"\n")
a=open(self.path,"w")
for i in b:
a.write(i)
a.close()
del b
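# Usage sketch (illustrative values; the registry path is derived from
# config.install_path, which must exist before players() is constructed):
# p = players()
# p.add("Steve", "some-uuid", "1", "17")
# print(p.get_names_str(1))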
| apache-2.0 | Python |
|
7fa6d8beb2637bed6b31cf1cea5fdafffc6049bf | add tests | schieb/angr,haylesr/angr,chubbymaggie/angr,f-prettyland/angr,angr/angr,iamahuman/angr,iamahuman/angr,iamahuman/angr,schieb/angr,tyb0807/angr,angr/angr,chubbymaggie/angr,f-prettyland/angr,schieb/angr,axt/angr,angr/angr,chubbymaggie/angr,axt/angr,f-prettyland/angr,axt/angr,tyb0807/angr,tyb0807/angr,haylesr/angr | tests/test_dfg.py | tests/test_dfg.py | #!/usr/bin/env python
import logging
import time
import sys
from os.path import join, dirname, realpath
l = logging.getLogger("angr.tests.test_dfg")
l.setLevel(logging.DEBUG)
import nose
import angr
import pyvex
test_location = str(join(dirname(realpath(__file__)), "../../binaries/tests"))
def perform_one(binary_path):
proj = angr.Project(join(test_location, binary_path),
load_options={'auto_load_libs': False},
)
start = time.time()
cfg = proj.analyses.CFG(context_sensitivity_level=2)
end = time.time()
duration = end - start
l.info("CFG generated in %f seconds.", duration)
dfg = proj.analyses.DFG(cfg=cfg)
nose.tools.assert_true(len(dfg.dfgs) <= len(cfg.nodes()))
for addr, d in dfg.dfgs.items():
nose.tools.assert_true(cfg.get_any_node(addr) is not None)
# We check there is not node that we ignored
for n in d.nodes():
nose.tools.assert_not_equal(n.tag, 'Ist_IMark')
nose.tools.assert_not_equal(n.tag, 'Ist_AbiHint')
nose.tools.assert_not_equal(n.tag, 'Ist_Exit')
if n.tag == 'Ist_Put':
nose.tools.assert_not_equal(n.offset, n.arch.ip_offset)
for (a, b) in d.edges():
if isinstance(a, pyvex.IRExpr.IRExpr):
# We check that there is no edge between two expressions/const
nose.tools.assert_false(isinstance(b, pyvex.IRExpr.IRExpr))
# If there is an edge coming from an expr/const it should be in
# the dependencies of the other node
# FIXME
# Impossible to check because of the Unop optimization in the
# DFG...
# nose.tools.assert_true(a in b.expressions)
elif hasattr(a, 'tmp'):
# If there is an edge between a tmp and another node
# be sure that this tmp is in the dependencies of this node
tmps = [ ]
for e in b.expressions:
if hasattr(e, 'tmp'):
tmps.append(e.tmp)
nose.tools.assert_true(a.tmp in tmps)
def test_dfg_isalnum():
perform_one("i386/isalnum")
def test_dfg_counter():
perform_one("i386/counter")
def test_dfg_cfg_0():
perform_one("x86_64/cfg_0")
def test_dfg_fauxware():
perform_one("mips/fauxware")
def run_all():
functions = globals()
all_functions = dict(filter((lambda (k, v): k.startswith('test_') and hasattr(v, '__call__')), functions.items()))
for f in sorted(all_functions.keys()):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.analyses.dfg").setLevel(logging.DEBUG)
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
| bsd-2-clause | Python |
|
d7a0962a817e1a7e530fcd84a11dc51be82574a6 | Create get_qpf_f012.py | wfclark/hamlet,wfclark/hamlet | get_qpf_f012.py | get_qpf_f012.py | import sys
import os
import urllib2
import datetime
import time
import psycopg2
from subprocess import call, Popen
# pull the latest 12-hour quantitative precipitation forecast (QPF) data
os.system("wget http://www.srh.noaa.gov/ridge2/Precip/qpfshp/latest/latest_rqpf_f012.tar.gz -O latest_rqpf_f012.tar.gz")
os.system("mv latest_rqpf_f012.tar.gz latest_rqpf_f012.tar")
os.system("tar xvf latest_rqpf_f012.tar")
latest_rqpf_f012_shp = './latest/latest_rqpf_f012.shp'
last_hr_shp2pgsql = 'ogr2ogr -f "PostgreSQL" PG:"user=postgres dbname=hamlet password=password" {} -t_srs EPSG:4326 -nln latest_rqpf_f012 -overwrite'.format(latest_rqpf_f012_shp)
print last_hr_shp2pgsql
call(last_hr_shp2pgsql, shell = True)
conn_string = "dbname='hamlet' user=postgres port='5432' host='127.0.0.1' password='password'"
print "Connecting to database..."
try:
conn = psycopg2.connect(conn_string)
except Exception as e:
print str(e)
sys.exit()
print "Connected!\n"
drop_cur = conn.cursor()
#creating views that show where the roads are potentially flooded or exposed to icy conditions
drop_cur.execute("""drop table if exists roads_flooded_bunco cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_heavy cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_moderate cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_light cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_drizzle cascade;""")
conn.commit()
drop_cur.close()
flooded_cur = conn.cursor()
flooded_cur.execute("""
create table roads_flooded_bunco as
select
a.gid,
street_nam,
sum(b.globvalue),
a.geom
from conterlines_poly as a
inner join last_hr_prcp as b
on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
group by a.gid, a.street_nam, a.geom;""")
flooded_cur.execute("""create table roads_flooded_se_heavy as
select
gid,
street_nam,
sum(b.globvalue),
a.geom
from se_road_polys as a
inner join last_hr_prcp as b
on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= 1
group by a.gid, a.street_nam, a.geom;""")
flooded_cur.execute("""create table roads_flooded_se_moderate as
select
gid,
street_nam,
sum(b.globvalue),
a.geom
from se_road_polys as a
inner join last_hr_prcp as b
on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= .5
group by a.gid, a.street_nam, a.geom;
""")
flooded_cur.execute("""create table roads_flooded_se_light as
select
gid,
street_nam,
sum(b.globvalue),
a.geom
from se_road_polys as a
inner join last_hr_prcp as b
on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= .25
group by a.gid, a.street_nam, a.geom;""")
flooded_cur.execute("""create table roads_flooded_se_drizzle as
select
gid,
street_nam,
sum(b.globvalue),
a.geom
from se_road_polys as a
inner join last_hr_prcp as b
on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= .1 and b.globvalue <= .25
group by a.gid, a.street_nam, a.geom;""")
conn.commit()
flooded_cur.close()
| bsd-3-clause | Python |
|
504612eb0c3c6ec210dd6e555941c13523333f12 | install without cython | hickford/primesieve-python,anirudhjayaraman/primesieve-python,jakirkham/primesieve-python | setup.py | setup.py | from setuptools import setup, Extension
from glob import glob
library = ('primesieve', dict(
sources=glob("lib/primesieve/src/primesieve/*.cpp"),
include_dirs=["lib/primesieve/include"],
language="c++",
))
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
extension = Extension(
"primesieve",
["primesieve/primesieve.pyx"] if cythonize else ["primesieve/primesieve.cpp"],
include_dirs = ["lib/primesieve/include"],
language="c++",
)
if cythonize:
    extension = cythonize(extension)[0]  # cythonize returns a list of Extension objects
setup(
name='primesieve',
url = "https://github.com/hickford/primesieve-python",
license = "MIT",
libraries = [library],
ext_modules = [extension],
)
| from setuptools import setup, Extension
from Cython.Build import cythonize
from glob import glob
library = ('primesieve', dict(
sources=glob("lib/primesieve/src/primesieve/*.cpp"),
include_dirs=["lib/primesieve/include"],
language="c++",
))
extension = Extension(
"primesieve",
["primesieve/primesieve.pyx"],
include_dirs = ["lib/primesieve/include"],
language="c++",
)
setup(
name='primesieve',
url = "https://github.com/hickford/primesieve-python",
license = "MIT",
libraries = [library],
ext_modules = cythonize(extension),
)
| mit | Python |
42ca323888dc13246fa7f6a01a6e29efcdb2d5c5 | Add setup.py | mcs07/MolVS | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
import molvs
if os.path.exists('README.rst'):
long_description = open('README.rst').read()
else:
long_description = ''''''
setup(
name='MolVS',
version=molvs.__version__,
author=molvs.__author__,
author_email=molvs.__email__,
license=molvs.__license__,
url='https://github.com/mcs07/MolVS',
packages=['molvs'],
description='',
long_description=long_description,
keywords='chemistry cheminformatics rdkit',
zip_safe=False,
test_suite='nose.collector',
entry_points={'console_scripts': ['molvs = molvs.cli:main']},
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| mit | Python |
|
e91b1c56b252ddc3073a15209e38e73424911b62 | Remove unused import. | DVegaCapital/zipline,jimgoo/zipline-fork,DVegaCapital/zipline,nborggren/zipline,MonoCloud/zipline,florentchandelier/zipline,aajtodd/zipline,wilsonkichoi/zipline,aajtodd/zipline,kmather73/zipline,bartosh/zipline,ChinaQuants/zipline,alphaBenj/zipline,jimgoo/zipline-fork,iamkingmaker/zipline,michaeljohnbennett/zipline,joequant/zipline,morrisonwudi/zipline,sketchytechky/zipline,YuepengGuo/zipline,nborggren/zipline,cmorgan/zipline,keir-rex/zipline,YuepengGuo/zipline,AlirezaShahabi/zipline,enigmampc/catalyst,jordancheah/zipline,humdings/zipline,umuzungu/zipline,MonoCloud/zipline,chrjxj/zipline,keir-rex/zipline,chrjxj/zipline,CDSFinance/zipline,magne-max/zipline-ja,grundgruen/zipline,enigmampc/catalyst,otmaneJai/Zipline,quantopian/zipline,wubr2000/zipline,florentchandelier/zipline,gwulfs/zipline,bartosh/zipline,humdings/zipline,gwulfs/zipline,AlirezaShahabi/zipline,quantopian/zipline,iamkingmaker/zipline,umuzungu/zipline,otmaneJai/Zipline,magne-max/zipline-ja,zhoulingjun/zipline,sketchytechky/zipline,wilsonkichoi/zipline,dkushner/zipline,dmitriz/zipline,jordancheah/zipline,joequant/zipline,CDSFinance/zipline,grundgruen/zipline,dkushner/zipline,alphaBenj/zipline,wubr2000/zipline,michaeljohnbennett/zipline,dmitriz/zipline,kmather73/zipline,ChinaQuants/zipline,Scapogo/zipline,morrisonwudi/zipline,zhoulingjun/zipline,Scapogo/zipline,cmorgan/zipline | setup.py | setup.py | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np
ext_modules = [
Extension(
'zipline.assets._assets',
['zipline/assets/_assets.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.lib.adjusted_array',
['zipline/lib/adjusted_array.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.lib.adjustment',
['zipline/lib/adjustment.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.data.ffc.loaders._us_equity_pricing',
['zipline/data/ffc/loaders/_us_equity_pricing.pyx'],
include_dirs=[np.get_include()],
),
]
setup(
name='zipline',
version='0.8.0rc1',
description='A backtester for financial algorithms.',
author='Quantopian Inc.',
author_email='opensource@quantopian.com',
packages=['zipline'],
ext_modules=cythonize(ext_modules),
scripts=['scripts/run_algo.py'],
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=[
'Logbook',
'pytz',
'requests',
'numpy',
'pandas',
'six',
'Cython',
],
extras_require={
'talib': ["talib"],
},
url="http://zipline.io"
)
| #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Extension
from Cython.Build import cythonize
import numpy as np
ext_modules = [
Extension(
'zipline.assets._assets',
['zipline/assets/_assets.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.lib.adjusted_array',
['zipline/lib/adjusted_array.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.lib.adjustment',
['zipline/lib/adjustment.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.data.ffc.loaders._us_equity_pricing',
['zipline/data/ffc/loaders/_us_equity_pricing.pyx'],
include_dirs=[np.get_include()],
),
]
setup(
name='zipline',
version='0.8.0rc1',
description='A backtester for financial algorithms.',
author='Quantopian Inc.',
author_email='opensource@quantopian.com',
packages=['zipline'],
ext_modules=cythonize(ext_modules),
scripts=['scripts/run_algo.py'],
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=[
'Logbook',
'pytz',
'requests',
'numpy',
'pandas',
'six',
'Cython',
],
extras_require={
'talib': ["talib"],
},
url="http://zipline.io"
)
| apache-2.0 | Python |
d139ced0b482fa65720a3c8f268d71dbf25119fb | add jsonsoup plugin | melmothx/jsonbot,melmothx/jsonbot,melmothx/jsonbot | commonplugs/jsonsoup.py | commonplugs/jsonsoup.py | # commonplugs/jsonsoup.py
#
#
## gozerlib imports
from gozerlib.callbacks import callbacks
from gozerlib.utils.url import posturl, getpostdata
from gozerlib.persistconfig import PersistConfig
from gozerlib.commands import cmnds
from gozerlib.socket.irc.monitor import outmonitor
from gozerlib.socket.rest.server import RestServer, RestRequestHandler
from gozerlib.eventbase import EventBase
from gozerlib.utils.exception import handle_exception
from gozerlib.examples import examples
## simplejson imports
from simplejson import dumps
## basic imports
import socket
import re
import logging
## VARS
outurl = "http://jsonsoup.appspot.com/soup/"
state = PersistConfig()
if not state.data:
state.data = {}
if not state.data.has_key('relay'):
state.data['relay'] = []
cfg = PersistConfig()
cfg.define('enable', 0)
cfg.define('host' , socket.gethostbyname(socket.getfqdn()))
cfg.define('name' , socket.getfqdn())
cfg.define('port' , 10102)
cfg.define('disable', [])
waitre = re.compile(' wait (\d+)', re.I)
hp = "%s:%s" % (cfg.get('host'), cfg.get('port'))
url = "http://%s" % hp
## callbacks
def preremote(bot, event):
if event.channel in state.data['relay']:
return True
def handle_doremote(bot, event):
if event.isremote:
return
posturl(outurl, {}, {'event': event.tojson() })
callbacks.add('PRIVMSG', handle_doremote, preremote, threaded=True)
callbacks.add('OUTPUT', handle_doremote, preremote, threaded=True)
callbacks.add('MESSAGE', handle_doremote, preremote, threaded=True)
callbacks.add('BLIP_SUBMITTED', handle_doremote, preremote, threaded=True)
outmonitor.add('soup', handle_doremote, preremote, threaded=True)
## server part
server = None
def soup_POST(server, request):
try:
input = getpostdata(request)
container = input['container']
    except (KeyError, AttributeError):
logging.warn("soup - %s - can't determine eventin" % request.ip)
return dumps(["can't determine eventin"])
event = EventBase()
event.load(container)
callbacks.check(event)
return dumps(['ok',])
def soup_GET(server, request):
try:
path, container = request.path.split('#', 1)
except ValueError:
logging.warn("soup - %s - can't determine eventin" % request.ip)
return dumps(["can't determine eventin", ])
try:
event = EventBase()
event.load(container)
callbacks.check(event)
except Exception, ex:
handle_exception()
return dumps(['ok', ])
def startserver():
try:
import google
return
except ImportError:
pass
global server
try:
server = RestServer((cfg.get('host'), cfg.get('port')), RestRequestHandler)
if server:
server.start()
logging.warn('soup - running at %s:%s' % (cfg.get('host'), cfg.get('port')))
server.addhandler('/soup/', 'POST', soup_POST)
server.addhandler('/soup/', 'GET', soup_GET)
for mount in cfg.get('disable'):
server.disable(mount)
else:
logging.error('soup - failed to start server at %s:%s' % (cfg.get('host'), cfg.get('port')))
except socket.error, ex:
        logging.warn('soup - start - socket error: %s' % str(ex))
except Exception, ex:
handle_exception()
def stopserver():
try:
if not server:
logging.warn('soup - server is already stopped')
return
server.shutdown()
except Exception, ex:
handle_exception()
pass
## plugin init
def init():
if cfg['enable']:
startserver()
def shutdown():
if cfg['enable']:
stopserver()
def handle_soup_on(bot, event):
if not event.rest:
target = event.channel
else:
target = event.rest
if not target in state.data['relay']:
state.data['relay'].append(target)
state.save()
event.done()
cmnds.add('soup-on', handle_soup_on, 'OPER')
examples.add('soup-on', 'enable relaying of the channel to the JSONBOT event network (jsonsoup)', 'soup-on')
def handle_soup_off(bot, event):
if not event.rest:
target = event.channel
else:
target = event.rest
if target in state.data['relay']:
state.data['relay'].remove(target)
state.save()
event.done()
cmnds.add('soup-off', handle_soup_off, 'OPER')
examples.add('soup-off', 'disable relaying of channel to the JSONBOT event network (jsonsoup)', 'soup-off')
def handle_soup_startserver(bot, event):
cfg['enable'] = 1
cfg.save()
startserver()
event.done()
cmnds.add('soup-startserver', handle_soup_startserver, 'OPER')
examples.add('soup-startserver', 'start the JSONBOT event network server', 'soup-startserver')
def handle_soup_stopserver(bot, event):
cfg['enable'] = 0
cfg.save()
stopserver()
event.done()
cmnds.add('soup-stopserver', handle_soup_stopserver, 'OPER')
examples.add('soup-stopserver', 'stop the JSONBOT event network server', 'soup-startserver')
| mit | Python |
|
52cd79d7045a69ff5073af7ed14e9ed774de7a39 | Add setup.py. | pySUMO/pysumo,pySUMO/pysumo | setup.py | setup.py | from setuptools import setup
setup(
name='pySUMO',
version='0.0.0a1',
description='A graphical IDE for Ontologies written in SUO-Kif',
long_description='A graphical IDE for Ontologies written in SUO-Kif',
url='',
author='',
author_email='',
license='',
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: ',
'Programming Language :: Python :: 3.4',
],
keywords='SUMO Ontologies SUO-Kif',
package_dir={'':'src'},
packages=['pysumo', 'pysumo.logger', 'pySUMOQt', 'pySUMOQt.Designer', 'pySUMOQt.Widget'],
install_requires=['pyside'],
extras_require={'test' : ['pytest']},
data_files=[('data', ['data/Merge.kif', 'data/MILO.kif']),
('data/wordnet', [''.join(['data/wordnet/sdata.', x]) for x in
['adj', 'adv', 'noun', 'verb']]),],
entry_points={'gui_scripts': ['pySUMOQt = pySUMOQt.MainWindow:main']},
)
| bsd-2-clause | Python |
|
7354dc674a4551169fb55bfcec208256e956d14e | Add skeleton class for conditions | lnishan/SQLGitHub | components/condition.py | components/condition.py | """A class to store conditions (eg. WHERE [cond])."""
class SgConditionSimple:
"""
A class to store a simple condition.
A simple condition is composed of 2 operands and 1 operator.
"""
    def __init__(self, operand_l, operator, operand_r):
        self._op_l = operand_l
        self._op = operator
        self._op_r = operand_r
class SgCondition:
"""A class to store a (complex) condition."""
def __init__(self, expr):
self._expr = expr
self._conds = [] # simple conditions
self._conns = [] # connectors (eg. and, or)
# TODO(lnishan): parse expr into _conds and _conns.
def Evaluate(self, fields, row):
# TODO(lnishan): Evaluate the (complex) condition.
return True
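# Intended usage sketch (names are illustrative, pending the TODOs above):
# cond = SgCondition("stars > 100 and language = 'Python'")
# matching_rows = [row for row in rows if cond.Evaluate(fields, row)]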
| mit | Python |
|
a0607d0f9b7c08ddcf81459868b33761d8ed5bb2 | Set up the dependency | keras-team/keras-nlp,keras-team/keras-nlp | setup.py | setup.py | # Copyright 2021 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
from setuptools import find_packages
from setuptools import setup
setup(
name="keras-nlp",
description="High-level NLP libraries based on Keras",
url="https://github.com/keras-team/keras-nlp",
author="Keras team",
author_email="keras-nlp@google.com",
license="Apache License 2.0",
    # tensorflow is listed as a dependency below; note that this forces a
    # specific build to be downloaded, so users who need a particular
    # variant (e.g. GPU) may prefer to install it manually first.
install_requires=["packaging", "tensorflow", "numpy"],
extras_require={"tests": ["flake8", "isort", "black",],},
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Operating System :: Unix",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("tests",)),
) | apache-2.0 | Python |
|
d9be2b8a61a88f0ee228c08d1f277770602840b1 | Add python version for compress | armiller/puzzles,armiller/puzzles | compression/compress.py | compression/compress.py |
def compress(uncompressed):
count = 1
compressed = ""
if not uncompressed:
return compressed
letter = uncompressed[0]
for nx in uncompressed[1:]:
if letter == nx:
count = count + 1
else:
compressed += "{}{}".format(letter, count)
count = 1
letter = nx
compressed += "{}{}".format(letter, count)
return compressed
if __name__ == "__main__":
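    # Expected output: a3b3c4d1 (three a's, three b's, four c's, one d)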
print(compress("aaabbbccccd")) | apache-2.0 | Python |
|
d480c2738bb4d0ae72643fc9bc1f911cb630539c | add 12-list.py | weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016 | python/12-list.py | python/12-list.py | #!/usr/bin/env python
import math
list = ['physics', 'chemistry', 1997, 2001];
print "list[2] = ", list[2]
print "list[1:3] = ", list[1:3]
list[2] = "math";
print "update, list[2] = ", list[2]
del list[2]
print "delete, list[2] = ", list[2]
print "length of delete:", len(list)
if ('physics' in list):
print "physics is in list"
else:
print "physics is not in list"
for elem in list:
print "elem :", elem
| bsd-2-clause | Python |
|
240b22d0b078951b7d1f0df70156b6e2041a530f | fix setup.py dor pypi. | h4ki/couchapp,couchapp/couchapp,benoitc/erica,flimzy/couchapp,perplexes/couchapp,benoitc/erica,couchapp/couchapp,diderson/couchapp,couchapp/couchapp,dustin/couchapp,perplexes/couchapp,couchapp/couchapp,diderson/couchapp,h4ki/couchapp,perplexes/couchapp,dustin/couchapp,flimzy/couchapp,diderson/couchapp,flimzy/couchapp,dustin/couchapp,h4ki/couchapp,diderson/couchapp,perplexes/couchapp,h4ki/couchapp,flimzy/couchapp | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Benoit Chesneau <benoitc@e-engura.org>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import os
import sys
from setuptools import setup
data_files = []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('app-template'):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name = 'Couchapp',
version = '0.1.4',
url = 'http://github.com/benoitc/couchapp/tree/master',
license = 'Apache License 2',
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.org',
description = 'Standalone CouchDB Application Development Made Simple.',
long_description = """CouchApp is a set of helpers and a jQuery plugin
that conspire to get you up and running on CouchDB quickly and
correctly. It brings clarity and order to the freedom of CouchDB's
document-based approach.""",
keywords = 'couchdb couchapp',
platforms = 'any',
zip_safe = False,
packages= ['couchapp'],
package_dir={'couchapp': 'python/couchapp'},
data_files = data_files,
include_package_data = True,
scripts = ['python/couchapp/bin/couchapp'],
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Database',
'Topic :: Utilities',
],
setup_requires = [
'setuptools>=0.6c9',
'couchdb',
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Benoit Chesneau <benoitc@e-engura.org>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import os
import sys
from setuptools import setup
data_files = []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('app-template'):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name = 'Couchapp',
version = '0.1.4',
url = 'http://github.com/benoitc/couchapp/tree/master',
license = 'Apache License 2',
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.org',
description = 'Standalone CouchDB Application Development Made Simple.',
long_description = """CouchApp is a set of helpers and a jQuery plugin
that conspire to get you up and running on CouchDB quickly and
correctly. It brings clarity and order to the freedom of CouchDB’s
document-based approach.""",
keywords = 'couchdb couchapp',
platforms = 'any',
zip_safe = False,
packages= ['couchapp'],
package_dir={'couchapp': 'python/couchapp'},
data_files = data_files,
include_package_data = True,
scripts = ['python/couchapp/bin/couchapp'],
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Database',
'Topic :: Utilities',
],
setup_requires = [
'setuptools>=0.6c9',
'couchdb>=0.5dev',
]
)
| apache-2.0 | Python |
3ada80358a059b3a5ee4dd4ceed572f933a1ec67 | Create setup.py | guettli/compare-with-remote | setup.py | setup.py | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
name='compare-with-remote',
version='0.1',
description=' Compare local script output with remote script output',
long_description=long_description,
url='https://github.com/guettli/compare-with-remote/',
author='Thomas Guettler',
author_email='info.compare-with-remote@thomas-guettler.de',
license='Apache2',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache2',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
entry_points={
'console_scripts': [
'compare-with-remote=compare_with_remote.compare_with_remote:main',
],
},
)
| apache-2.0 | Python |
|
606853d904c1967b41b30d828940c4aa7ab4c0ab | add setup.py | kgiusti/pyngus,kgiusti/pyngus | setup.py | setup.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from distutils.core import setup
setup(name="fusion",
version="0.1.0",
author="kgiusti",
author_email="kgiusti@apache.org",
packages=["fusion"],
package_dir={"fusion": "python"},
license="Apache Software License")
| apache-2.0 | Python |
|
90ec011ebec93f4c0b0e93fc831b0f782be1b13e | Add the setup.py PIP install config file. | Legilibre/SedLex | setup.py | setup.py | from setuptools import setup
setup(
name='SedLex',
version='0.1',
install_requires=[
'html5lib',
'beautifulsoup4',
'requests',
'jinja2',
'python-gitlab'
]
)
| agpl-3.0 | Python |
|
fa88dac9c35fc473ebfea05926e0200926251d9d | Create setup.py | FlaminMad/RPiProcessRig,FlaminMad/RPiProcessRig | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='RPiProcessRig',
version='1.0',
description='A simple industrial rig that can be used for experimentation with a variety of different control algortithms',
author='Alexander Leech',
author_email='alex.leech@talktalk.net',
license = 'MIT',
keywords = "Raspberry Pi Process Control Industrial Rig Hardware Experimentation",
url='https://github.com/FlaminMad/RPiProcessRig',
packages=['yaml', 'pymodbus','spidev','RPi.GPIO'],
py_modules=[]
)
| mit | Python |
|
c0989ce01ee62367a92eb48855a42c3c4986de84 | Add setup.py. | djkartsa/django-add-another,djkartsa/django-add-another,djkartsa/django-add-another | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
from setuptools import find_packages, setup
def read(file_name):
    file_path = os.path.join(os.path.dirname(__file__), file_name)
    return codecs.open(file_path, encoding='utf-8').read()
PACKAGE = "add_another"
NAME = "django-add-another"
DESCRIPTION = "'Add another' functionality outside Django admin"
AUTHOR = "Karim Amzil"
AUTHOR_EMAIL = "djkartsa@gmail.com"
URL = "https://github.com/djkartsa/django-add-another"
VERSION = __import__(PACKAGE).__version__
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=read("README.md"),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="LICENSE.txt",
url=URL,
packages=find_packages(),
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Framework :: Django",
],
install_requires=[
'Django',
],
zip_safe=False,
)
| mit | Python |
|
7634b58b1bd0fc2eee121bad2a20b61077a48d7b | Update setup.py | typemytype/defconAppKit,typesupply/defconAppKit | setup.py | setup.py | #!/usr/bin/env python
import sys
from distutils.core import setup
try:
    import fontTools
except:
    print "*** Warning: defcon requires FontTools, see:"
    print " fonttools.sf.net"
try:
    import robofab
except:
    print "*** Warning: defcon requires RoboFab, see:"
    print " robofab.com"
#if "sdist" in sys.argv:
# import os
# import subprocess
# import shutil
# docFolder = os.path.join(os.getcwd(), "documentation")
# # remove existing
# doctrees = os.path.join(docFolder, "build", "doctrees")
# if os.path.exists(doctrees):
# shutil.rmtree(doctrees)
# # compile
# p = subprocess.Popen(["make", "html"], cwd=docFolder)
# p.wait()
# # remove doctrees
# shutil.rmtree(doctrees)
setup(name="defconAppKit",
version="0.1",
description="A set of interface objects for working with font data.",
author="Tal Leming",
author_email="tal@typesupply.com",
url="https://github.com/typesupply/defconAppKit",
license="MIT",
packages=[
"defconAppKit",
"defconAppKit.controls",
"defconAppKit.representationFactories",
"defconAppKit.tools",
"defconAppKit.windows"
],
package_dir={"":"Lib"}
)
| #!/usr/bin/env python
import sys
from distutils.core import setup
try:
    import fontTools
except:
    print "*** Warning: defcon requires FontTools, see:"
    print " fonttools.sf.net"
try:
    import robofab
except:
    print "*** Warning: defcon requires RoboFab, see:"
    print " robofab.com"
#if "sdist" in sys.argv:
# import os
# import subprocess
# import shutil
# docFolder = os.path.join(os.getcwd(), "documentation")
# # remove existing
# doctrees = os.path.join(docFolder, "build", "doctrees")
# if os.path.exists(doctrees):
# shutil.rmtree(doctrees)
# # compile
# p = subprocess.Popen(["make", "html"], cwd=docFolder)
# p.wait()
# # remove doctrees
# shutil.rmtree(doctrees)
setup(name="defconAppKit",
version="0.1",
description="A set of interface objects for working with font data.",
author="Tal Leming",
author_email="tal@typesupply.com",
url="http://code.typesupply.com",
license="MIT",
packages=[
"defconAppKit",
"defconAppKit.controls",
"defconAppKit.representationFactories",
"defconAppKit.tools",
"defconAppKit.windows"
],
package_dir={"":"Lib"}
) | mit | Python |
3c5802bda34ed9c772f7bb2e33b29f265440f286 | Add a simple setup.py. | dwaiter/django-goodfields | setup.py | setup.py | import os
from setuptools import setup, find_packages
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.markdown')
description = 'django-goodfields makes creating good form fields easy.'
long_description = os.path.exists(README_PATH) and open(README_PATH).read() or description
setup(
name='django-goodfields',
version='0.0.1',
description=description,
long_description=long_description,
author='Steve Losh',
author_email='steve@stevelosh.com',
url='http://bitbucket.org/dwaiter/django-goodfields/',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Operating System :: OS Independent',
'Framework :: Django',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
],
)
| mit | Python |
|
26cc1c4ff2b5c0de8b83bb9bd088d80f5650dda1 | Create setup.py | albertcuesta/PEACHESTORE | setup.py | setup.py | __author__ = 'Alumne'
from distutils.core import setup
setup(name='PEACHESTORE',
version='python 3',
author='albert cuesta',
author_email='albert_cm_91@hotmail.com',
url='https://github.com/albertcuesta/PEACHESTORE',
description='an online store for mobile applications, similar to Google Play',
packages=['PEACHSTORE']
)
| mit | Python |
|
d64367eda03772997af21792e82a2825848c1ae6 | add tests for splat utils | imbasimba/astroquery,ceb8/astroquery,imbasimba/astroquery,ceb8/astroquery | astroquery/splatalogue/tests/test_utils.py | astroquery/splatalogue/tests/test_utils.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from ... import splatalogue
from astropy import units as u
import numpy as np
from .test_splatalogue import patch_post
from .. import utils
def test_clean(patch_post):
    x = splatalogue.Splatalogue.query_lines(114*u.GHz,116*u.GHz,chemical_name=' CO ')
    c = utils.clean_column_headings(x)
    assert 'Resolved QNs' not in c.colnames
    assert 'QNs' in c.colnames
def test_merge(patch_post):
    x = splatalogue.Splatalogue.query_lines(114*u.GHz,116*u.GHz,chemical_name=' CO ')
    c = utils.merge_frequencies(x)
    assert 'Freq' in c.colnames
    assert np.all(c['Freq'] > 0)
def test_minimize(patch_post):
    x = splatalogue.Splatalogue.query_lines(114*u.GHz,116*u.GHz,chemical_name=' CO ')
    c = utils.minimize_table(x)
    assert 'Freq' in c.colnames
    assert np.all(c['Freq'] > 0)
    assert 'Resolved QNs' not in c.colnames
    assert 'QNs' in c.colnames
| bsd-3-clause | Python |
|
b72f8a9b0d9df7d42c43c6a294cc3aab2cb91641 | Add missing migrations for limit_choices_to on BlogPage.author | thelabnyc/wagtail_blog,thelabnyc/wagtail_blog | blog/migrations/0002_auto_20190605_1104.py | blog/migrations/0002_auto_20190605_1104.py | # Generated by Django 2.2.2 on 2019-06-05 08:04
import blog.abstract
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('blog', '0001_squashed_0006_auto_20180206_2239'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blogpage',
            name='author',
            field=models.ForeignKey(blank=True, limit_choices_to=blog.abstract.limit_author_choices, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='author_pages', to=settings.AUTH_USER_MODEL, verbose_name='Author'),
        ),
    ]
| apache-2.0 | Python |
|
7fdf796440c3a4ed84ffcb4343cd92f0013c8b1f | add current client, supports basic chatting | AlexKavourias/slackline | slack.py | slack.py | from slackclient import SlackClient
def get_client(token='4577027817.4577075131'):
    return SlackClient(token)
print get_client().api_call('api.test')
| mit | Python |
|
f1c1206af29ee0f7be8b7477cd409f2844c816b3 | add Todo generator | secreek/todo | todo/generator.py | todo/generator.py | # coding=utf8
"""
Generator from todo object to todo format string
"""
from models import Task
from models import Todo
class Generator(object):
    """
    Generator from todo object to readable string.
    """
    newline = "\n"
    def gen_task_id(self, task_id):
        """
        int => str e.g. 12 => '12.'
        """
        return str(task_id) + "."
    def gen_task_done(self, done):
        """
        boolean => str e.g. True => '[x]'
        """
        if done is True:
            return '[x]'
        else:
            return ' '
    def gen_task_content(self, content):
        """
        str => str
        """
        return content
    def gen_name(self, name):
        """
        str => str e.g. 'name' => 'name\n------'
        """
        return name + self.newline + '-' * len(name)
    def gen_task(self, task):
        """
        Task => str
        e.g. Task(1, "Write email", True) => '1. [x] Write email'
        """
        lst = []
        lst.append(self.gen_task_id(task.id))
        lst.append(self.gen_task_done(task.done))
        lst.append(self.gen_task_content(task.content))
        return " ".join(lst)
    def generate(self, todo):
        """
        Generate todo object to string.
        e.g. Todo(name, tasks) => "1. (x) do something..."
        """
        lst = []
        if todo.name:
            head = self.gen_name(todo.name)
        else:
            head = ""
        lst.append(head)
        for task in todo.tasks:
            lst.append(self.gen_task(task))
        return self.newline.join(lst)
generator = Generator() # build generator
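# Illustrative round trip (hypothetical task list):
# generator.generate(Todo("todo", [Task(1, "Write email", True)])) produces
# todo
# ----
# 1. [x] Write email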
| mit | Python |
|
3c290803bbd6d7401903506b3a27cf2c9ebad0b4 | Add ChatInfoFormatter | alvarogzp/telegram-bot,alvarogzp/telegram-bot | bot/action/standard/info/formatter/chat.py | bot/action/standard/info/formatter/chat.py | from bot.action.standard.info.formatter import ApiObjectInfoFormatter
from bot.action.util.format import ChatFormatter
from bot.api.api import Api
from bot.api.domain import ApiObject
class ChatInfoFormatter(ApiObjectInfoFormatter):
def __init__(self, api: Api, chat: ApiObject, bot_user: ApiObject, user: ApiObject):
super().__init__(api, chat)
self.bot_user = bot_user
self.user = user
def format(self, full_info: bool = False):
"""
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires
to make THREE synchronous api calls.
"""
chat = self.api_object
if full_info:
self.__format_full(chat)
else:
self.__format_simple(chat)
def __format_full(self, chat: ApiObject):
chat = self.api.getChat(chat_id=chat.id)
description = chat.description
invite_link = self._invite_link(chat.invite_link)
pinned_message = self._pinned_message(chat.pinned_message)
sticker_set_name = self._group_sticker_set(chat.sticker_set_name)
member_count = self.api.getChatMembersCount(chat_id=chat.id)
admins = self.api.getChatAdministrators(chat_id=chat.id)
admin_count = len(admins)
me_admin = self._yes_no(self._is_admin(self.bot_user, admins))
you_admin = self._yes_no(self._is_admin(self.user, admins))
self.__format_simple(chat)
self._add_info("Description", description)
self._add_info("Invite link", invite_link)
self._add_info("Pinned message", pinned_message)
self._add_info("Group sticker set", sticker_set_name)
self._add_info("Members", member_count)
self._add_info("Admins", admin_count, "(not counting other bots)")
self._add_info("Am I admin", me_admin)
self._add_info("Are you admin", you_admin)
def __format_simple(self, chat: ApiObject):
full_data = ChatFormatter(chat).full_data
title = chat.title
username = self._username(chat.username)
_type = chat.type
_id = chat.id
all_members_are_admins = self._yes_no(chat.all_members_are_administrators)
self._add_title(full_data)
self._add_empty()
self._add_info("Title", title)
self._add_info("Username", username)
self._add_info("Type", _type)
self._add_info("Id", _id)
self._add_info("All members are admins", all_members_are_admins)
| agpl-3.0 | Python |
|
1ad56e631c29869d127931b555d0b366f7e75641 | Add test for fftpack. | nbeaver/numpy,MSeifert04/numpy,ViralLeadership/numpy,behzadnouri/numpy,charris/numpy,hainm/numpy,seberg/numpy,rmcgibbo/numpy,ahaldane/numpy,astrofrog/numpy,KaelChen/numpy,pelson/numpy,gfyoung/numpy,Yusa95/numpy,ssanderson/numpy,joferkington/numpy,dwf/numpy,anntzer/numpy,brandon-rhodes/numpy,sigma-random/numpy,AustereCuriosity/numpy,trankmichael/numpy,WarrenWeckesser/numpy,ESSS/numpy,tynn/numpy,jonathanunderwood/numpy,mattip/numpy,mortada/numpy,dwillmer/numpy,cjermain/numpy,kirillzhuravlev/numpy,mingwpy/numpy,leifdenby/numpy,endolith/numpy,skwbc/numpy,behzadnouri/numpy,matthew-brett/numpy,joferkington/numpy,NextThought/pypy-numpy,ajdawson/numpy,pyparallel/numpy,ekalosak/numpy,pbrod/numpy,bmorris3/numpy,andsor/numpy,BabeNovelty/numpy,kirillzhuravlev/numpy,solarjoe/numpy,ChristopherHogan/numpy,mindw/numpy,GaZ3ll3/numpy,utke1/numpy,skymanaditya1/numpy,Yusa95/numpy,larsmans/numpy,ContinuumIO/numpy,AustereCuriosity/numpy,shoyer/numpy,charris/numpy,jakirkham/numpy,nguyentu1602/numpy,Yusa95/numpy,skymanaditya1/numpy,nguyentu1602/numpy,sonnyhu/numpy,CMartelLML/numpy,mwiebe/numpy,ajdawson/numpy,abalkin/numpy,chatcannon/numpy,Eric89GXL/numpy,has2k1/numpy,jakirkham/numpy,nbeaver/numpy,BMJHayward/numpy,dwf/numpy,bmorris3/numpy,argriffing/numpy,ewmoore/numpy,bmorris3/numpy,pdebuyl/numpy,anntzer/numpy,dato-code/numpy,hainm/numpy,NextThought/pypy-numpy,NextThought/pypy-numpy,jankoslavic/numpy,endolith/numpy,dch312/numpy,tacaswell/numpy,rhythmsosad/numpy,moreati/numpy,jankoslavic/numpy,sinhrks/numpy,numpy/numpy,larsmans/numpy,numpy/numpy-refactor,ogrisel/numpy,ahaldane/numpy,MichaelAquilina/numpy,rudimeier/numpy,nguyentu1602/numpy,Anwesh43/numpy,ddasilva/numpy,kiwifb/numpy,Srisai85/numpy,kirillzhuravlev/numpy,ogrisel/numpy,stefanv/numpy,mingwpy/numpy,madphysicist/numpy,githubmlai/numpy,chatcannon/numpy,cowlicks/numpy,grlee77/numpy,tacaswell/numpy,SunghanKim/numpy,MSeifert04/numpy,ChanderG/numpy,mathdd/numpy,dimasad/numpy,MSeifert04/numpy,NextThought/pypy-numpy,ssanderson/numpy,BabeNovelty/numpy,pizzathief/numpy,brandon-rhodes/numpy,jschueller/numpy,rherault-insa/numpy,groutr/numpy,ahaldane/numpy,bringingheavendown/numpy,mwiebe/numpy,pelson/numpy,chiffa/numpy,grlee77/numpy,pyparallel/numpy,bertrand-l/numpy,numpy/numpy-refactor,rajathkumarmp/numpy,ssanderson/numpy,ewmoore/numpy,simongibbons/numpy,argriffing/numpy,charris/numpy,kiwifb/numpy,ChanderG/numpy,musically-ut/numpy,bringingheavendown/numpy,GrimDerp/numpy,madphysicist/numpy,SiccarPoint/numpy,rudimeier/numpy,has2k1/numpy,numpy/numpy-refactor,gfyoung/numpy,rhythmsosad/numpy,drasmuss/numpy,drasmuss/numpy,leifdenby/numpy,ViralLeadership/numpy,Srisai85/numpy,seberg/numpy,mortada/numpy,rgommers/numpy,felipebetancur/numpy,solarjoe/numpy,joferkington/numpy,pbrod/numpy,embray/numpy,b-carter/numpy,astrofrog/numpy,dch312/numpy,MichaelAquilina/numpy,seberg/numpy,has2k1/numpy,matthew-brett/numpy,ChristopherHogan/numpy,jorisvandenbossche/numpy,matthew-brett/numpy,shoyer/numpy,dimasad/numpy,AustereCuriosity/numpy,rhythmsosad/numpy,stuarteberg/numpy,nguyentu1602/numpy,gfyoung/numpy,CMartelLML/numpy,mortada/numpy,b-carter/numpy,tacaswell/numpy,pelson/numpy,MaPePeR/numpy,behzadnouri/numpy,mhvk/numpy,sonnyhu/numpy,BMJHayward/numpy,simongibbons/numpy,empeeu/numpy,skwbc/numpy,dwillmer/numpy,naritta/numpy,Dapid/numpy,numpy/numpy-refactor,tdsmith/numpy,ewmoore/numpy,gmcastil/numpy,felipebetancur/numpy,Anwesh43/numpy,andsor/numpy,jorisvandenbossche/numpy,ChristopherHogan/numpy,CMartelLML/numpy,groutr/numpy,dim
asad/numpy,empeeu/numpy,nbeaver/numpy,jonathanunderwood/numpy,stuarteberg/numpy,rajathkumarmp/numpy,pbrod/numpy,larsmans/numpy,ekalosak/numpy,matthew-brett/numpy,GrimDerp/numpy,njase/numpy,chatcannon/numpy,BMJHayward/numpy,immerrr/numpy,mhvk/numpy,yiakwy/numpy,sinhrks/numpy,solarjoe/numpy,felipebetancur/numpy,MaPePeR/numpy,shoyer/numpy,ddasilva/numpy,naritta/numpy,SiccarPoint/numpy,SiccarPoint/numpy,rhythmsosad/numpy,dimasad/numpy,dwillmer/numpy,simongibbons/numpy,ESSS/numpy,jakirkham/numpy,ChanderG/numpy,sigma-random/numpy,simongibbons/numpy,tynn/numpy,ogrisel/numpy,utke1/numpy,WillieMaddox/numpy,tdsmith/numpy,jakirkham/numpy,andsor/numpy,MSeifert04/numpy,embray/numpy,rgommers/numpy,jankoslavic/numpy,jschueller/numpy,grlee77/numpy,SunghanKim/numpy,MaPePeR/numpy,pizzathief/numpy,pelson/numpy,dch312/numpy,empeeu/numpy,utke1/numpy,ahaldane/numpy,ekalosak/numpy,mhvk/numpy,jorisvandenbossche/numpy,brandon-rhodes/numpy,trankmichael/numpy,immerrr/numpy,pbrod/numpy,sigma-random/numpy,larsmans/numpy,GrimDerp/numpy,mhvk/numpy,numpy/numpy-refactor,immerrr/numpy,Eric89GXL/numpy,trankmichael/numpy,embray/numpy,moreati/numpy,SunghanKim/numpy,madphysicist/numpy,WillieMaddox/numpy,mingwpy/numpy,grlee77/numpy,GrimDerp/numpy,simongibbons/numpy,pdebuyl/numpy,stefanv/numpy,mattip/numpy,pizzathief/numpy,naritta/numpy,rajathkumarmp/numpy,dwf/numpy,jorisvandenbossche/numpy,mindw/numpy,jschueller/numpy,mathdd/numpy,ekalosak/numpy,tdsmith/numpy,endolith/numpy,mathdd/numpy,rudimeier/numpy,felipebetancur/numpy,pelson/numpy,rherault-insa/numpy,joferkington/numpy,anntzer/numpy,jakirkham/numpy,naritta/numpy,maniteja123/numpy,charris/numpy,mhvk/numpy,bringingheavendown/numpy,GaZ3ll3/numpy,numpy/numpy,dch312/numpy,numpy/numpy,KaelChen/numpy,Dapid/numpy,pyparallel/numpy,musically-ut/numpy,endolith/numpy,madphysicist/numpy,BabeNovelty/numpy,bertrand-l/numpy,argriffing/numpy,bertrand-l/numpy,abalkin/numpy,matthew-brett/numpy,sonnyhu/numpy,MichaelAquilina/numpy,ChanderG/numpy,ewmoore/numpy,yiakwy/numpy,CMartelLML/numpy,sonnyhu/numpy,ogrisel/numpy,pdebuyl/numpy,cjermain/numpy,kirillzhuravlev/numpy,GaZ3ll3/numpy,has2k1/numpy,rherault-insa/numpy,WarrenWeckesser/numpy,drasmuss/numpy,ewmoore/numpy,moreati/numpy,SiccarPoint/numpy,astrofrog/numpy,dwf/numpy,skymanaditya1/numpy,pizzathief/numpy,ahaldane/numpy,maniteja123/numpy,rmcgibbo/numpy,hainm/numpy,chiffa/numpy,groutr/numpy,stefanv/numpy,WarrenWeckesser/numpy,b-carter/numpy,dato-code/numpy,MSeifert04/numpy,grlee77/numpy,musically-ut/numpy,andsor/numpy,GaZ3ll3/numpy,mattip/numpy,dato-code/numpy,jankoslavic/numpy,Srisai85/numpy,embray/numpy,BabeNovelty/numpy,Linkid/numpy,githubmlai/numpy,shoyer/numpy,KaelChen/numpy,ddasilva/numpy,shoyer/numpy,leifdenby/numpy,skwbc/numpy,rudimeier/numpy,Anwesh43/numpy,Eric89GXL/numpy,WarrenWeckesser/numpy,maniteja123/numpy,seberg/numpy,astrofrog/numpy,sinhrks/numpy,ajdawson/numpy,WillieMaddox/numpy,MaPePeR/numpy,astrofrog/numpy,cowlicks/numpy,mindw/numpy,ViralLeadership/numpy,rgommers/numpy,njase/numpy,BMJHayward/numpy,Linkid/numpy,cowlicks/numpy,WarrenWeckesser/numpy,jschueller/numpy,musically-ut/numpy,tynn/numpy,cjermain/numpy,cjermain/numpy,brandon-rhodes/numpy,pizzathief/numpy,Anwesh43/numpy,Srisai85/numpy,ChristopherHogan/numpy,njase/numpy,immerrr/numpy,skymanaditya1/numpy,githubmlai/numpy,stuarteberg/numpy,stefanv/numpy,hainm/numpy,dwf/numpy,jonathanunderwood/numpy,tdsmith/numpy,stefanv/numpy,githubmlai/numpy,mwiebe/numpy,Linkid/numpy,mathdd/numpy,ajdawson/numpy,stuarteberg/numpy,chiffa/numpy,Eric89GXL/numpy,madphysicist/numpy,anntzer/numpy,C
ontinuumIO/numpy,mortada/numpy,SunghanKim/numpy,yiakwy/numpy,dato-code/numpy,pbrod/numpy,KaelChen/numpy,Linkid/numpy,rmcgibbo/numpy,ContinuumIO/numpy,ESSS/numpy,rgommers/numpy,dwillmer/numpy,sigma-random/numpy,yiakwy/numpy,rajathkumarmp/numpy,gmcastil/numpy,ogrisel/numpy,mindw/numpy,sinhrks/numpy,embray/numpy,bmorris3/numpy,pdebuyl/numpy,MichaelAquilina/numpy,trankmichael/numpy,Dapid/numpy,empeeu/numpy,gmcastil/numpy,rmcgibbo/numpy,Yusa95/numpy,mingwpy/numpy,cowlicks/numpy,mattip/numpy,kiwifb/numpy,numpy/numpy,jorisvandenbossche/numpy,abalkin/numpy | numpy/fft/tests/test_fftpack.py | numpy/fft/tests/test_fftpack.py | import sys
from numpy.testing import *
set_package_path()
from numpy.fft import *
restore_path()
class test_fftshift(NumpyTestCase):
def check_fft_n(self):
self.failUnlessRaises(ValueError,fft,[1,2,3],0)
if __name__ == "__main__":
NumpyTest().run()
| bsd-3-clause | Python |
|
ab6fa9717b092f3b8eea4b70920a1d7cef042b69 | Return disappeared __main__ | tsiang/certchecker | certchecker/__main__.py | certchecker/__main__.py | import click
from certchecker import CertChecker
@click.command()
@click.option(
'--profile',
default='default',
help="Section name in your boto config file"
)
def main(profile):
cc = CertChecker(profile)
print(cc.result)
if __name__ == "__main__":
print(main())
| apache-2.0 | Python |
|
b9feeb2a37f0596b48f9582e8953d29485167fc8 | Add an event-driven recording tool | cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa | tools/sofa-edr.py | tools/sofa-edr.py | #!/usr/bin/env python3
import subprocess
import time
import argparse
if __name__ == '__main__':
    bwa_is_recorded = False
    smb_is_recorded = False
    htvc_is_recorded = False
    parser = argparse.ArgumentParser(description='A SOFA wrapper which supports event-driven recording.')
    parser.add_argument('--trace-points', default='', metavar='Comma-separated string list for interested keywords, e.g., "keyword1,keyword2"')
    args = parser.parse_args()
    while True:
        time.sleep(3)
        print(time.time())
        with open('/home/ubuntu/pbrun_error.log') as f:
            lines = f.readlines()
            lc = 0
            for line in lines:
                #print('Line%d'%lc, line)
                lc = lc + 1
                if lc < 6:
                    continue
                if line.find('BWA') != -1 and not bwa_is_recorded:
                    bwa_is_recorded = True
                    print('BWA begins at ', time.time())
                    time.sleep(120)
                    subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-bwa ', shell=True)
                    break
                if line.find('BQSR') != -1 and not smb_is_recorded:
                    smb_is_recorded = True
                    print('SMB begins at ', time.time())
                    time.sleep(120)
                    subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-smb ', shell=True)
                    break
                if line.find('HaplotypeCaller') != -1 and not htvc_is_recorded:
                    htvc_is_recorded = True
                    print('HTVC begins at ', time.time())
                    time.sleep(120)
                    subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-htvc ', shell=True)
                    break
        if bwa_is_recorded and smb_is_recorded and htvc_is_recorded:
            print("Tracing is done.")
            break
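# The loop polls the pipeline log every 3 seconds; when a phase keyword first
# appears it waits 120 seconds for the phase to ramp up, then records a
# 20-second SOFA profile into a per-phase log directory.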
| apache-2.0 | Python |
|
0bd65e0e20911e7ac87aba3ef076b327f57b2f6f | Add get-aixdzs.py | changyuheng/bin,changyuheng/bin | get-aixdzs.py | get-aixdzs.py | #!/usr/bin/env python3
import argparse
import html.parser
from typing import List, Tuple
import urllib.parse  # urljoin below needs this; don't rely on urllib.request importing it implicitly
import urllib.request
class AixdzsHTMLParser(html.parser.HTMLParser):
    def __init__(self):
        super().__init__()
        self.last_url: str = ''
        self.next_url: str = ''
        self.is_in_content_tag: bool = False
        self.content_tag_nested_count: int = 0
        self.content: str = ''
        self.is_in_episode_name_tag: bool = False
        self.episode_name_tag_nested_count: int = 0
        self.episode_name: str = ''
    def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]):
        attr: Tuple[str, str]
        if tag == 'a':
            for attr in attrs:
                if attr[0] != 'href':
                    continue
                self.last_url = attr[1]
        elif tag == 'div':
            if self.is_in_content_tag:
                self.content_tag_nested_count += 1
            for attr in attrs:
                if attr[0] != 'class':
                    continue
                if attr[1] == 'content':
                    self.is_in_content_tag = True
        elif tag == 'h1':
            if self.is_in_episode_name_tag:
                self.episode_name_tag_nested_count += 1
            else:
                self.is_in_episode_name_tag = True
    def handle_endtag(self, tag: str):
        if tag == 'div':
            if self.content_tag_nested_count > 0:
                self.content_tag_nested_count -= 1
                return
            if self.is_in_content_tag:
                self.is_in_content_tag = False
        elif tag == 'h1':
            if self.episode_name_tag_nested_count > 0:
                self.episode_name_tag_nested_count -= 1
                return
            if self.is_in_episode_name_tag:
                self.is_in_episode_name_tag = False
    def handle_data(self, data: str):
        if data == '下一章[→]':
            self.next_url = self.last_url
        elif self.is_in_content_tag:
            self.content += data
        elif self.is_in_episode_name_tag:
            self.episode_name = data
            self.content += '\n' + self.episode_name + '\n'
class TxtDownloader:
    def __init__(self, begin_url: str, num_of_episodes_to_get: int):
        self.begin_url: str = begin_url
        self.episode_urls: List[str] = list()
        self.num_of_episodes_to_get: int = num_of_episodes_to_get
        self.content: str = ''
    def start(self):
        current_url: str = self.begin_url
        for _ in range(self.num_of_episodes_to_get):
            page: str = urllib.request.urlopen(current_url).read().decode()
            aixdzs_html_parser: AixdzsHTMLParser = AixdzsHTMLParser()
            aixdzs_html_parser.feed(page)
            self.content += aixdzs_html_parser.content
            if not aixdzs_html_parser.next_url:
                break
            current_url = urllib.parse.urljoin(self.begin_url, aixdzs_html_parser.next_url)
            self.episode_urls.append(current_url)
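# The *_nested_count fields track divs/h1s opened inside the block being
# captured, so a matching close tag only ends capture once the count is back
# to zero; the '下一章[→]' ("next chapter") anchor drives pagination.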
def parse_args() -> argparse.Namespace:
    parser: argparse.ArgumentParser = argparse.ArgumentParser(
        description='Download episodes from 愛下電子書 https://tw.aixdzs.com/')
    parser.add_argument('begin_url', help='the URL of the begin episode')
    parser.add_argument('number_of_episodes', type=int, help='the number of the episodes to download')
    return parser.parse_args()
tdler: TxtDownloader = TxtDownloader(args.begin_url, args.number_of_episodes)
tdler.start()
print(tdler.content)
| mpl-2.0 | Python |
|
42ab52b6d077443fac20ea872b503589f6ddb3f7 | Create pyPostings.py | domarps/Spatial_Hashing | pyPostings.py | pyPostings.py | import re
import string
def posting(corpus):
    posting = []
    tokens = tokenize(corpus)
    for index, token in enumerate(tokens):
        posting.append([token, (index+1)])
    return posting
def posting_list(corpus):
    posting_list = {}
    tokens = tokenize(corpus)
    for index, token in enumerate(tokens):
        if token not in posting_list:
            posting_list[token] = [(index + 1)]
        else:
            posting_list[token].append(index + 1)
    return posting_list
def tokenize(corpus):
    assert type(corpus) is str, 'Corpus must be a string of characters.'
    # split
    tokenized = corpus.split()
    # normalize
    for index, token in enumerate(tokenized):
        tokenized[index] = re.sub('\W\Z', '', tokenized[index])
        tokenized[index] = re.sub('\A\W', '', tokenized[index])
    return tokenized
def not_string(a):
    return a != " " and a != ""
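# Illustrative usage (hypothetical corpus); positions are 1-based:
# posting("to be or not to be") -> [['to', 1], ['be', 2], ['or', 3], ['not', 4], ['to', 5], ['be', 6]]
# posting_list("to be or not to be") -> {'to': [1, 5], 'be': [2, 6], 'or': [3], 'not': [4]}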
| mit | Python |
|
ee39e69fe5d6e93844f47eaff0d9547622600fa7 | make parsing times easier | bloomberg/phabricator-tools,cs-shadow/phabricator-tools,kjedruczyk/phabricator-tools,cs-shadow/phabricator-tools,kjedruczyk/phabricator-tools,aevri/phabricator-tools,aevri/phabricator-tools,cs-shadow/phabricator-tools,bloomberg/phabricator-tools,valhallasw/phabricator-tools,valhallasw/phabricator-tools,valhallasw/phabricator-tools,aevri/phabricator-tools,bloomberg/phabricator-tools,aevri/phabricator-tools,kjedruczyk/phabricator-tools,aevri/phabricator-tools,bloomberg/phabricator-tools,cs-shadow/phabricator-tools,valhallasw/phabricator-tools,kjedruczyk/phabricator-tools,cs-shadow/phabricator-tools,bloomberg/phabricator-tools,kjedruczyk/phabricator-tools | py/phlsys_strtotime.py | py/phlsys_strtotime.py | #!/usr/bin/env python
# encoding: utf-8
"""A poor substitute for PHP's strtotime function."""
import datetime
def describeDurationStringToTimeDelta():
    return str('time can be specified like "5 hours 20 minutes", use '
               'combinations of seconds, minutes, hours, days, weeks. '
               'each unit should only appear once. you may use floating '
               'point numbers and negative numbers. '
               'e.g. "1 weeks -1.5 days".')
def durationStringToTimeDelta(s):
    """Return a datetime.timedelta based on the supplied string 's'.
    Usage examples:
    >>> str(durationStringToTimeDelta("1 seconds"))
    '0:00:01'
    >>> str(durationStringToTimeDelta("2 minutes"))
    '0:02:00'
    >>> str(durationStringToTimeDelta("2 hours 2 minutes"))
    '2:02:00'
    >>> str(durationStringToTimeDelta("1 days 2 hours 2 minutes"))
    '1 day, 2:02:00'
    >>> str(durationStringToTimeDelta("1.5 days"))
    '1 day, 12:00:00'
    >>> str(durationStringToTimeDelta("1 days -1 hours"))
    '23:00:00'
    >>> str(durationStringToTimeDelta("1 milliseconds"))
    '0:00:00.001000'
    :s: a string in the appropriate time format
    :returns: a datetime.timedelta
    """
    clauses = s.split()
    if len(clauses) % 2:
        raise ValueError("odd number of clauses: " + s)
    pairs = zip(clauses[::2], clauses[1::2])
    d = {p[1]: float(p[0]) for p in pairs}
    if len(d) != len(pairs):
        raise ValueError("duplicated clauses: " + s)
    return datetime.timedelta(**d)
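# The parser splits "<number> <unit>" clauses into pairs and splats the
# resulting {unit: float(number)} dict into datetime.timedelta, so the valid
# units are exactly timedelta's keyword arguments (weeks, days, hours,
# minutes, seconds, milliseconds, microseconds).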
#------------------------------------------------------------------------------
# Copyright (C) 2012 Bloomberg L.P.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------- END-OF-FILE ----------------------------------
| apache-2.0 | Python |
|
2cf2a89bf3c7ccf667e4bcb623eeb6d0e1ea37bb | print sumthing pr1 | fly/euler,bsdlp/euler | python/py1.py | python/py1.py | #!/usr/bin/env python3
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
thing = []
for urmom in range(1,1000):
    if urmom % 5 == 0 or urmom % 3 == 0:
        thing.append(urmom)
print(sum(thing))
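# Equivalent closed form via inclusion-exclusion (no loop needed):
# S(k) = k * m * (m + 1) // 2 with m = 999 // k, and the answer is
# S(3) + S(5) - S(15) = 233168.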
| bsd-3-clause | Python |
|
784fd8b08ee0f268350a2003a9c06522c0678874 | Add python code for doing tensor decomposition with scikit-tensor. | monsendag/goldfish,ntnu-smartmedia/goldfish,monsendag/goldfish,ntnu-smartmedia/goldfish,monsendag/goldfish,ntnu-smartmedia/goldfish | python/run.py | python/run.py | import logging
import numpy
from numpy import genfromtxt
from sktensor import sptensor, cp_als
# Set logging to DEBUG to see CP-ALS information
logging.basicConfig(level=logging.DEBUG)
data = genfromtxt('../datasets/movielens-synthesized/ratings-synthesized-50k.csv', delimiter=',')
# we need to convert data into two lists; subscripts/coordinates and values
n = len(data)
subs_1 = numpy.append(data[:,:2], numpy.zeros((n, 1)), 1)
subs_2 = numpy.append(data[:,:2], numpy.ones((n, 1)), 1)
subs = numpy.vstack([subs_1, subs_2])
subs = subs.astype(int)
vals = numpy.hstack([data[:,2], data[:, 3]])
vals = vals.flatten()
# convert subs tuple of arrays (rows, cols, tubes)
subs = (subs[:,0], subs[:,1], subs[:,2])
# load into sparse tensor
T = sptensor(subs, vals)
# Decompose tensor using CP-ALS
P, fit, itr, exectimes = cp_als(T, 500, init='random')
P = P.totensor()
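# cp_als returns a Kruskal (factored) tensor; totensor() expands it to a
# dense array so individual reconstructed ratings can be indexed below.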
print P[1,1193,0] # 5
print P[1,661, 0] # 3
print P[1,594, 1] # 1.6
print P[1,1193, 1] # 2.2
#print numpy.allclose(T, P)
#print P.U[0].shape
#print "-------"
##print P.U[1].shape
#print "-------"
#print P.U[2].shape
| mit | Python |
|
00413958a12607aab942c98581b1a9e6d682ef28 | Create Single-Prime.py | isabellemao/Hello-World,isabellemao/Hello-World,isabellemao/Hello-World | python/Single-Prime.py | python/Single-Prime.py | #By Isabelle.
#Checks a single number and lists all of its factors (except 1 and itself)
import math
num = int(input("Pick a number to undergo the primality test!\n"))
root = int(round(math.sqrt(num)))
prime = True
for looper in range(2,root + 1): #53225 should normally be 3
    if num % 2 == 0 or num % 3 == 0 or num % 5 == 0: #End if number is even
        print("{} is divisible by a prime number from 2 and 5. Silly you, stop wasting my time.".format(num))
        prime = False
        break
    elif looper % 2 == 0 or looper % 3 == 0 or looper % 5 == 0:
        continue
    else:
        if num % looper == 0:
            print("{} can be divided by {}.".format(num, looper))
            looper += 1
            prime = False
            break
        else:
            print("{} cannot be divided by {}.".format(num, looper)) #delete
            looper += 1
if prime == True:
    print("{} is prime".format(num))
else:
    print("{} is not prime.".format(num))
| apache-2.0 | Python |
|
1cb8df64d4f6f257d0bd03caaaddb33ad11a5c2c | Add or_gate | yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program | python/ch02/or_gate.py | python/ch02/or_gate.py | import numpy as np
def OR(x1, x2):
    x = np.array([x1, x2])
    w = np.array([0.5, 0.5])
    b = -0.2
    tmp = np.sum(w * x) + b
    if tmp <= 0:
        return 0
    else:
        return 1
if __name__ == '__main__':
    for xs in [(0, 0), (1, 0), (0, 1), (1, 1)]:
        y = OR(xs[0], xs[1])
        print(str(xs) + " -> " + str(y))
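# This is a single perceptron: with weights 0.5/0.5 and bias -0.2 the
# weighted sum is positive whenever at least one input is 1, so the loop
# prints (0, 0) -> 0 and -> 1 for the other three input pairs.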
| mit | Python |
|
a58a31a6037babdc607593196da2841f13791bfa | Revert "drop the camelcase/underscore conversion and use a third-party one directly" | nypisces/railguns,nypisces/railguns,nypisces/railguns | railguns/utils/text.py | railguns/utils/text.py | """
https://github.com/tomchristie/django-rest-framework/issues/944
"""
import re
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camelcase_to_underscore(name):
    s1 = first_cap_re.sub(r'\1_\2', name)
    return all_cap_re.sub(r'\1_\2', s1).lower()
def underscore_to_camelcase(name, lower_first=True):
    result = name.title().replace('_', '')
    if lower_first:
        return result[0].lower() + result[1:]
    else:
        return result
def recursive_key_map(function, data):
    if isinstance(data, dict):
        new_dict = {}
        for key, value in data.items():
            if isinstance(key, str):
                new_key = function(key)
                new_dict[new_key] = recursive_key_map(function, value)
        return new_dict
    elif isinstance(data, (list, tuple)):
        return [recursive_key_map(function, value) for value in data]
    else:
        return data
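# Illustrative round trip:
# camelcase_to_underscore('camelCaseName') -> 'camel_case_name'
# underscore_to_camelcase('camel_case_name') -> 'camelCaseName'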
| mit | Python |
|
a723c70a0ae9da0f2207dd9278c619be323bda4a | move test parts to avnav_test | wellenvogel/avnav,wellenvogel/avnav,wellenvogel/avnav,wellenvogel/avnav,wellenvogel/avnav,wellenvogel/avnav,wellenvogel/avnav,wellenvogel/avnav,wellenvogel/avnav | avnav_test/avn_debug.py | avnav_test/avn_debug.py | import sys
sys.path.append(r'/home/pi/avnav/pydev')
import pydevd
from avnav_server import *
pydevd.settrace(host='10.222.10.45',stdoutToServer=True, stderrToServer=True)
main(sys.argv)
| mit | Python |
|
aa1b39b455f7145848c287ee9ee85507f5b66de0 | Add Meduza | andre487/news487,andre487/news487,andre487/news487,andre487/news487 | collector/rss/meduza.py | collector/rss/meduza.py | # coding=utf-8
import feedparser
import logging
from util import date, tags
SOURCE_NAME = 'Meduza'
FEED_URL = 'https://meduza.io/rss/all'
log = logging.getLogger('app')
def parse():
    feed = feedparser.parse(FEED_URL)
    data = []
    for entry in feed['entries']:
        data.append({
            'title': entry['title'],
            'description': entry['description'],
            'link': entry['link'],
            'published': date.utc_format(entry['published']),
            'source_name': SOURCE_NAME,
            'source_title': feed['feed']['title'],
            'source_link': feed['feed']['link'],
            'tags': tags.string_format('world', 'no_tech', 'meduza'),
        })
    log.info('%s: got %d documents', SOURCE_NAME, len(data))
    return data
if __name__ == '__main__':
    print parse()
| mit | Python |
|
d50814603217ca9ea47324a0ad516ce7418bc9bf | Add script to generate a standalone timeline view. | sahiljain/catapult,danbeam/catapult,catapult-project/catapult-csm,scottmcmaster/catapult,sahiljain/catapult,catapult-project/catapult-csm,0x90sled/catapult,benschmaus/catapult,catapult-project/catapult-csm,0x90sled/catapult,zeptonaut/catapult,danbeam/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,benschmaus/catapult,dstockwell/catapult,SummerLW/Perf-Insight-Report,vmpstr/trace-viewer,benschmaus/catapult,modulexcite/catapult,SummerLW/Perf-Insight-Report,dstockwell/catapult,danbeam/catapult,catapult-project/catapult,zeptonaut/catapult,dstockwell/catapult,catapult-project/catapult,catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,0x90sled/catapult,catapult-project/catapult-csm,vmpstr/trace-viewer,sahiljain/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,modulexcite/catapult,zeptonaut/catapult,benschmaus/catapult,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,scottmcmaster/catapult,sahiljain/catapult,catapult-project/catapult,dstockwell/catapult,modulexcite/catapult,danbeam/catapult,vmpstr/trace-viewer,sahiljain/catapult,scottmcmaster/catapult | build/generate_standalone_timeline_view.py | build/generate_standalone_timeline_view.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import parse_deps
import sys
import os
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
def generate_css(filenames):
    load_sequence = parse_deps.calc_load_sequence(filenames)
    style_sheet_chunks = [css_warning_message, '\n']
    for module in load_sequence:
        for style_sheet in module.style_sheets:
            style_sheet_chunks.append("""%s\n""" % style_sheet.timeline_view)
    return ''.join(style_sheet_chunks)
def generate_js(filenames):
    load_sequence = parse_deps.calc_load_sequence(filenames)
    js_chunks = [js_warning_message, '\n']
    js_chunks.append("window.FLATTENED = {};\n")
    for module in load_sequence:
        js_chunks.append("window.FLATTENED['%s'] = true;\n" % module.name)
    for module in load_sequence:
        js_chunks.append(module.timeline_view)
        js_chunks.append("\n")
    return ''.join(js_chunks)
def main(args):
    parser = optparse.OptionParser()
    parser.add_option("--js", dest="js_file",
                      help="Where to place generated javascript file")
    parser.add_option("--css", dest="css_file",
                      help="Where to place generated css file")
    options, args = parser.parse_args(args)
    if not options.js_file and not options.css_file:
        print "Must specify one, or both of --js and --css"
        return 1
    input_filenames = [os.path.join(srcdir, f)
                       for f in ['base.js', 'timeline_view.js']]
    if options.js_file:
        with open(options.js_file, 'w') as f:
            f.write(generate_js(input_filenames))
    if options.css_file:
        with open(options.css_file, 'w') as f:
            f.write(generate_css(input_filenames))
    return 0
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| bsd-3-clause | Python |
|
93df464ec396774cb161b51d4988773e4ce95e44 | Create lfu-cache.py | yiwen-luo/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/lfu-cache.py | Python/lfu-cache.py | # Time: O(1), per operation
# Space: O(k), k is the capacity of cache
# Design and implement a data structure for Least Frequently Used (LFU) cache.
# It should support the following operations: get and put.
#
# get(key) - Get the value (will always be positive) of the key
# if the key exists in the cache, otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present.
# When the cache reaches its capacity,
# it should invalidate the least frequently used item before inserting a new item.
# For the purpose of this problem, when there is a tie
# (i.e., two or more keys that have the same frequency),
# the least recently used key would be evicted.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
#
# LFUCache cache = new LFUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.get(3); // returns 3.
# cache.put(4, 4); // evicts key 1.
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
import collections  # needed for defaultdict below
class ListNode(object):
    def __init__(self, key):
        self.key = key
        self.next = None
        self.prev = None
class LinkedList(object):
    def __init__(self):
        self.head = None
        self.tail = None
    def append(self, node):
        if self.head is None:
            self.head = node
        else:
            self.tail.next = node
            node.prev = self.tail
        self.tail = node
    def delete(self, node):
        if node.prev:
            node.prev.next = node.next
        else:
            self.head = node.next
        if node.next:
            node.next.prev = node.prev
        else:
            self.tail = node.prev
        del node
class LFUCache(object):
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.__capa = capacity
        self.__size = 0
        self.__min_freq = 0
        self.__freq_to_nodes = collections.defaultdict(LinkedList)
        self.__key_to_node = {}
        self.__key_to_val_freq = {}
    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key not in self.__key_to_val_freq:
            return -1
        self.__freq_to_nodes[self.__key_to_val_freq[key][1]].delete(self.__key_to_node[key])
        if not self.__freq_to_nodes[self.__key_to_val_freq[key][1]].head:
            del self.__freq_to_nodes[self.__key_to_val_freq[key][1]]
            if self.__min_freq == self.__key_to_val_freq[key][1]:
                self.__min_freq += 1
        self.__key_to_val_freq[key][1] += 1
        self.__freq_to_nodes[self.__key_to_val_freq[key][1]].append(ListNode(key))
        self.__key_to_node[key] = self.__freq_to_nodes[self.__key_to_val_freq[key][1]].tail
        return self.__key_to_val_freq[key][0]
    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if self.__capa <= 0:
            return
        if self.get(key) != -1:
            self.__key_to_val_freq[key][0] = value
            return
        if self.__size == self.__capa:
            del self.__key_to_val_freq[self.__freq_to_nodes[self.__min_freq].head.key]
            del self.__key_to_node[self.__freq_to_nodes[self.__min_freq].head.key]
            self.__freq_to_nodes[self.__min_freq].delete(self.__freq_to_nodes[self.__min_freq].head)
            if not self.__freq_to_nodes[self.__min_freq].head:
                del self.__freq_to_nodes[self.__min_freq]
            self.__size -= 1
        self.__min_freq = 1
        self.__key_to_val_freq[key] = [value, self.__min_freq]
        self.__freq_to_nodes[self.__min_freq].append(ListNode(key))
        self.__key_to_node[key] = self.__freq_to_nodes[self.__min_freq].tail
        self.__size += 1
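# Why this is O(1): each frequency bucket keeps its keys in insertion order
# in a doubly linked list (head = least recently used), __min_freq tracks the
# lowest non-empty bucket, and the two dicts map a key straight to its node
# and its [value, freq] pair, so get/put touch a constant number of entries.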
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| mit | Python |
|
f6ef8e0c31163f95fa0c62873a7195ab51f65cf1 | Add cw_are_they_the_same.py | bowen0701/algorithms_data_structures | cw_are_they_the_same.py | cw_are_they_the_same.py | """Codewars: Are they the "same"?
6 kyu
URL: https://www.codewars.com/kata/550498447451fbbd7600041c
Given two arrays a and b write a function comp(a, b) (compSame(a, b) in Clojure)
that checks whether the two arrays have the "same" elements, with the same
multiplicities. "Same" means, here, that the elements in b are the elements i
a squared, regardless of the order.
Examples
Valid arrays
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a, b) returns true because in b 121 is the square of 11, 14641 is the
square of 121, 20736 the square of 144, 361 the square of 19, 25921 the
square of 161, and so on. It gets obvious if we write b's elements in terms of
squares:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [11*11, 121*121, 144*144, 19*19, 161*161, 19*19, 144*144, 19*19]
Invalid arrays
If we change the first number to something else, comp may not return true
anymore:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [132, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 132 is not the square of any number of a.
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 36100, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 36100 is not the square of any number of a.
Remarks
- a or b might be [] (all languages except R, Shell). a or b might be nil or null
or None or nothing (except in Haskell, Elixir, C++, Rust, R, Shell, PureScript).
- If a or b are nil (or null or None), the problem doesn't make sense so return false.
- If a or b are empty then the result is self-evident.
- a or b are empty or not empty lists.
"""
def comp(array1, array2):
    # b must be exactly the multiset of squares of a; None is invalid input.
    if array1 is None or array2 is None:
        return False
    return sorted(x * x for x in array1) == sorted(array2)
def main():
    pass
if __name__ == '__main__':
    main()
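# Quick check against the kata's examples:
# comp([121, 144, 19, 161, 19, 144, 19, 11],
#      [121, 14641, 20736, 361, 25921, 361, 20736, 361]) -> True
# comp([121, 144, 19, 161, 19, 144, 19, 11],
#      [132, 14641, 20736, 361, 25921, 361, 20736, 361]) -> False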
| bsd-2-clause | Python |