commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
23c09555221b3f7500a4c658452c9c0cb223799c | Add evaluation using random forest | glrs/StackedDAE,glrs/StackedDAE | Train_SDAE/tools/evaluate_model.py | Train_SDAE/tools/evaluate_model.py | import numpy as np
# import pandas as pd
# import sys
from scipy.special import expit
from sklearn import ensemble
def get_activations(exp_data, w, b):
    """Return one layer's activations: sigmoid(exp_data.T @ w + b)."""
    weighted = np.transpose(exp_data).dot(w)
    return expit(weighted + b)
# Order of *args: first all the weights and then all the biases
def run_random_forest(nHLayers, exp_data, labels, *args):
    """Propagate exp_data through the nHLayers stacked-DAE layers, then fit
    a random forest on the top-layer activations and print its OOB score.

    args[0] is a flat sequence of 2*nHLayers arrays: layer weights
    W_0..W_{n-1} followed by the matching bias vectors b_0..b_{n-1}.
    (Python 2 source: uses print statements.)
    """
    # Debug dumps of the sizes of the passed weight/bias arrays.
    print len(args[0]), len(args[0][0]), len(args[0][1])
    print len(args[0][2])
    print "NewLine!\n", len(args[0][3])
    print "NewLine!\n", len(args[0][4])
    # One label per sample is required.
    assert len(exp_data) == len(labels)
    # I think they should be already transposed when running the code. Will see
    act = exp_data  # .T
    for i in range(nHLayers):
        print('Weights and biases for layer: ' + str(i+1))
        print np.asarray(args[0][i]).shape, np.asarray(args[0][nHLayers + i]).shape
        # Layer i uses weights args[0][i] and bias args[0][nHLayers + i].
        act = get_activations(act.T, args[0][i], args[0][nHLayers + i])
    # Out-of-bag scoring gives a cross-validation-like estimate for free.
    rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5)
    rfit = rf.fit(act, labels)
    print('OOB score: %.2f\n' % rfit.oob_score_)
| apache-2.0 | Python |
|
009df3372804fa946b7e1bd4c0827e887b964b38 | Convert blogger to simple xml | progrn/csb | convert.py | convert.py | from bs4 import BeautifulSoup
import io
import markdown2
import time
import codecs
file = io.open("Import/blog-03-03-2013.xml")
file_contents = file.read(-1)
#lxml xpath doesn't seem to understand blogger export
soup = BeautifulSoup(file_contents)
entries = soup("entry")
count = 0
def formatTime(timefield):
    """Reformat a blogger timestamp field ("published" / "updated") of the
    current entry into a compact YYYYMMDDHHMMSS string.

    NOTE(review): reads the loop variable `entry` from module scope instead
    of taking the entry as a parameter, so it only works when called from
    inside the `for entry in entries` loop below -- consider passing entry
    explicitly.
    """
    # Only the first 16 chars ("YYYY-MM-DDTHH:MM") of the ISO timestamp are parsed.
    time_obj = time.strptime(entry(timefield)[0].string[0:16], "%Y-%m-%dT%H:%M")
    return time.strftime("%Y%m%d%H%M%S", time_obj)
for entry in entries:
categories = entry("category")
tags = []
post = False
for category in categories:
if category["term"] == "http://schemas.google.com/blogger/2008/kind#post":
post = True
if category["scheme"] == "http://www.blogger.com/atom/ns#" and category["term"]:
tags.append(category["term"])
if post:
pub = formatTime("published")
updated = formatTime("updated")
filename_xml = "%s.blogger.xml" % pub
title = entry("title")[0].string
content = entry("content")[0].string
blog_file = io.open("Export/" + filename_xml, "w")
blog_file.write("<blog>\n\t<title>%s</title>\n\t<content><![CDATA[%s]]></content>\n</blog>" % (title, content))
blog_file.close()
count += 1
print "Found %d posts" % count
print "done!" | mit | Python |
|
8348ce87a68592e7108c43687ebfdf12684a1914 | Add elementTypes.py file | mhogg/bonemapy | elementTypes.py | elementTypes.py |
class elementC3D10():
    """C3D10: 10-node quadratic tetrahedral finite element.

    NOTE(review): `array(...)` in __init__ is not defined anywhere in this
    file (no imports are visible); it presumably should allocate a
    length-10 vector (e.g. np.zeros(self.numNodes)) to hold the shape
    function values.  The file also mixes `numpy.` and `np.` prefixes --
    confirm the intended import.
    """

    def __init__(self):
        self.name = 'C3D10'
        self.desc = 'Quadratic tetrahedral element'
        self.numNodes = 10    # corner + mid-edge nodes
        self.numIntPnts = 4   # number of integration points
        self.N = array(self.numNodes)  # NOTE(review): see class docstring
        self.setIpcs()

    def setIpcs(self):
        # Natural coordinates (g, h, r) of the 4 integration points.
        alpha = 0.1770833333
        beta = 0.4687500000
        self.ipcs = numpy.array([[alpha, alpha, alpha],
                                 [beta,  alpha, alpha],
                                 [alpha, beta,  alpha],
                                 [alpha, alpha, beta]])

    def shapeFunctionMatrix(self, ipc):
        # Evaluate the 10 quadratic shape functions at natural coords ipc,
        # storing the result in self.N (mutated in place, nothing returned).
        g, h, r = ipc
        self.N[0] = (2.0*(1.0-g-h-r)-1.0)*(1.0-g-h-r)
        self.N[1] = (2.0*g-1.0)*g
        self.N[2] = (2.0*h-1.0)*h
        self.N[3] = (2.0*r-1.0)*r
        self.N[4] = 4.0*(1.0-g-h-r)*g
        self.N[5] = 4.0*g*h
        self.N[6] = 4.0*(1.0-g-h-r)*h
        self.N[7] = 4.0*(1.0-g-h-r)*r
        self.N[8] = 4.0*g*r
        self.N[9] = 4.0*h*r

    def interpFunc(self, nv):
        # Interpolate nodal values nv using the last-evaluated shape functions.
        return np.dot(self.N, nv)
class elementC3D4():
    """C3D4: 4-node linear tetrahedral finite element.

    NOTE(review): this class looks unfinished -- setIpcs leaves self.ipcs
    as an empty placeholder (alpha/beta are computed but unused and are
    marked CHECK by the author), np.array(self.numNodes) creates a 0-d
    array rather than a length-4 vector, and shapeFuncMatrix is named
    inconsistently with elementC3D10.shapeFunctionMatrix.
    """

    def __init__(self):
        self.name = 'C3D4'
        self.desc = 'Linear tetrahedral element'
        self.numNodes = 4    # corner nodes only
        self.numIntPnts = 1  # single integration point
        self.N = np.array(self.numNodes)  # NOTE(review): see class docstring
        self.setIpcs()

    def setIpcs(self):
        alpha = 0.33333 # CHECK THESE VALUES
        beta = 0.33333 # CHECK THESE VALUES
        self.ipcs = np.array([[],[],[]])  # NOTE(review): placeholder, no real coordinates

    def shapeFuncMatrix(self, ipc):
        # Linear tet shape functions at natural coords (g, h, r);
        # stored in self.N in place, nothing returned.
        g, h, r = ipc
        self.N[0] = (1.0-g-h-r)
        self.N[1] = g
        self.N[2] = h
        self.N[3] = r

    def interpFunc(self, nv):
        # Interpolate nodal values nv with the last-evaluated shape functions.
        return np.dot(self.N, nv)
| mit | Python |
|
e789fb7246e7b926841f2d2912896fd0a0d14518 | Create login_portal.py | Humoud/Kuniv-Portal-Login | login_portal.py | login_portal.py | from splinter import Browser
print 'Starting...'
browser = Browser('firefox') # using firefox
browser.visit("http://portal.ku.edu.kw/sisapp/faces/login.jspx")
browser.fill('username','xxxxx') # enter student ID
browser.fill('password','yyyyy') # enter password
browser.find_by_id('loginBtn').click() # click login
| mit | Python |
|
82acd4827b2f3f426a6b97f474c54886758cfab7 | add code to update fields | kadrlica/obztak | obztak/scratch/update-fields.py | obztak/scratch/update-fields.py | #!/usr/bin/env python
"""
Update survey fields
"""
__author__ = "Alex Drlica-Wagner"
import copy
import fitsio
import numpy as np
import pylab as plt
import skymap
from obztak.utils import fileio
import obztak.delve
from obztak.delve import DelveFieldArray
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('old')
parser.add_argument('new')
parser.add_argument('-o','--outfile',default='update_target_fields.csv')
args = parser.parse_args()
db = DelveFieldArray.load_database()
old = DelveFieldArray.load(args.old)
new = DelveFieldArray.load(args.new)
print("Running comparing to new fields...")
if len(old) != len(new):
print("Different number of fields")
delve = np.in1d(new.unique_id,db.unique_id)
#done = (new['PRIORITY'] < 0) & (old['PRIORITY'] >= 0)
done = (new['PRIORITY'] < 0) & np.in1d(new.unique_id, old.unique_id[old['PRIORITY'] >= 0])
plt.figure()
smap = skymap.SurveyMcBryde()
smap.draw_fields(new[done & ~delve])
smap.draw_des()
plt.title('New')
plt.show()
# Write here
out = DelveFieldArray.load(args.old)
### There are two ways of doing this that should give the same answers...
print("Running DelveSurvey.update_covered_fields...")
update = obztak.delve.DelveSurvey.update_covered_fields(old)
done = (update['PRIORITY'] < 0) & (old['PRIORITY'] >= 0)
delve = np.in1d(update.unique_id,db.unique_id)
plt.figure()
smap = skymap.SurveyMcBryde()
smap.draw_fields(update[done & ~delve])
#smap.draw_fields(update[done])
plt.title('Update')
print("Writing %s..."%args.outfile)
update.write(args.outfile)
# double check
assert len(fileio.read_csv(args.old)) == len(fileio.read_csv(args.outfile))
print("REMINDER: gzip the output file and move to data directory.")
| mit | Python |
|
7d5dcaa0a72dbdd78e192f082bbdf261de1d8963 | Delete occurrences of an element if it occurs more than n times | SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive | Codewars/DeleteOccurrencesOfElementOverNTimes.py | Codewars/DeleteOccurrencesOfElementOverNTimes.py | # implemented with list comprehension with side-effects and a global variable
# there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead
from collections import Counter
c = Counter()
# for use in list comprehensions with side effects! Naughty...
def count_and_return(x):
    # Increment the module-level Counter `c` for x, then hand x back
    # unchanged so the call can be used as the value expression of a list
    # comprehension (deliberate side-effect-in-comprehension experiment,
    # as the header comment explains).
    c[x] += 1
    return x
def delete_nth(arr, max_e):
    """Return arr with elements dropped once they have already appeared
    max_e times, preserving the original order.

    Fixes the original's reliance on the shared module-level Counter `c`
    (mutated through count_and_return inside a comprehension), which made
    the function depend on hidden global state; counting is now local to
    each call.

    Args:
        arr: sequence of hashable items.
        max_e: maximum number of occurrences to keep per item; values
            <= 0 yield an empty list.

    Returns:
        New list with at most max_e occurrences of each element.
    """
    if max_e <= 0:
        return []
    seen = Counter()
    kept = []
    for item in arr:
        # Keep the item only while its running count is below the cap.
        if seen[item] < max_e:
            seen[item] += 1
            kept.append(item)
    return kept
| unlicense | Python |
|
895570ad25b1475c1e9ce85a78f22f268dce8dec | Add visualization script | AquaBSD/libbuhlmann,AquaBSD/libbuhlmann,AquaBSD/libbuhlmann | tools/visoutput.py | tools/visoutput.py | #!/usr/bin/env python
"""
An animated image
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import sys
d_arr = []
t_arr = []
hs =[]
width = 0.35
maxpressure = 0.0
for line in sys.stdin:
toks = line.split(" ")
t_arr.append(toks[0])
d_arr.append((float(toks[1])-1)*10)
histline = []
histline.append(toks[3]) #1
histline.append(toks[3]) #2
histline.append(toks[5]) #3
histline.append(toks[7]) #4
histline.append(toks[9]) #5
histline.append(toks[11])#6
histline.append(toks[13])#7
histline.append(toks[15])#8
histline.append(toks[17])#9
histline.append(toks[19])#10
histline.append(toks[21])#11
histline.append(toks[23])#12
histline.append(toks[25])#13
histline.append(toks[27])#14
histline.append(toks[29])#15
histline.append(toks[31])#16
if (float(max(histline)) > maxpressure):
maxpressure = float(max(histline))
hs.append(histline)
fig = plt.figure()
fig.add_subplot(121)
l, = plt.plot(t_arr,d_arr)
plt.gca().invert_yaxis()
ax = fig.add_subplot(122)
ax.set_ylim(0.7, maxpressure)
nbComp = np.arange(len(histline))
rect = ax.bar(nbComp,hs[0],width)
ax.set_ylabel('Pressure')
ax.set_title('Pressure by compartment')
ax.set_xticks(nbComp + width)
ax.set_xticklabels(('C01', 'C02', 'C03', 'C04', 'C05','C06','C07','C08','C09','C10','C11','C12'))
axtime = plt.axes([0.2, 0.02, 0.65, 0.03])
stime= Slider(axtime, 'Time', 0, len(hs)-1, valinit=0,valfmt='%d')
def update(val):
time = int(stime.val)
ax.clear()
rect = ax.bar(nbComp,hs[time],width)
ax.set_ylim(0.7, maxpressure)
fig.canvas.draw()
stime.on_changed(update)
# plt.subplot(2, 1, 2)
# plt.plot(t_arr,bc1_arr, label="cmp 1")
# plt.plot(t_arr,bc2_arr, label="cmp 2")
# plt.plot(t_arr,bc3_arr, label="cmp 3")
# plt.plot(t_arr,bc4_arr, label="cmp 4")
# plt.plot(t_arr,bc5_arr, label="cmp 5")
# plt.plot(t_arr,bc6_arr, label="cmp 6")
# plt.plot(t_arr,bc7_arr, label="cmp 7")
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show() | isc | Python |
|
1c4adbe07892d95ca6254dcc2e48e11eb2141fa7 | Create pixelconversor.py | ornitorrincco/graphics,ornitorrincco/graphics,ornitorrincco/graphics | Art-2D/pixelconversor.py | Art-2D/pixelconversor.py | //This program rake a image an convert it in 2D pixel art.
| bsd-2-clause | Python |
|
096c8165ec2beacbc4897285b8fed439765d3e01 | Add test on update document title | AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core | test/integration/ggrc/models/test_document.py | test/integration/ggrc/models/test_document.py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for Document"""
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.models import factories
class TestDocument(TestCase):
    """Document test cases"""
    # pylint: disable=invalid-name

    def setUp(self):
        # Fresh API helper per test so requests don't share state.
        super(TestDocument, self).setUp()
        self.api = Api()

    def test_update_title(self):
        """Test update document title."""
        create_title = "test_title"
        update_title = "update_test_title"
        # Create a document with the initial title, rename it through the
        # REST API, and confirm the new title was persisted to the DB.
        document = factories.DocumentFactory(title=create_title)
        response = self.api.put(document, {"title": update_title})
        self.assert200(response)
        self.assertEqual(all_models.Document.query.get(document.id).title,
                         update_title)
| apache-2.0 | Python |
|
41752bfcbc0a1afdf7a0f3caa52285af08d131dd | Create get_var.py | ecoh70/Essential | get_var.py | get_var.py | import parse_expr
variables = {}
def getVar(key):
    """Resolve a token: '%name' -> stored variable value, strings ending
    in an arithmetic operator -> parsed expression, anything else -> the
    literal token itself.

    NOTE(review): `parse_expr` is imported as a *module* (import parse_expr)
    but invoked here as a callable; unless that module object is callable
    this raises TypeError -- probably parse_expr.parse_expr(...) was
    intended. Confirm against the parse_expr module.
    NOTE(review): an empty key raises IndexError on key[0].
    """
    if key[0] == '%':
        return variables[key[1:]]
    elif key[-1] in ('+', '-', '/', '*'):
        return parse_expr(key)
    else:
        return key
| bsd-3-clause | Python |
|
e42142498f2ef2b3e78d1becb024441500902a79 | add corruptor | olcf/pcircle,olcf/pcircle,olcf/pcircle,olcf/pcircle,olcf/pcircle | test/corrupt.py | test/corrupt.py | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import random
# Validate arguments: a filename and a non-empty magic string are required.
# (Fixed: the original used `and`, which both skipped the usage message on a
# wrong argument count and raised IndexError on sys.argv[2] when too few
# arguments were given.)
if len(sys.argv) != 3 or not sys.argv[2]:
    print('''
Usage: corrupt.py filename magic_string
magic_string is what you want to write to the file
it can not be empty and will be randomly placed \n\n''')
    sys.exit(1)

size = 0
index = 0

# Stat the target up front so a missing/unreadable file fails cleanly.
try:
    size = os.stat(sys.argv[1]).st_size
except Exception as e:
    print(e)
    sys.exit(1)

# Overwrite the magic string at a random offset within the file.
with open(sys.argv[1], "rb+") as f:
    index = random.randint(0, size)
    f.seek(index)
    # NOTE(review): writing a str to a binary handle only works on
    # Python 2; Python 3 would need sys.argv[2].encode().
    f.write(sys.argv[2])

print("Corrupted file offset: %s\n" % index)
| apache-2.0 | Python |
|
d2f18cc0992d4d7217583cd2601bc90afaa93a04 | add grain that detects SSDs | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/grains/ssds.py | salt/grains/ssds.py | # -*- coding: utf-8 -*-
'''
Detect SSDs
'''
import os
import salt.utils
import logging
log = logging.getLogger(__name__)
def ssds():
    '''
    Return list of disk devices that are SSD (non-rotational)
    '''
    # The kernel exposes /sys/block/<dev>/queue/rotational: '0' means SSD,
    # '1' means spinning disk.
    SSDs = []
    for subdir, dirs, files in os.walk('/sys/block'):
        # NOTE(review): os.walk recurses into every subdirectory of
        # /sys/block; only the first level has queue/rotational, so deeper
        # levels just emit "does not exist" warnings -- os.listdir would do.
        for dir in dirs:  # NOTE(review): `dir` shadows the builtin
            flagfile = subdir + '/' + dir + '/queue/rotational'
            if os.path.isfile(flagfile):
                # Read a single character: the flag file contains '0' or '1'.
                with salt.utils.fopen(flagfile, 'r') as _fp:
                    flag = _fp.read(1)
                if flag == '0':
                    SSDs.append(dir)
                    log.info(dir + ' is a SSD')
                elif flag == '1':
                    log.info(dir + ' is no SSD')
                else:
                    log.warning(flagfile + ' does not report 0 or 1')
                    log.debug(flagfile + ' reports ' + flag)
            else:
                log.warning(flagfile + ' does not exist for ' + dir)
    # Grains must return a dict keyed by grain name.
    return {'SSDs': SSDs}
| apache-2.0 | Python |
|
936c2327d6be9da48dfbef47c17167510e9c2262 | Create bzip2.py | vadimkantorov/wigwam | wigs/bzip2.py | wigs/bzip2.py | class bzip2(Wig):
tarball_uri = 'http://www.bzip.org/1.0.6/bzip2-$RELEASE_VERSION$.tar.gz'
last_release_version = 'v1.0.6'
| mit | Python |
|
c2ca8328835d544440fd3b87813e2768ece58685 | Add new package: audacious (#16121) | LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/audacious/package.py | var/spack/repos/builtin/packages/audacious/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Audacious(AutotoolsPackage):
"""A lightweight and versatile audio player."""
homepage = "https://audacious-media-player.org/"
url = "https://github.com/audacious-media-player/audacious/archive/audacious-4.0.2.tar.gz"
version('4.0.2', sha256='92f30a78353c50f99b536061b9d94b6b9128760d546fddbf863e3591c4ac5a8d')
version('4.0.1', sha256='203195cf0d3c2e40d23c9895269ca0ace639c4a2b4dceb624169d75337059985')
version('4.0', sha256='cdfffd0eb966856980328ebb0fff9cbce57f99db9bda15e7e839d26c89e953e6')
version('3.10.1', sha256='c478939b4bcf6704c26eee87d48cab26547e92a83741f437711178c433373fa1')
version('3.10', sha256='82710d6ac90931c2cc4a0f0fcb6380ac21ed42a7a50856d16a67d3179a96e9ae')
depends_on('m4', type='build')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('glib')
depends_on('qt')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./autogen.sh')
| lgpl-2.1 | Python |
|
4287d2290c581b907b08efabc1e6bccea4019ac6 | add new package (#15743) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/py-pyface/package.py | var/spack/repos/builtin/packages/py-pyface/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyPyface(PythonPackage):
"""The pyface project contains a toolkit-independent GUI abstraction layer,
which is used to support the "visualization" features of the Traits
package. Thus, you can write code in terms of the Traits API (views, items,
editors, etc.), and let pyface and your selected toolkit and back-end take
care of the details of displaying them."""
homepage = "https://docs.enthought.com/pyface"
url = "https://pypi.io/packages/source/p/pyface/pyface-6.1.2.tar.gz"
version('6.1.2', sha256='7c2ac3d5cbec85e8504b3b0b63e9307be12c6d710b46bae372ce6562d41f4fbc')
variant('backend', default='pyqt5', description='Default backend',
values=('wx', 'pyqt', 'pyqt5', 'pyside'), multi=False)
depends_on('py-setuptools', type='build')
depends_on('py-traits', type=('build', 'run'))
# Backends
depends_on('py-wxpython@2.8.10:', when='backend=wx', type=('build', 'run'))
depends_on('py-numpy', when='backend=wx', type=('build', 'run'))
depends_on('py-pyqt4@4.10:', when='backend=pyqt', type=('build', 'run'))
depends_on('py-pygments', when='backend=pyqt', type=('build', 'run'))
depends_on('py-pyqt5@5:', when='backend=pyqt5', type=('build', 'run'))
depends_on('py-pygments', when='backend=pyqt5', type=('build', 'run'))
depends_on('py-pyside@1.2:', when='backend=pyside', type=('build', 'run'))
depends_on('py-pygments', when='backend=pyside', type=('build', 'run'))
| lgpl-2.1 | Python |
|
be0033ac91c28f3e45eff34c84b7da59d7fcefe2 | add py-ranger package (#3258) | iulian787/spack,mfherbst/spack,EmreAtes/spack,lgarren/spack,LLNL/spack,matthiasdiener/spack,skosukhin/spack,skosukhin/spack,matthiasdiener/spack,matthiasdiener/spack,lgarren/spack,TheTimmy/spack,tmerrick1/spack,iulian787/spack,LLNL/spack,lgarren/spack,krafczyk/spack,matthiasdiener/spack,mfherbst/spack,krafczyk/spack,mfherbst/spack,iulian787/spack,LLNL/spack,iulian787/spack,EmreAtes/spack,lgarren/spack,tmerrick1/spack,skosukhin/spack,LLNL/spack,skosukhin/spack,TheTimmy/spack,lgarren/spack,krafczyk/spack,EmreAtes/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,krafczyk/spack,TheTimmy/spack,TheTimmy/spack,mfherbst/spack,TheTimmy/spack,mfherbst/spack,tmerrick1/spack,skosukhin/spack,EmreAtes/spack,LLNL/spack,krafczyk/spack,EmreAtes/spack,matthiasdiener/spack | var/spack/repos/builtin/packages/py-ranger/package.py | var/spack/repos/builtin/packages/py-ranger/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class PyRanger(PythonPackage):
"""A VIM-inspired filemanager for the console"""
homepage = "http://ranger.nongnu.org/"
url = "https://github.com/ranger/ranger/archive/v1.7.2.tar.gz"
version('1.7.2', '27805c3ab7ec4b129e1b93249506d925')
depends_on('python@2.6:')
| lgpl-2.1 | Python |
|
7e4a62aa483fbadc7089144191e48948f419903b | add setup.py | cloudbearings/vitess,mapbased/vitess,mapbased/vitess,AndyDiamondstein/vitess,kuipertan/vitess,enisoc/vitess,mapbased/vitess,cloudbearings/vitess,fengshao0907/vitess,erzel/vitess,atyenoria/vitess,anusornc/vitess,apmichaud/vitess-apm,AndyDiamondstein/vitess,dumbunny/vitess,anusornc/vitess,guokeno0/vitess,dumbunny/vitess,SDHM/vitess,tirsen/vitess,mlc0202/vitess,alainjobart/vitess,mattharden/vitess,dumbunny/vitess,alainjobart/vitess,tinyspeck/vitess,enisoc/vitess,HeisenbergUncertain/vitess,xgwubin/vitess,vitessio/vitess,davygeek/vitess,rnavarro/vitess,mattharden/vitess,sougou/vitess,cgvarela/vitess,xgwubin/vitess,HubSpot/vitess,mattharden/vitess,kmiku7/vitess-annotated,michael-berlin/vitess,erzel/vitess,guokeno0/vitess,skyportsystems/vitess,guokeno0/vitess,CERN-Stage-3/vitess,applift/vitess,guokeno0/vitess,nurblieh/vitess,atyenoria/vitess,dcadevil/vitess,applift/vitess,pivanof/vitess,mahak/vitess,cgvarela/vitess,mattharden/vitess,dcadevil/vitess,SDHM/vitess,michael-berlin/vitess,kuipertan/vitess,erzel/vitess,yangzhongj/vitess,tjyang/vitess,skyportsystems/vitess,netroby/vitess,apmichaud/vitess-apm,vitessio/vitess,yaoshengzhe/vitess,apmichaud/vitess-apm,rnavarro/vitess,pivanof/vitess,tinyspeck/vitess,CERN-Stage-3/vitess,yaoshengzhe/vitess,mattharden/vitess,netroby/vitess,nurblieh/vitess,xgwubin/vitess,HubSpot/vitess,dumbunny/vitess,AndyDiamondstein/vitess,HubSpot/vitess,guokeno0/vitess,erzel/vitess,michael-berlin/vitess,fengshao0907/vitess,erzel/vitess,erzel/vitess,pivanof/vitess,kuipertan/vitess,alainjobart/vitess,ptomasroos/vitess,vitessio/vitess,nurblieh/vitess,tirsen/vitess,cgvarela/vitess,mattharden/vitess,anusornc/vitess,netroby/vitess,davygeek/vitess,tjyang/vitess,enisoc/vitess,xgwubin/vitess,sougou/vitess,HubSpot/vitess,skyportsystems/vitess,mlc0202/vitess,yangzhongj/vitess,cgvarela/vitess,mapbased/vitess,CERN-Stage-3/vitess,dumbunny/vitess,erzel/vitess,enisoc/vitess,alainjobart/vitess,HeisenbergUncertain/v
itess,HeisenbergUncertain/vitess,tirsen/vitess,dcadevil/vitess,netroby/vitess,dumbunny/vitess,tjyang/vitess,yaoshengzhe/vitess,atyenoria/vitess,cloudbearings/vitess,SDHM/vitess,mlc0202/vitess,nurblieh/vitess,kmiku7/vitess-annotated,aaijazi/vitess,yangzhongj/vitess,AndyDiamondstein/vitess,ngaut/vitess,tinyspeck/vitess,sougou/vitess,mapbased/vitess,kmiku7/vitess-annotated,mattharden/vitess,alainjobart/vitess,dcadevil/vitess,ptomasroos/vitess,anusornc/vitess,yangzhongj/vitess,michael-berlin/vitess,sougou/vitess,aaijazi/vitess,aaijazi/vitess,kmiku7/vitess-annotated,AndyDiamondstein/vitess,cgvarela/vitess,ptomasroos/vitess,dumbunny/vitess,tirsen/vitess,tjyang/vitess,cgvarela/vitess,tjyang/vitess,kuipertan/vitess,skyportsystems/vitess,SDHM/vitess,applift/vitess,kmiku7/vitess-annotated,vitessio/vitess,SDHM/vitess,dcadevil/vitess,dumbunny/vitess,mapbased/vitess,mahak/vitess,tirsen/vitess,vitessio/vitess,applift/vitess,mlc0202/vitess,yangzhongj/vitess,yaoshengzhe/vitess,mahak/vitess,mapbased/vitess,rnavarro/vitess,tirsen/vitess,atyenoria/vitess,anusornc/vitess,CERN-Stage-3/vitess,alainjobart/vitess,pivanof/vitess,kmiku7/vitess-annotated,HeisenbergUncertain/vitess,kuipertan/vitess,kuipertan/vitess,skyportsystems/vitess,cgvarela/vitess,rnavarro/vitess,cloudbearings/vitess,rnavarro/vitess,vitessio/vitess,mahak/vitess,SDHM/vitess,applift/vitess,AndyDiamondstein/vitess,enisoc/vitess,xgwubin/vitess,yangzhongj/vitess,applift/vitess,alainjobart/vitess,yaoshengzhe/vitess,xgwubin/vitess,fengshao0907/vitess,vitessio/vitess,fengshao0907/vitess,davygeek/vitess,michael-berlin/vitess,enisoc/vitess,sougou/vitess,aaijazi/vitess,guokeno0/vitess,aaijazi/vitess,mlc0202/vitess,pivanof/vitess,guokeno0/vitess,aaijazi/vitess,ngaut/vitess,alainjobart/vitess,mlc0202/vitess,fengshao0907/vitess,tirsen/vitess,aaijazi/vitess,mahak/vitess,HubSpot/vitess,nurblieh/vitess,ptomasroos/vitess,ptomasroos/vitess,dcadevil/vitess,nurblieh/vitess,kmiku7/vitess-annotated,cgvarela/vitess,xgwubin/vitess,HubSpot/vitess,
enisoc/vitess,ngaut/vitess,aaijazi/vitess,mattharden/vitess,ptomasroos/vitess,fengshao0907/vitess,applift/vitess,dcadevil/vitess,anusornc/vitess,tinyspeck/vitess,rnavarro/vitess,nurblieh/vitess,skyportsystems/vitess,yaoshengzhe/vitess,netroby/vitess,tirsen/vitess,cloudbearings/vitess,pivanof/vitess,fengshao0907/vitess,mattharden/vitess,guokeno0/vitess,netroby/vitess,ngaut/vitess,nurblieh/vitess,aaijazi/vitess,tjyang/vitess,HubSpot/vitess,skyportsystems/vitess,SDHM/vitess,netroby/vitess,tinyspeck/vitess,mahak/vitess,xgwubin/vitess,erzel/vitess,anusornc/vitess,michael-berlin/vitess,vitessio/vitess,tjyang/vitess,applift/vitess,mahak/vitess,kmiku7/vitess-annotated,rnavarro/vitess,ptomasroos/vitess,yangzhongj/vitess,AndyDiamondstein/vitess,mapbased/vitess,sougou/vitess,kuipertan/vitess,rnavarro/vitess,atyenoria/vitess,ptomasroos/vitess,mlc0202/vitess,yaoshengzhe/vitess,atyenoria/vitess,AndyDiamondstein/vitess,davygeek/vitess,yaoshengzhe/vitess,apmichaud/vitess-apm,mahak/vitess,tjyang/vitess,sougou/vitess,mapbased/vitess,kuipertan/vitess,pivanof/vitess,xgwubin/vitess,tirsen/vitess,mapbased/vitess,michael-berlin/vitess,atyenoria/vitess,pivanof/vitess,erzel/vitess,mlc0202/vitess,HubSpot/vitess,erzel/vitess,netroby/vitess,applift/vitess,atyenoria/vitess,anusornc/vitess,anusornc/vitess,yangzhongj/vitess,ptomasroos/vitess,SDHM/vitess,davygeek/vitess,AndyDiamondstein/vitess,davygeek/vitess,tinyspeck/vitess,netroby/vitess,cgvarela/vitess,mlc0202/vitess,skyportsystems/vitess,cloudbearings/vitess,fengshao0907/vitess,ngaut/vitess,davygeek/vitess,fengshao0907/vitess,yangzhongj/vitess,cloudbearings/vitess,dumbunny/vitess,nurblieh/vitess,sougou/vitess,guokeno0/vitess,michael-berlin/vitess,kmiku7/vitess-annotated,skyportsystems/vitess,yaoshengzhe/vitess,apmichaud/vitess-apm,applift/vitess,cloudbearings/vitess,pivanof/vitess,cloudbearings/vitess,mattharden/vitess,SDHM/vitess,rnavarro/vitess,kuipertan/vitess,atyenoria/vitess,CERN-Stage-3/vitess,dumbunny/vitess,michael-berlin/vitess,tjyan
g/vitess,tinyspeck/vitess | py/setup.py | py/setup.py | #!/usr/bin/env python
# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :
# Copyright (c) 2010, Kou Man Tong. All rights reserved.
# For licensing, see LICENSE file included in the package.
from distutils.core import setup
setup(name = "vtdb",
packages=["vtdb", "net"],
platforms = "Any",
)
| apache-2.0 | Python |
|
a8f1529f6c077c0d70ccb326da6e63f3dd78ec76 | move kernel sanitization to separate script | rice-solar-physics/hot_plasma_single_nanoflares,rice-solar-physics/hot_plasma_single_nanoflares,rice-solar-physics/hot_plasma_single_nanoflares | sanitize_kernels.py | sanitize_kernels.py | import glob
import nbformat
#sanitize kernelspec
notebooks = glob.glob("notebooks/*.ipynb")
old_envs = {}
for nb in notebooks:
tmp = nbformat.read(nb,4)
old_envs[nb] = tmp['metadata']['kernelspec']['name']
tmp['metadata']['kernelspec']['name'] = 'python2'
nbformat.write(tmp,nb)
#revert kernelspec
#for k in old_envs:
# tmp = nbformat.read(k,4)
# tmp['metadata']['kernelspec']['name'] = old_envs[k]
# nbformat.write(tmp,k)
| bsd-2-clause | Python |
|
9b4f18dbf63a76bd2c0723677fb0d0215831324a | Create __init__.py | gappleto97/Senior-Project | ext/__init__.py | ext/__init__.py | mit | Python |
||
52e282b8c51c71db61cb0163df02caf2dce63b45 | add pretty function repr extension | minrk/ipython_extensions,minrk/ipython_extensions,danielballan/ipython_extensions,NunoEdgarGub1/ipython_extensions,NunoEdgarGub1/ipython_extensions,minrk/ipython_extensions,danielballan/ipython_extensions,dekstop/ipython_extensions,dekstop/ipython_extensions,danielballan/ipython_extensions,dekstop/ipython_extensions,NunoEdgarGub1/ipython_extensions | extensions/pretty_func_repr.py | extensions/pretty_func_repr.py | """
Trigger pinfo (??) to compute text reprs of functions, etc.
Requested by @katyhuff
"""
import types
from IPython import get_ipython
def pinfo_function(obj, p, cycle):
"""Call the same code as `foo?` to compute reprs of functions
Parameters
----------
obj:
The object being formatted
p:
The pretty formatter instance
cycle:
Whether a cycle has been detected (unused)
"""
text = get_ipython().inspector._format_info(obj, detail_level=1)
p.text(text)
_save_types = {}
def load_ipython_extension(ip):
"""register pinfo_function as the custom plain-text repr for funtion types"""
pprinter = ip.display_formatter.formatters['text/plain']
for t in (types.FunctionType,
types.BuiltinMethodType,
types.BuiltinFunctionType):
f = pprinter.for_type(t, pinfo_function)
_save_types[t] = f
def unload_ipython_extension(ip):
"""unregister pinfo_function"""
pprinter = ip.display_formatter.formatters['text/plain']
for t, f in _save_types.items():
pprinter.for_type(t, f)
_save_types.clear()
| bsd-3-clause | Python |
|
48ee097349b4315b9f3c726b734aa20e878b2288 | Add binary-numbers-small resource | uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged | csunplugged/resources/views/binary_cards_small.py | csunplugged/resources/views/binary_cards_small.py | """Module for generating Binary Cards (Small) resource."""
import os.path
from PIL import Image, ImageDraw, ImageFont
from utils.retrieve_query_parameter import retrieve_query_parameter
def resource_image(request, resource):
"""Create a image for Binary Cards (Small) resource.
Args:
request: HTTP request object.
resource: Object of resource data.
Returns:
A list of Pillow image objects.
"""
BASE_IMAGE_PATH = "static/img/resources/binary-cards-small/"
IMAGE_SIZE_X = 2480
IMAGE_SIZE_Y = 3044
IMAGE_DATA = [
("binary-cards-small-1.png", 4),
("binary-cards-small-2.png", 8),
("binary-cards-small-3.png", 12),
]
# Retrieve parameters
requested_bits = retrieve_query_parameter(request, "number_bits", ["4", "8", "12"])
dot_counts = retrieve_query_parameter(request, "dot_counts", ["yes", "no"])
black_back = retrieve_query_parameter(request, "black_back", ["yes", "no"])
if dot_counts == "yes":
font_path = "static/fonts/PatrickHand-Regular.ttf"
font = ImageFont.truetype(font_path, 200)
TEXT_COORDS = [
(525, 1341),
(1589, 1341),
(525, 2889),
(1589, 2889),
]
images = []
for (image_path, image_bits) in IMAGE_DATA:
requested_bits = int(requested_bits)
if image_bits <= requested_bits:
image = Image.open(os.path.join(BASE_IMAGE_PATH, image_path))
if dot_counts == "yes":
draw = ImageDraw.Draw(image)
for number in range(image_bits - 4, image_bits):
text = str(pow(2, number))
text_width, text_height = draw.textsize(text, font=font)
coord_x = TEXT_COORDS[number % 4][0] - (text_width / 2)
coord_y = TEXT_COORDS[number % 4][1] - (text_height / 2)
draw.text(
(coord_x, coord_y),
text,
font=font,
fill="#000"
)
images.append(image)
if black_back == "yes":
black_card = Image.new("1", (IMAGE_SIZE_X, IMAGE_SIZE_Y))
images.append(black_card)
return images
def subtitle(request, resource):
    """Return the subtitle string of the resource.

    Used after the resource name in the filename, and
    also on the resource image.

    Args:
        request: HTTP request object
        resource: Object of resource data.

    Returns:
        text for subtitle (string)
    """
    # Spell each selected option out in words so the generated filename and
    # image caption record exactly which variant was produced.
    if retrieve_query_parameter(request, "dot_counts") == "yes":
        display_numbers_text = "with dot counts"
    else:
        display_numbers_text = "without dot counts"
    if retrieve_query_parameter(request, "black_back") == "yes":
        black_back_text = "with black back"
    else:
        black_back_text = "without black back"
    text = "{} bits - {} - {} - {}".format(
        retrieve_query_parameter(request, "number_bits"),
        display_numbers_text,
        black_back_text,
        retrieve_query_parameter(request, "paper_size")
    )
    return text
def valid_options():
    """Provide dictionary of all valid parameters.

    This excludes the header text parameter.

    Returns:
        All valid options (dict).
    """
    yes_no = ["yes", "no"]
    return {
        "number_bits": ["4", "8", "12"],
        "dot_counts": list(yes_no),
        "black_back": list(yes_no),
        "paper_size": ["a4", "letter"],
    }
| mit | Python |
|
864bf2bb3bdb731d0725cc33891145f2a7da17d3 | Add initialization functions for database connection | leaffan/pynhldb | db/common.py | db/common.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.schema import MetaData
from sqlalchemy.ext.declarative import declarative_base
from utils import get_connection_string_from_config_file
cfg_src = os.path.join(os.path.dirname(__file__), "..", r"_config.ini")
conn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3')
Engine = create_engine(conn_string, echo=False, pool_size=5)
Session = sessionmaker(bind=Engine)
Base = declarative_base(metadata=MetaData(schema='nhl', bind=Engine))
@contextmanager
def session_scope():
    """Yield a SQLAlchemy session, rolling back on error and always closing.

    NOTE(review): unlike the canonical SQLAlchemy session_scope recipe,
    this never calls session.commit() on success, so writes are discarded
    unless the caller commits explicitly -- confirm this is intentional
    (read-only usage?).
    NOTE(review): the bare `except:` also catches KeyboardInterrupt /
    SystemExit; it re-raises, so this is mainly a style concern.
    """
    session = Session()
    try:
        yield session
    except:
        session.rollback()
        raise
    finally:
        session.close()
| mit | Python |
|
ba0e1d90f5f33ed63c56c2788873624731a7a0b5 | add file | zlvb/regx | regxtest.py | regxtest.py | '''
((abc){4})
[1-5]{5}
5+
5*
5?
'''
# Parse-tree node kinds (EQUL is presumably a typo for EQUAL; kept as-is).
EQUL = 1
COUNT = 2
ANY = 3
TREE = 4
class Node:
    """A node of the regex parse tree.

    Fixes a SyntaxError in the original, which wrote ``class Node``
    without the trailing colon.
    """

    def __init__(self, ntype, parent=None):
        # ntype is one of the module constants EQUL/COUNT/ANY/TREE.
        self.type = ntype
        # The literal character for this token, if any (filled in later).
        self.c = None
        # Sub-nodes; only populated for TREE nodes.
        self.children = []
        # Enclosing TREE node; None at the root.
        self.parent = parent
class RegX:
    # NOTE(review): work-in-progress parser with several syntax errors that
    # prevent this module from importing; flagged inline below.
    def __init__(self, regstr):
        # Root of the parse tree; self.tokens aliases the current node's
        # child list so parse methods can append to it.
        self.curnode = Node(TREE)
        self.tokens = self.curnode.children
        self.parseregx(regstr)
    # NOTE(review): SyntaxError -- parseany has no body. It is called below
    # with (regstr, idx), so the signature is also missing the idx parameter.
    def parseany(self, regstr):
    def parseregx(self, regstr, idx = 0):
        # NOTE(review): regstr_len is computed but never used, and the loop
        # has no termination check against it.
        regstr_len = len(regstr)
        while True:
            if regstr[idx] == '[':
                # Character class: delegate to parseany (broken, see above).
                newnode = Node(ANY, self.curnode)
                self.tokens.append(newnode)
                idx = self.parseany(regstr, idx)
            elif regstr[idx] == '{':
                # Repetition count: insert before the token it modifies.
                newnode = Node(COUNT, self.curnode)
                self.tokens.insert(-1, newnode)
                idx+=1
            elif regstr[idx] == '(':
                # Open group: descend into a new TREE node.
                newnode = Node(TREE, self.curnode)
                self.curnode = newnode
                self.tokens = newnode.children
                # NOTE(review): NameError -- should be self.parseregx(...).
                parseregx(regstr, idx)
            elif regstr[idx] == ')':
                # Close group: pop back to the parent TREE node.
                self.curnode = self.curnode.parent
                self.tokens = self.curnode.children
                idx+=1
            elif regstr[idx] == '?':
                newnode = Node(COUNT, self.curnode)
                newnode.c = regstr[idx]
                self.tokens.insert(-1, newnode)
                idx+=1
            elif regstr[idx] == '+':
                newnode = Node(COUNT, self.curnode)
                newnode.c = regstr[idx]
                self.tokens.insert(-1, newnode)
                idx+=1
            elif regstr[idx] == '*':
                newnode = Node(COUNT, self.curnode)
                newnode.c = regstr[idx]
                self.tokens.insert(-1, newnode)
                idx+=1
            elif regstr[idx] == '.':
                pass
            # NOTE(review): SyntaxError -- ``elif`` requires a condition
            # (probably meant ``else:``).
            elif:
                pass
|
11380e7db081960757cbde2c4d2e69b695648782 | Add routine to calculate density. | lauralwatkins/genhernquist | density.py | density.py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.DENSITY
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
def density(r, norm, rs, alpha, beta, gamma):
"""
Density profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
rho = norm*(r/rs)**(-gamma)*(1+(r/rs)**alpha)**((gamma-beta)/alpha)
return rho
| bsd-2-clause | Python |
|
3fb3662e58e35ccb283074c1078e1c9e7aaf88ed | Add live test for session | jgillick/LendingClub,carlosnasillo/LendingClub,jgillick/LendingClub,carlosnasillo/LendingClub | LendingClub/tests/live_session_test.py | LendingClub/tests/live_session_test.py | #!/usr/bin/env python
import sys
import unittest
import getpass
from logger import TestLogger
sys.path.insert(0, '.')
sys.path.insert(0, '../')
sys.path.insert(0, '../../')
from LendingClub import session
class LiveTestSession(unittest.TestCase):
    # Interactive live test against the real LendingClub service.
    # NOTE: Python 2 module (print statements, raw_input); the login test
    # prompts the operator for real credentials on stdin.
    http = None
    session = None
    logger = None
    def setUp(self):
        # Fresh session with a capturing test logger per test.
        self.logger = TestLogger()
        self.session = session.Session(logger=self.logger)
    def tearDown(self):
        pass
    def test_login(self):
        """ test_valid_login
        Test login with credentials from the user
        """
        print '\n\nEnter a valid LendingClub account information...'
        email = raw_input('Email:')
        password = getpass.getpass()
        self.assertTrue(self.session.authenticate(email, password))
        print 'Authentication successful'
    def test_invalid_login(self):
        """ test_invalid_password
        Test login with the wrong password
        """
        # A deliberately wrong password must raise AuthenticationError.
        self.assertRaises(
            session.AuthenticationError,
            lambda: self.session.authenticate('test@test.com', 'wrongsecret')
        )
if __name__ == '__main__':
    unittest.main()
| mit | Python |
|
9e4858e652fba57f767a9c6d921853a6487301bd | Add a test for the version string parsing code | twisted/epsilon | epsilon/test/test_version.py | epsilon/test/test_version.py | """
Tests for turning simple version strings into twisted.python.versions.Version
objects.
"""
from epsilon import asTwistedVersion
from twisted.trial.unittest import SynchronousTestCase
class AsTwistedVersionTests(SynchronousTestCase):
    def test_simple(self):
        """
        asTwistedVersion turns a package name plus a "major.minor.micro"
        string into a Version carrying exactly those components.
        """
        v = asTwistedVersion("package", "1.2.3")
        self.assertEqual(v.package, "package")
        self.assertEqual(v.major, 1)
        self.assertEqual(v.minor, 2)
        self.assertEqual(v.micro, 3)
| mit | Python |
|
dff8d43edd0e831605f1b1c3b2d261fcf05dca9a | Add wordpress guid replace script | greyia/misc | script/wordpress/guid.py | script/wordpress/guid.py | import MySQLdb
import urlparse
poe = "https://wordpress.wordpress"
db = MySQLdb.connect(db="wordpress",user="",passwd="")
c = db.cursor()
sql = "SELECT ID,guid from wp_posts;"
c.execute(sql)
records = c.fetchall()
for record in records:
o = urlparse.urlparse(record[1])
url = poe + o.path
if o.query:
url = url + "?" + o.query
print "UPDATE wp_posts SET guid ='" + url + "' where ID = '" + str(record[0]) + "';"
| mit | Python |
|
c48ec87b3e1c672864fc8c5bfe1aa551c01846ee | add basic tcp server | mrtazz/admiral,mrtazz/admiral | Server.py | Server.py | """
File: Server.py
Author: Daniel Schauenberg <schauend@informatik.uni-freiburg.de>
Description: class for implementing a search engine web server
"""
import socket
from operator import itemgetter
class Webserver:
    """ class for implementing a web server, serving
        the inverted index search engine to the outside
        (or inside) world
    """
    # NOTE: Python 2 module (print statements).
    def __init__(self, host='', port=3366):
        """ constructor method to set the webserver basic settings
        """
        # host='' binds to all interfaces; the socket is created lazily
        # in bind_to_port().
        self.host = host
        self.port = port
        self.socket = None
    def bind_to_port(self):
        """ simple method to make the port binding easier
        """
        self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.socket.bind((self.host,self.port))
        # number of queued connections
        self.socket.listen(3)
        # create endless loop waiting for connections
        # can be interrupted via CTRL-C
        try:
            while True:
                # get socket object and client address
                # (clientsock is actually the (host, port) address tuple)
                connection, clientsock = self.socket.accept()
                print "Client %s connected with port %s." % (itemgetter(0)(clientsock),itemgetter(1)(clientsock))
                # Read until the client closes; received data is only logged.
                while True:
                    data = connection.recv(8192)
                    if not data: break
                    #connection.sendall(data)
                    print data
                connection.close()
                # NOTE(review): 'clientaddr' is undefined -> NameError as soon
                # as the first client disconnects (probably meant clientsock).
                print clientaddr
        finally:
            # don't leave socket open when going home
            self.socket.close()
def main():
    # Start a server on all interfaces, port 3366, and serve until killed.
    foo = Webserver()
    foo.bind_to_port()
if __name__ == '__main__':
    main()
| mit | Python |
|
94403aedd21947c30b5d8159fcd42288050afc3a | Create 6kyu_personalized_brand_list.py | Orange9000/Codewars,Orange9000/Codewars | Solutions/6kyu/6kyu_personalized_brand_list.py | Solutions/6kyu/6kyu_personalized_brand_list.py | from collections import OrderedDict
def sorted_brands(history):
poplr=OrderedDict()
for i in history:
try: poplr[i['brand']]+=1
except: poplr[i['brand']]=1
return sorted(poplr.keys(), key=lambda x: poplr[x], reverse=1)
| mit | Python |
|
48cac034e7b402e2d4b3cb52d2cae51b44928e0b | add Faster R-CNN | yuyu2172/chainercv,yuyu2172/chainercv,pfnet/chainercv,chainer/chainercv,chainer/chainercv | examples/faster_rcnn/eval.py | examples/faster_rcnn/eval.py | from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import voc_detection_label_names
from chainercv.datasets import VOCDetectionDataset
from chainercv.evaluations import eval_detection_voc
from chainercv.links import FasterRCNNVGG16
from chainercv.utils import apply_detection_link
def main():
    """Evaluate a pretrained Faster R-CNN VGG16 model on the VOC2007 test set."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    args = parser.parse_args()

    model = FasterRCNNVGG16(pretrained_model='voc07')

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()
    model.use_preset('evaluate')

    dataset = VOCDetectionDataset(
        year='2007', split='test', use_difficult=True, return_difficult=True)
    iterator = iterators.SerialIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)

    start_time = time.time()
    # One-element list so the nested hook can mutate the counter. The old
    # code declared ``global processed`` (no such module global exists, so
    # the first call raised NameError) and then called len() on the int
    # counter (TypeError).
    processed = [0]

    def hook(
            pred_bboxes, pred_labels, pred_scores, gt_values):
        # Progress callback invoked once per evaluated batch.
        processed[0] += len(pred_bboxes)
        fps = processed[0] / (time.time() - start_time)
        sys.stdout.write(
            '\r{:d} of {:d} images, {:.2f} FPS'.format(
                processed[0], len(dataset), fps))
        sys.stdout.flush()

    pred_bboxes, pred_labels, pred_scores, gt_values = \
        apply_detection_link(model, iterator, hook=hook)
    gt_bboxes, gt_labels, gt_difficults = gt_values

    eval_ = eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_difficults,
        use_07_metric=True)

    print()
    print('mAP: {:f}'.format(eval_['map']))
    for l, name in enumerate(voc_detection_label_names):
        if l in eval_:
            print('{:s}: {:f}'.format(name, eval_[l]['ap']))
        else:
            print('{:s}: -'.format(name))


if __name__ == '__main__':
    main()
|
874e2c35bb0aea38a1161d96b8af484a69336ea6 | Add htpasswd.py to the contrib tree as it may be useful more generally than just for the Testing branch | rbaumg/trac,rbaumg/trac,rbaumg/trac,rbaumg/trac | contrib/htpasswd.py | contrib/htpasswd.py | #!/usr/bin/python
"""Replacement for htpasswd"""
import os
import random
try:
import crypt
except ImportError:
import fcrypt as crypt
from optparse import OptionParser
def salt():
    """Return two random characters usable as a crypt(3) salt."""
    # FIXME: Additional characters may be legal here.
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    return ''.join(random.choice(alphabet) for _ in range(2))
class HtpasswdFile:
    """In-memory model of an Apache htpasswd file.

    Entries are kept as ``[username, hash]`` pairs in ``self.entries``.
    """

    def __init__(self, filename, create=False):
        """Load *filename*; with ``create=True`` start with an empty file.

        Raises Exception if the file is missing and ``create`` is False.
        """
        self.entries = []
        self.filename = filename
        if not create:
            if os.path.exists(self.filename):
                self.load()
            else:
                raise Exception("%s does not exist" % self.filename)

    def load(self):
        """(Re)read all entries from disk."""
        self.entries = []
        # ``with`` guarantees the handle is closed; the old code opened the
        # file without ever closing it.
        with open(self.filename, 'r') as f:
            for line in f:
                username, hashed = line.split(':')
                self.entries.append([username, hashed.rstrip()])

    def save(self):
        """Write every entry back to disk in ``user:hash`` format."""
        with open(self.filename, 'w') as f:
            f.writelines("%s:%s\n" % (entry[0], entry[1])
                         for entry in self.entries)

    def update(self, username, password):
        """Set or replace *username*'s password (crypt(3)-hashed)."""
        # Renamed from ``hash``, which shadowed the builtin of that name.
        hashed = crypt.crypt(password, salt())
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = hashed
        else:
            self.entries.append([username, hashed])

    def delete(self, username):
        """Remove every entry for *username*."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]
def main():
    """%prog [-c] -b filename username password
    Create or update an htpasswd file"""
    # For now, we only care about the use cases that affect tests/functional.py
    parser = OptionParser(usage=main.__doc__)
    parser.add_option('-b', action='store_true', dest='batch', default=False,
        help='Batch mode; password is passed on the command line IN THE CLEAR.')
    parser.add_option('-c', action='store_true', dest='create', default=False,
        help='Create a new htpasswd file, overwriting any existing file.')
    parser.add_option('-D', action='store_true', dest='delete_user', default=False,
        help='Remove the given user from the password file.')
    options, args = parser.parse_args()
    # Explicit check instead of ``assert``: asserts disappear under
    # ``python -O``, and parser.error gives the user a proper message.
    if not options.batch:
        parser.error('only batch mode (-b) is supported')
    # Non-option arguments
    filename, username = args[:2]
    if options.delete_user:
        password = None
    else:
        password = args[2]
    passwdfile = HtpasswdFile(filename, create=options.create)
    if options.delete_user:
        passwdfile.delete(username)
    else:
        passwdfile.update(username, password)
    passwdfile.save()
if __name__ == '__main__':
    main()
| bsd-3-clause | Python |
|
88eb8887bd71702fbf0c5095d8c2d637876de4b8 | Add the upload_file_test | seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase | examples/upload_file_test.py | examples/upload_file_test.py | from seleniumbase import BaseCase
class FileUploadButtonTests(BaseCase):
    """ The main purpose of this is to test the self.choose_file() method. """
    def test_file_upload_button(self):
        # Live browser test against w3schools' file-upload demo page.
        self.open("https://www.w3schools.com/jsref/tryit.asp"
                  "?filename=tryjsref_fileupload_get")
        # Remove Google ad elements so they cannot overlap the control.
        self.wait_for_element('[id*="google_ads"]')
        self.remove_elements('[id*="google_ads"]')
        self.switch_to_frame('iframeResult')
        # Enlarge the tiny native file input so it is visible in demos.
        self.add_css_style(
            'input[type="file"]{zoom: 1.5;-moz-transform: scale(1.5);}')
        self.highlight('input[type="file"]')
        self.choose_file('input[type="file"]', "example_logs/screenshot.png")
        self.demo_mode = True  # Adds highlighting to the assert statement
        self.assert_element('input[type="file"]')
| mit | Python |
|
a98ba6efa109383ecc1dfeb07691dc0a4a4e2a5b | Update migrations | hobarrera/django-afip,hobarrera/django-afip | django_afip/migrations/0002_auto_20150909_1837.py | django_afip/migrations/0002_auto_20150909_1837.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('afip', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='tax',
name='amount',
field=models.DecimalField(decimal_places=2, max_digits=15, verbose_name='cantidad'),
),
migrations.AlterField(
model_name='vat',
name='amount',
field=models.DecimalField(decimal_places=2, max_digits=15, verbose_name='cantidad'),
),
]
| isc | Python |
|
9668580633a1a8baaa59030e5a52d2478222cbd2 | Add cost tracking file to openstack | opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor | nodeconductor/openstack/cost_tracking.py | nodeconductor/openstack/cost_tracking.py | from . import models
from nodeconductor.cost_tracking import CostTrackingBackend
class OpenStackCostTrackingBackend(CostTrackingBackend):
@classmethod
def get_monthly_cost_estimate(cls, resource):
backend = resource.get_backend()
return backend.get_monthly_cost_estimate()
| mit | Python |
|
2dfa68eb458cfc7d6166ede8a222b1d11b9577a0 | Create grabscreen.py | Sentdex/pygta5 | grabscreen.py | grabscreen.py | # Done by Frannecklp
import cv2
import numpy as np
import win32gui, win32ui, win32con, win32api
def grab_screen(region=None):
    """Capture the Windows desktop (or a sub-region) as an RGB numpy array.

    Args:
        region: optional (left, top, x2, y2) inclusive bounding box in
            screen coordinates; when None the whole virtual screen
            (all monitors) is captured.

    Returns:
        numpy array of shape (height, width, 3) in RGB channel order.
    """
    hwin = win32gui.GetDesktopWindow()

    if region:
            left,top,x2,y2 = region
            width = x2 - left + 1
            height = y2 - top + 1
    else:
        # Virtual-screen metrics cover the union of all monitors.
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)

    # GDI: copy the screen DC into an in-memory bitmap.
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)

    # Raw BGRA pixel bytes -> (height, width, 4) array.
    # NOTE(review): np.fromstring is deprecated in modern NumPy
    # (np.frombuffer is the replacement) -- confirm target NumPy version.
    signedIntsArray = bmp.GetBitmapBits(True)
    img = np.fromstring(signedIntsArray, dtype='uint8')
    img.shape = (height,width,4)

    # Release all GDI handles to avoid leaking system resources.
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())

    # Drop the alpha channel and convert BGRA -> RGB.
    return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
| mit | Python |
|
0486e02bbaefea63a2dff9983be51623a184dc66 | test python interpreter | google-code-export/los-cocos,google-code-export/los-cocos | test/test_interpreter_layer.py | test/test_interpreter_layer.py | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
import pyglet
if __name__ == "__main__":
    # Boot the director with default settings and show a single scene that
    # contains only the interactive Python interpreter layer.
    director.init()
    interpreter_layer = cocos.layer.InterpreterLayer()
    main_scene = cocos.scene.Scene(interpreter_layer)
    director.run(main_scene)
| bsd-3-clause | Python |
|
76baf574ba5a4ff9e835412e27fd2ebc634a9992 | add Cython register test | jck/pymtl,Glyfina-Fernando/pymtl,tj93/pymtl,tj93/pymtl,12yujim/pymtl,cornell-brg/pymtl,jjffryan/pymtl,12yujim/pymtl,tj93/pymtl,12yujim/pymtl,Glyfina-Fernando/pymtl,cfelton/pymtl,jjffryan/pymtl,cfelton/pymtl,jjffryan/pymtl,jck/pymtl,cornell-brg/pymtl,jck/pymtl,Glyfina-Fernando/pymtl,cfelton/pymtl,cornell-brg/pymtl | new_pymtl/translation_tools/verilator_sim_test.py | new_pymtl/translation_tools/verilator_sim_test.py | from verilator_sim import get_verilated
from new_pmlib.regs import Reg
from new_pymtl import SimulationTool
def test_reg():
  # Verilate a 16-bit register model and simulate it end to end.
  # NOTE: Python 2 print statements; two-space indentation kept as-is.
  model = Reg(16)
  print "BEGIN"
  vmodel = get_verilated( model )
  print "END"
  vmodel.elaborate()
  sim = SimulationTool( vmodel )
  sim.reset()
  assert vmodel.out == 0
  vmodel.in_.value = 10
  sim.cycle()
  assert vmodel.out == 10
  # A register must hold its output until the next clock edge.
  vmodel.in_.value = 12
  assert vmodel.out == 10
  sim.cycle()
  assert vmodel.out == 12
| bsd-3-clause | Python |
|
214aa96b5e816ad6386fc20fed684152ac8181d1 | add migration for ip to generic ip field change | byteweaver/django-newsletters | newsletters/migrations/0003_auto_20150701_1840.py | newsletters/migrations/0003_auto_20150701_1840.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('newsletters', '0002_auto_20150630_0009'),
]
operations = [
migrations.AlterField(
model_name='subscription',
name='ip',
field=models.GenericIPAddressField(),
),
]
| bsd-3-clause | Python |
|
f90a9e585b5de36b3abc11cf454cde75a44a1a6b | Include Overlay Utils | MarvinTeichmann/KittiSeg | evaluation/overlay_utils.py | evaluation/overlay_utils.py | #!/usr/bin/env python
"""Utility functions for segmentation tasks."""
from PIL import Image
import scipy.ndimage
import numpy as np
def replace_colors(segmentation, color_changes):
    """
    Replace the values in segmentation to the values defined in color_changes.

    Parameters
    ----------
    segmentation : numpy array
        Two dimensional
    color_changes : dict
        The key is the original color, the value is the color to change to.
        The key 'default' is used when the color is not in the dict.
        If default is not defined, no replacement is done.
        Each color has to be a tuple (r, g, b) with r, g, b in {0, 1, ..., 255}

    Returns
    -------
    PIL.Image.Image
        The recolored segmentation as an RGBA image.
    """
    # NOTE(review): this module imports only scipy.ndimage, so scipy.misc is
    # unresolved here (AttributeError at runtime); scipy.misc.toimage was
    # also removed from SciPy >= 1.2 -- confirm the pinned SciPy version.
    width, height = segmentation.shape
    output = scipy.misc.toimage(segmentation)
    output = output.convert('RGBA')
    # Per-pixel recoloring; putpixel takes (col, row), hence the (y, x) swap.
    for x in range(0, width):
        for y in range(0, height):
            if segmentation[x, y] in color_changes:
                output.putpixel((y, x), color_changes[segmentation[x, y]])
            elif 'default' in color_changes:
                output.putpixel((y, x), color_changes['default'])
    return output
def overlay_segmentation(image, segmentation, color_dict):
    """
    Overlay original_image with segmentation_image.

    Parameters
    ----------
    image : numpy array
        The original image to draw the segmentation on top of.
    segmentation : numpy array
        Two dimensional label map with the same height/width as *image*.
    color_dict : dict
        Maps a label value to an RGBA tuple; the key 'default' is used when
        the label is not in the dict.

    Returns
    -------
    np.array
        The image with the colored segmentation pasted over it.
    """
    # NOTE(review): scipy.misc is used but only scipy.ndimage is imported at
    # module level; the recoloring loop duplicates replace_colors() above.
    width, height = segmentation.shape
    output = scipy.misc.toimage(segmentation)
    output = output.convert('RGBA')
    for x in range(0, width):
        for y in range(0, height):
            if segmentation[x, y] in color_dict:
                output.putpixel((y, x), color_dict[segmentation[x, y]])
            elif 'default' in color_dict:
                output.putpixel((y, x), color_dict['default'])
    # Paste with the overlay itself as mask so transparent pixels keep the
    # original image visible.
    background = scipy.misc.toimage(image)
    background.paste(output, box=None, mask=output)
    return np.array(background)
| mit | Python |
|
c37452e7cd4401bd7cbb8e855af65c26d730187c | add web_utl for crawler | PegasusWang/wechannel,PegasusWang/wechannel,PegasusWang/wechannel,PegasusWang/wechannel | crawler/web_util.py | crawler/web_util.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
chrome有个功能,对于请求可以直接右键copy as curl,然后在命令行里边用curl
模拟发送请求。现在需要把此curl字符串处理成requests库可以传入的参数格式,
http://stackoverflow.com/questions/23118249/whats-the-difference-between-request-payload-vs-form-data-as-seen-in-chrome
"""
import re
from functools import wraps
import traceback
import requests
def encode_to_dict(encoded_str):
    """Split a form/url-encoded string into a dict.

    >>> encode_to_dict('name=foo')
    {'name': 'foo'}
    >>> encode_to_dict('name=foo&val=bar') == {'name': 'foo', 'val': 'bar'}
    True
    """
    # Fixes two defects: the old doctest examples were malformed (missing
    # quotes / wrong value), and splitting on every '=' truncated values
    # that themselves contain '=' (common in curl --data payloads).
    d = {}
    for pair in encoded_str.split('&'):
        if pair:
            key, _, val = pair.partition('=')
            d[key] = val
    return d
def parse_curl_str(s):
    """convert chrome curl string to url, headers dict and data"""
    quoted = re.compile("'(.*?)'")
    # re.split with a capturing group alternates unquoted text (the flags)
    # with the single-quoted values that follow them.
    tokens = [part.strip() for part in re.split(quoted, s)]
    url, headers, data = '', {}, ''
    for flag, value in zip(tokens[0::2], tokens[1::2]):
        if flag.startswith('curl'):
            url = value
        elif flag.startswith('-H'):
            header_key, header_val = value.split(':', 1)
            headers[header_key.strip()] = header_val.strip()
        elif flag.startswith('--data'):
            data = value
    return url, headers, data
def retry(retries=3):
    """Retry a failing HTTP request up to *retries* times.

    (For something more powerful see the ``retrying`` package:
    https://github.com/rholder/retrying)

    Stops early on 404 or 200; any other status or an exception triggers
    another attempt. Returns the last response, or None if every attempt
    raised.

    :param retries: int, maximum number of attempts.
    """
    def _retry(func):
        @wraps(func)
        def _wrapper(*args, **kwargs):
            # Initialise up front so the name is bound even when
            # retries < 1 (the old code raised UnboundLocalError then).
            response = None
            index = 0
            while index < retries:
                index += 1
                try:
                    response = func(*args, **kwargs)
                    if response.status_code == 404:
                        print(404)
                        break
                    elif response.status_code != 200:
                        print(response.status_code)
                        continue
                    else:
                        break
                except Exception:
                    traceback.print_exc()
                    response = None
            return response
        return _wrapper
    return _retry
# Keep a reference to the real implementation before monkey-patching below.
_get = requests.get
@retry(5)
def get(*args, **kwds):
    # Fill in crawler defaults without overriding caller-supplied values.
    if 'timeout' not in kwds:
        kwds['timeout'] = 10
    if 'headers' not in kwds:
        headers = {
            'User-Agent': 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
        }
        kwds['headers'] = headers
    return _get(*args, **kwds)
# Monkey-patch: every requests.get in this process now retries and applies
# the defaults above. Intrusive, but deliberate for this crawler.
requests.get = get
| mit | Python |
|
a12dd320df30404df8c8ec196e21067376cc1e2c | Add tests of table and column pickling | mhvk/astropy,astropy/astropy,astropy/astropy,larrybradley/astropy,funbaker/astropy,joergdietrich/astropy,joergdietrich/astropy,dhomeier/astropy,larrybradley/astropy,pllim/astropy,MSeifert04/astropy,pllim/astropy,dhomeier/astropy,StuartLittlefair/astropy,tbabej/astropy,lpsinger/astropy,saimn/astropy,tbabej/astropy,dhomeier/astropy,AustereCuriosity/astropy,stargaser/astropy,AustereCuriosity/astropy,kelle/astropy,stargaser/astropy,bsipocz/astropy,tbabej/astropy,larrybradley/astropy,StuartLittlefair/astropy,mhvk/astropy,stargaser/astropy,joergdietrich/astropy,pllim/astropy,bsipocz/astropy,dhomeier/astropy,kelle/astropy,aleksandr-bakanov/astropy,lpsinger/astropy,bsipocz/astropy,DougBurke/astropy,StuartLittlefair/astropy,lpsinger/astropy,lpsinger/astropy,joergdietrich/astropy,funbaker/astropy,saimn/astropy,StuartLittlefair/astropy,mhvk/astropy,pllim/astropy,stargaser/astropy,aleksandr-bakanov/astropy,funbaker/astropy,joergdietrich/astropy,saimn/astropy,tbabej/astropy,bsipocz/astropy,astropy/astropy,kelle/astropy,DougBurke/astropy,lpsinger/astropy,larrybradley/astropy,aleksandr-bakanov/astropy,astropy/astropy,funbaker/astropy,AustereCuriosity/astropy,mhvk/astropy,pllim/astropy,larrybradley/astropy,saimn/astropy,StuartLittlefair/astropy,astropy/astropy,tbabej/astropy,DougBurke/astropy,kelle/astropy,AustereCuriosity/astropy,dhomeier/astropy,AustereCuriosity/astropy,MSeifert04/astropy,MSeifert04/astropy,DougBurke/astropy,saimn/astropy,MSeifert04/astropy,mhvk/astropy,aleksandr-bakanov/astropy,kelle/astropy | astropy/table/tests/test_pickle.py | astropy/table/tests/test_pickle.py | import cPickle as pickle
import numpy as np
import pytest
from ...table import Table, Column, MaskedColumn
@pytest.fixture(params=[0, 1, -1])
def protocol(request):
    """
    Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).
    """
    # -1 selects pickle.HIGHEST_PROTOCOL.
    return request.param
def test_pickle_column(protocol):
    """Round-trip a plain Column through pickle at the given protocol."""
    c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
    # Pass the fixture value; it was previously ignored, so only the
    # default pickle protocol was ever exercised.
    cs = pickle.dumps(c, protocol)
    cp = pickle.loads(cs)
    assert np.all(cp == c)
    assert cp.attrs_equal(c)
def test_pickle_masked_column(protocol):
    """Round-trip a MaskedColumn (with mask and fill_value) through pickle."""
    c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm',
                     meta={'a': 1})
    c.mask[1] = True
    c.fill_value = -99
    # Use the fixture's protocol instead of silently ignoring it.
    cs = pickle.dumps(c, protocol)
    cp = pickle.loads(cs)
    assert np.all(cp._data == c._data)
    assert np.all(cp.mask == c.mask)
    assert cp.attrs_equal(c)
    assert cp.fill_value == -99
def test_pickle_table(protocol):
    """Round-trip a two-column Table through pickle at the given protocol."""
    a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
    b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
               meta={'b': 1})
    t = Table([a, b], meta={'a': 1})
    # Use the fixture's protocol instead of silently ignoring it.
    ts = pickle.dumps(t, protocol)
    tp = pickle.loads(ts)
    assert np.all(tp['a'] == t['a'])
    assert np.all(tp['b'] == t['b'])
    assert tp['a'].attrs_equal(t['a'])
    assert tp['b'].attrs_equal(t['b'])
    assert tp.meta == t.meta
def test_pickle_masked_table(protocol):
    """Round-trip a masked Table, checking data, mask and fill_value."""
    a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
    b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
               meta={'b': 1})
    t = Table([a, b], meta={'a': 1}, masked=True)
    t['a'].mask[1] = True
    t['a'].fill_value = -99
    # Use the fixture's protocol instead of silently ignoring it.
    ts = pickle.dumps(t, protocol)
    tp = pickle.loads(ts)
    for colname in ('a', 'b'):
        for attr in ('_data', 'mask', 'fill_value'):
            # Compare the original against the unpickled copy; the old
            # assertion compared tp to itself and could never fail.
            assert np.all(getattr(t[colname], attr) == getattr(tp[colname], attr))
    assert tp['a'].attrs_equal(t['a'])
    assert tp['b'].attrs_equal(t['b'])
    assert tp.meta == t.meta
| bsd-3-clause | Python |
|
fb5f6b5db2e2701692dd0a35dfad36d7b6dd4f2d | Create example file | josuemontano/blender_wrapper | example.py | example.py | from blender_wrapper.api import Scene
from blender_wrapper.api import Camera
from blender_wrapper.api import SunLamp
from blender_wrapper.api import ORIGIN
def main():
    # 1500x1000 render written to the user's desktop.
    scene = Scene(1500, 1000, filepath="~/Desktop/")
    scene.setup()
    # Camera one unit forward and up from the origin, pitched 90 degrees.
    camera = Camera((1, 0, 1), (90, 0, 0), view_align=True)
    camera.add_to_scene()
    # Sun lamp of strength 10, three units above the origin, aimed at it.
    lamp = SunLamp(10, (0, 0, 3), ORIGIN)
    lamp.add_to_scene()
    scene.render(resolution_percentage=100)
# Execute running:
# blender --background -P ./test.py
if __name__ == "__main__":
    main()
| mit | Python |
|
62032986f4e57c85f842c16fdb916b0a19bdbd0e | Create _webui.py | flipchan/LayerProx,flipchan/LayerProx,flipchan/LayerProx,flipchan/LayerProx | marionette_tg/plugins/_webui.py | marionette_tg/plugins/_webui.py | import flask
import gnupg, base64
#https://gist.github.com/dustismo/6203329 / apt-get install libleveldb1 libleveldb-dev && pip install plyvel
#import plyvel #leveldb, very fast, you can even run the database in ram if you want
#import MySQLdb #if you want mysql
from os import urandom
from base64 import b64decode
import datetime
import sys
from functools import wraps
# Pick the database driver from the marionette configuration.
# NOTE(review): ``marionette_tg`` is used here but never imported in this
# module, so importing this file raises NameError; ``print 'error'`` below
# is Python 2 syntax.
datab = marionette_tg.conf.get("server.database")
if datab == 'leveldb':
    import plyvel
elif datab == 'mysql':
    import MySQLdb
else:
    print 'error'
#webui for layerprox
lp = flask.Flask(__name__)
dbplace = '' #database directory, test: '/tmp/testdb/'
def add_response_headers(headers={}):
    """Decorator factory: merge *headers* into every response of the view.

    NOTE(review): the mutable default dict is shared across calls; callers
    in this module always pass an explicit dict and it is only read, never
    mutated, so this is safe here.
    """
    def decorator(view):
        @wraps(view)
        def wrapped(*args, **kwargs):
            resp = flask.make_response(view(*args, **kwargs))
            for name, value in headers.items():
                resp.headers[name] = value
            return resp
        return wrapped
    return decorator
#general trolling bots
@lp.before_request
def blockuseragentreq():
    """Redirect requests from known scanner/bot user agents to the decoy page."""
    useragent = flask.request.headers['User-Agent']
    # 'curl' used to be in this list but is deliberately left enabled.
    for marker in ('sqlmap', 'wget', 'w3af', 'Scanner'):
        if marker in useragent:
            return flask.redirect(flask.url_for('dummy'))
#root@box:~# curl -I http://127.0.0.1:80/
#HTTP/1.0 200 OK
#Content-Type: text/html; charset=utf-8
#Content-Length: 198
#Server: amiIN9vf36G1T3xzpg==
#Date: Sat, 26 Nov 2016 10:24:22 GMT
#protection against server fingerprinting
def antifingerprint(f):
    # Random fake "Server" header value, generated ONCE per decorated view
    # at import time (not per request). 13 random bytes base64-encode with
    # '==' padding, which is stripped below.
    # NOTE(review): Python 2 only -- on Python 3 b64encode returns bytes and
    # str.replace / the header assignment would need a decode.
    jibbrish = base64.b64encode(urandom(13))
    jibbrish = jibbrish.replace("==", "")
    @wraps(f)
    @add_response_headers({'Server': jibbrish})
    def decorated_function(*args, **kwargs):
        return f(*args, **kwargs)
    return decorated_function
@lp.route('/')
@antifingerprint
def firstpage():
    """Landing page: a static HTML banner identifying the server."""
    return '''
    <html>
    <head>
    <title>LayerProx</title>
    </head>
    <body>
    <h2>
    <center>This is a LayerProx server
    </h2>
    <br>
    <br>
    <t>get encrypted by goin to /getproxied</t>
    </center>
    </body>
    </html>
    '''
#@lp.route('', methods=['GET'])
@lp.route('/getproxied', methods=['GET', 'POST'])
@antifingerprint
def get_registerd():
    # NOTE(review): work in progress. ``urandom()`` is missing its required
    # size argument (TypeError at runtime); ``day``/``h1``/``fingerprint``
    # are computed but never used; and a plain GET falls through returning
    # None, which flask turns into a 500 error.
    if flask.request.method == 'POST':
        day = gen_day()
        h1 = urandom()
        fingerprint = 'x'
        return '''
        <html>
        <head>
        <title> LayerProx</title>
        </head>
        <body>
        </body>
        </html>
        '''
#choice a serve
#choice a serverr
#if sucess redirect to
@lp.route('/welcome')
@antifingerprint
def wel():
    """Static welcome page shown after successful registration."""
    return '''
    <html>
    <body>
    <center>
    <h1>Welcome to the LayerProx network</h1>
    </body>
    </html>
    '''
#make the bots read 1984
@lp.route('/nobots')
@antifingerprint
def dummy():
    """Decoy endpoint for scanners: redirect them to Orwell's 1984 PDF."""
    return flask.redirect('http://msxnet.org/orwell/1984.pdf', code=302)
#tell noone pub info, only dax
@lp.route('/robots.txt')
@antifingerprint
def robots():
    """Serve robots.txt barring all crawlers except DuckDuckBot.

    Fixes the misspelled ``Dissallow`` directive: crawlers silently ignore
    unknown directives, so the old file was effectively a no-op.
    """
    return '''
User-agent: *
Disallow: /
User-agent: DuckDuckBot/1.1
Disallow:
User-agent: DuckDuckBot/1.0
Disallow:
    '''
def check_db():
    # Sweep the LevelDB store: keys are prefixed with an ISO date plus an
    # hmac (see comment below), so iterating from today's date yields the
    # entries whose prefix sorts >= today.
    # NOTE(review): this deletes every key from today ONWARD, which looks
    # inverted for an expiry sweep (one would expect to drop keys sorting
    # *before* today) -- confirm intent. ``plyvel`` is also only imported
    # when the config selects leveldb.
    db = plyvel.DB(dbplace, create_if_missing=True)
    #db.put(b'20', b'value')
    today = datetime.date.today()
    today = str(today)
    #key prefix is date + hmac, one value is date
    for key, value in db.iterator(start=today):
        if key:
            db.delete(key)
        else:
            pass
#datetime syntax
#>>> today = datetime.date.today()
#>>> print today
#2016-11-02
#>>> today = datetime.date.today()
#>>> EndDate = today + timedelta(days=10)
#>>> print EndDate
#2016-11-12
#day generation system
def gen_day():
    # Intended to return today's date plus a random number of days.
    # NOTE(review): Python 2 only -- indexing a map() result fails on
    # Python 3.
    test = map(ord, urandom(10))
    test = str(test[0])# first number pair
    test = test[0] # first number
    test = int(test[0])
    test2 = map(ord, urandom(test))
    # NOTE(review): test2 has exactly ``test`` elements (indices 0..test-1),
    # so ``test2[test]`` ALWAYS raises IndexError; and when the first digit
    # is 0, urandom(0) yields an empty sequence. This function cannot
    # currently return.
    number = test2[test]
    today = datetime.date.today()
    number = int(number)
    #number * daytime
    day = today + datetime.timedelta(days=+number) #plus int generated
    return day
#mysql - db
#create database lp;
#create table layerprox(
#fingerprint hmac h1 h2 to_date
#)
if __name__ == '__main__':
    # Serve on port 80 (requires privileges); debug disabled for production.
    lp.run(debug=False,port=80) #host=0.0.0.0
| apache-2.0 | Python |
|
68e16ca50bec3802184e098548aa2c2584c352b2 | Add main example code | nparley/signal_decorator | signal_decorator.py | signal_decorator.py | #!/usr/bin/python
__author__ = 'Neil Parley'
from functools import wraps
import signal
import sys
def catch_sig(f):
    """Decorator that installs SIGTERM/SIGINT handlers before calling *f*.

    The handler prints a message and exits the process cleanly.

    :param f: Function
    :return: Function wrapped with registered signal handling
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        def on_kill(*_):
            print('Got killed')
            sys.exit(0)
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, on_kill)
        return f(*args, **kwargs)
    return wrapper
@catch_sig
def test():
    # Manual demo: run the script, then send SIGINT/SIGTERM within 60s to
    # see the handler fire ("Got killed") and exit cleanly.
    import time
    print("Waiting")
    time.sleep(60)
if __name__ == "__main__":
    test()
|
950bdd0f528fc61175c39dc2ade6abb9d46d767a | Change plan on book | phildini/logtacts,phildini/logtacts,phildini/logtacts,phildini/logtacts,phildini/logtacts | contacts/migrations/0027_auto_20170106_0627.py | contacts/migrations/0027_auto_20170106_0627.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-06 06:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0026_auto_20161231_2045'),
]
operations = [
migrations.AlterField(
model_name='book',
name='plan',
field=models.CharField(blank=True, choices=[('team_monthly', 'Team Monthly Subscription'), ('basic_yearly', 'Basic Yearly Subscription'), ('basic_monthly', 'Basic Monthly Subscription'), ('family_monthly', 'Family Monthly Subscription'), ('family_yearly', 'Family Yearly Subscription'), ('team_yearly', 'Team Yearly Subscription')], max_length=100),
),
migrations.AlterField(
model_name='historicalbook',
name='plan',
field=models.CharField(blank=True, choices=[('team_monthly', 'Team Monthly Subscription'), ('basic_yearly', 'Basic Yearly Subscription'), ('basic_monthly', 'Basic Monthly Subscription'), ('family_monthly', 'Family Monthly Subscription'), ('family_yearly', 'Family Yearly Subscription'), ('team_yearly', 'Team Yearly Subscription')], max_length=100),
),
]
| mit | Python |
|
97c87237de87c91d66a92c1cacc362a7b831b8ef | add script to install python modules with pip | nateGeorge/IDmyDog,nateGeorge/IDmyDog,nateGeorge/IDmyDog | install_py_modules.py | install_py_modules.py | # this will install most necessary packages for this project
# that you may not already have on your system
import pip
def install(package):
    """Install *package* with pip.

    ``pip.main`` was removed from pip's public API in pip 10, so importing
    pip and calling it in-process breaks on any modern pip.  The supported
    approach is to invoke pip as a subprocess of the current interpreter.
    Raises ``subprocess.CalledProcessError`` if the install fails (the old
    code silently ignored pip's return code).
    """
    import subprocess
    import sys
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
# Example
if __name__ == '__main__':
    # Install each required package in the original order; the comment on
    # each entry records why the project needs it.
    required = [
        'Scrapy',        # scraping akc.org for a list of breed names and pics
        'mahotas',       # calculating Haralick textures
        'imutils',       # image operations convenience functions
        'seaborn',       # plotting package
        'pandas',        # data operations
        'scikit-learn',  # machine learning lib
        'scikit-image',  # image processing
    ]
    for package_name in required:
        install(package_name)
# eta and % completion of tasks
install('progressbar') | mit | Python |
|
98e822a78722e735b31817e74cc5e310fcb43c9a | add missed migration (HomeBanner verbose texts) | brasilcomvc/brasilcomvc,brasilcomvc/brasilcomvc,brasilcomvc/brasilcomvc | brasilcomvc/portal/migrations/0005_homebanner_verbose.py | brasilcomvc/portal/migrations/0005_homebanner_verbose.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Attach human-readable (Portuguese) verbose names to HomeBanner."""

    dependencies = [
        ('portal', '0004_homebanner_image_upload_to'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='homebanner',
            options={'verbose_name_plural': 'Banners da Home',
                     'verbose_name': 'Banner da Home'},
        ),
    ]
| apache-2.0 | Python |
|
35310a8fa136b5b6e094401a8289f5eabeb28cbc | Create batterylevel.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/hairygael/batterylevel.py | home/hairygael/batterylevel.py | def batterylevel():
# subprocess.call() only returns the exit code, and the second argument
# '"r".readline()' crashed outright (str has no readline); capture WMIC's
# stdout instead.
output = subprocess.check_output("WMIC PATH Win32_Battery Get EstimatedChargeRemaining")
if not isinstance(output, str):
    # Python 3 returns bytes; MyRobotLab's Jython returns str.
    output = output.decode("utf-8", "replace")
# WMIC prints a header line and then the numeric value; keep digit tokens.
digits = [token for token in output.split() if token.isdigit()]
# EstimatedChargeRemaining is already a 0-100 percentage, so the old
# "* 100" scaling (which also built a tuple) was wrong.
ANSWER = float(digits[0]) if digits else 0.0
# i01 is presumably the InMoov robot instance injected by the MyRobotLab
# runtime -- TODO confirm.
i01.mouth.speak(str(ANSWER) + " %")
| apache-2.0 | Python |
|
c7851b61268848cf1b02d9e5c845a846ded4c2a7 | Update __init__.py | r0h4n/node-agent,Tendrl/node_agent,r0h4n/node-agent,Tendrl/node-agent,Tendrl/node-agent,Tendrl/node-agent,r0h4n/node-agent,Tendrl/node_agent | tendrl/node_agent/objects/cluster_message/__init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
    """A message scoped to a cluster, stored under clusters/<id>/messages."""

    # Not exposed through the public object API.
    internal = True

    def __init__(self, **cluster_message):
        self._defs = {}
        # Initialise both bases explicitly; ``message`` consumes the kwargs.
        message.__init__(self, **cluster_message)
        objects.BaseObject.__init__(self)
        # etcd key template; ids are interpolated by the etcd object below.
        self.value = 'clusters/%s/messages/%s'
        self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
    """Cluster message object, lazily updated."""

    __name__ = 'clusters/%s/messages/%s'
    _tendrl_cls = ClusterMessage

    def render(self):
        # Interpolate the concrete cluster/message ids into the etcd key
        # template before delegating to the base renderer.
        self.__name__ = self.__name__ % (self.cluster_id, self.message_id)
        return super(_ClusterMessageEtcd, self).render()
| from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
message.__init__(self, **cluster_message)
objects.BaseObject.__init__(self)
self.value = 'clusters/%s/Messages/%s'
self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/Messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| lgpl-2.1 | Python |
7fddacd1a751c095f70693bb703bb9959a706ae1 | Add an example with end to end data | therve/nabu,therve/nabu | example.py | example.py | """
Example script for getting events over a Zaqar queue.
To run:
$ export IDENTITY_API_VERSION=3
$ source ~/devstack/openrc
$ python example.py
"""
import json
import os
import uuid
import requests
import websocket
from keystoneauth1.identity import v3
from keystoneauth1 import session
client_id = str(uuid.uuid4())
def authenticate(ws, token, project_id):
    """Authenticate this websocket client against Zaqar; return the reply."""
    payload = {
        'action': 'authenticate',
        'headers': {
            'X-Auth-Token': token,
            'Client-ID': client_id,
            'X-Project-ID': project_id,
        },
    }
    ws.send(json.dumps(payload))
    return ws.recv()
def send_message(ws, project_id, action, body=None):
    """Send one Zaqar *action* over the websocket; return the decoded reply."""
    request = {
        'action': action,
        'headers': {'Client-ID': client_id, 'X-Project-ID': project_id},
    }
    if body:
        request['body'] = body
    ws.send(json.dumps(request))
    return json.loads(ws.recv())
def main():
    """End-to-end demo: subscribe via Nabu, then stream events from Zaqar."""
    # Credentials come from the environment (see the module docstring).
    auth_url = os.environ.get('OS_AUTH_URL')
    user = os.environ.get('OS_USERNAME')
    password = os.environ.get('OS_PASSWORD')
    project = os.environ.get('OS_PROJECT_NAME')
    auth = v3.Password(
        auth_url=auth_url,
        username=user,
        user_domain_name='default',
        password=password,
        project_name=project,
        project_domain_name='default',
    )
    sess = session.Session(auth=auth)
    token = auth.get_token(sess)
    project_id = auth.get_project_id(project)
    # Register a Nabu subscription forwarding compute events to our queue.
    nabu_url = auth.get_endpoint(sess, service_type='subscription')
    requests.post(
        '%s/v1/subscription' % (nabu_url,),
        data=json.dumps({'source': 'compute', 'target': 'nabu_queue'}),
        headers={'X-Auth-Token': token, 'Content-Type': 'application/json'})
    # Open the Zaqar websocket, create the queue and subscribe to it.
    ws_url = auth.get_endpoint(sess, service_type='messaging-websocket')
    ws = websocket.create_connection(ws_url.replace('http', 'ws'))
    authenticate(ws, token, project_id)
    send_message(ws, project_id, 'queue_create', {'queue_name': 'nabu_queue'})
    send_message(ws, project_id, 'subscription_create',
                 {'queue_name': 'nabu_queue', 'ttl': 3000})
    # Block forever, draining events as they arrive.
    while True:
        ws.recv()


if __name__ == '__main__':
    main()
| apache-2.0 | Python |
|
e1907624a143d0733cd89e5458d104ed0a4fee43 | Add simple tasks | zkan/fabric-workshop,zkan/fabric-workshop | fabfile.py | fabfile.py | # Simple Tasks
def hello():
    """Greet the ThaiPy meetup.

    The original used a Python-2-only print statement; the parenthesised
    single-argument form prints identically on Python 2 and is valid on 3.
    """
    print('Hello ThaiPy!')
def hi(name='Kan'):
    """Greet *name* (defaults to 'Kan').

    Parenthesised print replaces the Python-2-only statement form; output is
    byte-identical on Python 2 and the code now runs on Python 3 too.
    """
    print('Hi ' + name)
| mit | Python |
|
50769229ce8ef4e84f345184b0aebf036bc0e179 | add fabfile | cydev/web,cydev/web | fabfile.py | fabfile.py | from fabric.api import local, put, run, cd, sudo
def status():
    """Show the systemd status of the web service on the remote host."""
    run("systemctl status web")
def restart():
    """Restart the web service on the remote host (needs sudo)."""
    sudo("systemctl restart web")
def deploy():
    """Package the site locally, upload and unpack it, then restart.

    NOTE(review): the dump lost indentation, so the extent of the ``cd``
    block is inferred -- only the untar is assumed to run inside it; confirm
    against the repository.
    """
    local('tar -czf cydev_web.tgz web static/')
    put("cydev_web.tgz", "~/cydev.ru")
    with cd("~/cydev.ru"):
        run("tar -xvf cydev_web.tgz")
    restart()
    status()
| bsd-3-clause | Python |
|
2af8c695c1463c080ce8c4bff7e3d81662a49c81 | implement generic decorator and register function | hephs/dispatk | dispatk.py | dispatk.py | """
This function is inspired by singledispatch of Python 3.4+ (PEP 443),
but the dispatch happens on the key extracted fro the arguments values.
from dispatk import dispatk
@dispatk(lambda n: int(n))
def fib(n):
return fib(n-1) + fib(n-2)
@fib.register(0)
def _(n):
return 0
@fib.register(1, 2)
def _(n):
return 1
@fib.register(41)
def _(n):
return 165580141
*register* accepts one or more keys.
@fib.register(1, 2)
def _(n):
return 1
is equivalent to
@fib.register(1)
@fib.register(2)
def _(n):
return 1
"""
from functools import wraps
__all__ = ('dispatk',)
def dispatk(keyer):
    """Decorator factory dispatching on a key computed from call arguments.

    *keyer* is called with the same arguments as the decorated function and
    must return a hashable object (int, tuple, etc.).  The decorated
    function gains a ``register`` method: ``@f.register(k1, k2, ...)`` binds
    an implementation to one or more keys and returns the decorated
    function; calls whose key is unregistered fall back to the original.
    """
    calls = {}

    def _dispatk(main):
        def register(*keys):
            """Bind the decorated implementation to every key in *keys*."""
            def _register(spec):
                for key in keys:
                    if key in calls:
                        # Bug fix: the original format string had a single
                        # %r placeholder for two arguments, so this raise
                        # itself blew up with TypeError instead of the
                        # intended ValueError.
                        raise ValueError(
                            "%s: function already registered for key %r"
                            % (main.__name__, key))
                    calls[key] = spec
                return spec
            return _register

        @wraps(main)
        def run(*args, **kwargs):
            # Look up the implementation for this key; default to *main*.
            return calls.get(keyer(*args, **kwargs), main)(*args, **kwargs)

        run.register = register
        return run
    return _dispatk
| mit | Python |
|
e823c55f62c8aa1d72ec3bf2b58288b3dd413561 | Create radix_sort.py | TheAlgorithms/Python | sorts/radix_sort.py | sorts/radix_sort.py | def radixsort(lst):
# Body of radixsort(lst): LSD radix sort over base-10 digits; sorts a list
# of non-negative integers in place and returns None.
RADIX = 10
# tmp must be pre-initialised for the empty-list case; placement is the
# current digit weight (1, 10, 100, ...).
tmp, placement = -1, 1
maxLength = False
while not maxLength:
    maxLength = True
    buckets = [[] for _ in range(RADIX)]
    # Distribute values by the current digit.
    for i in lst:
        # Bug fix: floor division.  On Python 3 the original "/" produced
        # floats and broke the bucket indexing; "//" is identical to "/"
        # for ints on Python 2.
        tmp = i // placement
        buckets[tmp % RADIX].append(i)
        if maxLength and tmp > 0:
            # Some value still has higher digits; another pass is needed.
            maxLength = False
    # Collect the buckets back into lst, preserving stable order.
    a = 0
    for bucket in buckets:
        for i in bucket:
            lst[a] = i
            a += 1
    placement *= RADIX
| mit | Python |
|
2b810eb1900ca96c7fb2d8b63b70b7b0df8b9ed5 | Create find_digits.py | costincaraivan/hackerrank,costincaraivan/hackerrank | algorithms/implementation/python3/find_digits.py | algorithms/implementation/python3/find_digits.py | #!/bin/python3
import sys
t = int(input().strip())
for a0 in range(t):
n = int(input().strip())
count = 0
digits = str(n)
for digit in digits:
if int(digit) != 0:
if n % int(digit) == 0:
count += 1
print(count)
| mit | Python |
|
c723865ae8013020f6f0a28cd41592c3dc900968 | add a second test for process_dc_env. | devopscenter/dcUtils,devopscenter/dcUtils | tests/process_dc_env_test_2.py | tests/process_dc_env_test_2.py | #!/usr/bin/env python
import sys
import os
import argparse
# There is a PEP8 warning about this next line not being at the top of the file.
# The better answer is to append the $dcUTILS/scripts directory to the sys.path
# but I wanted to illustrate it here...so your mileage may vary how you want
from process_dc_env import pythonGetEnv
# ==============================================================================
"""
This script provides an example of how to use the process_dc_env.py in a python
script. In a python script, the pythonGetEnv is imported from the
process_dc_env script and then called directly in the script. That function will
do the necessary handling of some of the arguments on behalf of the python
script. Any other arguments passed in are ignored by the process_dc_env script
and it is expected that the python script would handle the rest of them. The
pythonGetEnv will return a environment list presented in a dictionary with the
environment variable set as the key and the value, is, well, the value.
Note that the argparse statement for processing arguments needs to be a bit
different than what you probably normally use. We need to ignore some of the
commands that are processed in the proces_dc_env.py (ie appName, env and
workspaceName if used). to do this use parse_known_args instead of parse_args
"""
__version__ = "0.1"
__copyright__ = "Copyright 2016, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = "GPL"
__status__ = "Development"
# ==============================================================================
def checkArgs():
    """Parse this script's own options, ignoring the ones that
    process_dc_env handles on our behalf.

    Returns a ``(foo, workspaceName)`` tuple; either element may be None.
    """
    parser = argparse.ArgumentParser(
        description='Script that provides a facility to watch for file '
                    'changes and then perform actions based upon the files'
                    ' that change.')
    parser.add_argument('-f', '--foo', required=False,
                        help='foo option')
    parser.add_argument('-w', '--workspaceName', required=False,
                        help='The alternate '
                             'directory name to find the application env files '
                             'in. This will not change the .dcConfig/'
                             'baseDiretory file but will read it for the '
                             'alternate path and use it directly')
    # parse_known_args() (rather than parse_args()) tolerates the extra
    # options that process_dc_env.py consumes.
    args, _unknown = parser.parse_known_args()
    return (args.foo, args.workspaceName)
def main(argv):
    """Demonstrate pythonGetEnv() alongside this script's own arg parsing.

    *argv* is accepted for symmetry with the usual entry-point idiom, but
    the argument parsing itself reads sys.argv.
    """
    # for manageApp.py only ... or designed to only be used by manageApp.py
    # retVals = pythonGetEnv(initialCreate=True)
    try:
        (foo, workspaceName) = checkArgs()
    except SystemExit:
        # Bad/missing arguments: let process_dc_env report its usage, then
        # exit non-zero.
        pythonGetEnv()
        sys.exit(1)
    # normal call for all other python scripts
    retVals = pythonGetEnv()
    # Bug fix: the original py2-only print statements are a syntax error on
    # Python 3; the parenthesised single-argument form prints identically
    # on Python 2.
    print("=>{}<=".format(retVals))
    print("foo={}".format(foo))
    print("workspaceName={}".format(workspaceName))
    print("CUSTOMER_APP_NAME=" + retVals["CUSTOMER_APP_NAME"])
    print("ENV=" + retVals["ENV"])


if __name__ == "__main__":
    main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| apache-2.0 | Python |
|
51aefefc3cdcd131678e921a29b5acd5b9601b81 | add a unit-tests that essentially import the the python python file in src/dynamic_graph/ | stack-of-tasks/sot-core,stack-of-tasks/sot-core,stack-of-tasks/sot-core | tests/python/python_imports.py | tests/python/python_imports.py | #!/usr/bin/env python
import unittest
class PythonImportTest(unittest.TestCase):
def test_math_small_entities(self):
try:
import dynamic_graph.sot.core.math_small_entities
except ImportError as ie:
self.fail(str(ie))
def test_feature_position_relative(self):
try:
import dynamic_graph.sot.core.feature_position_relative
except ImportError as ie:
self.fail(str(ie))
def test_feature_position(self):
try:
import dynamic_graph.sot.core.feature_position
except ImportError as ie:
self.fail(str(ie))
def test_matrix_util(self):
try:
import dynamic_graph.sot.core.matrix_util
except ImportError as ie:
self.fail(str(ie))
def test_meta_task_6d(self):
try:
import dynamic_graph.sot.core.meta_task_6d
except ImportError as ie:
self.fail(str(ie))
def test_meta_task_posture(self):
try:
import dynamic_graph.sot.core.meta_task_posture
except ImportError as ie:
self.fail(str(ie))
def test_meta_task_visual_point(self):
try:
import dynamic_graph.sot.core.meta_task_visual_point
except ImportError as ie:
self.fail(str(ie))
def test_meta_tasks_kine_relative(self):
try:
import dynamic_graph.sot.core.meta_tasks_kine_relative
except ImportError as ie:
self.fail(str(ie))
def test_meta_tasks_kine(self):
try:
import dynamic_graph.sot.core.meta_tasks_kine
except ImportError as ie:
self.fail(str(ie))
def test_meta_tasks(self):
try:
import dynamic_graph.sot.core.meta_tasks
except ImportError as ie:
self.fail(str(ie))
def test_attime(self):
try:
import dynamic_graph.sot.core.utils.attime
except ImportError as ie:
self.fail(str(ie))
def test_history(self):
try:
import dynamic_graph.sot.core.utils.history
except ImportError as ie:
self.fail(str(ie))
def test_thread_interruptible_loop(self):
try:
import dynamic_graph.sot.core.utils.thread_interruptible_loop
except ImportError as ie:
self.fail(str(ie))
def test_viewer_helper(self):
try:
import dynamic_graph.sot.core.utils.viewer_helper
except ImportError as ie:
self.fail(str(ie))
def test_viewer_loger(self):
try:
import dynamic_graph.sot.core.utils.viewer_loger
except ImportError as ie:
self.fail(str(ie))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python |
|
606118fa4c7b203d986f37d061777beb843b278b | add consistency checker | eoss-cloud/madxxx_catalog_api,eoss-cloud/madxxx_catalog_api | catalog/model/check_consistency.py | catalog/model/check_consistency.py | from toolz.curried import operator
from api.eoss_api import Api
from dateutil.parser import parse
import datetime
import requests, grequests
import time
import logging
import click
from utilities import chunks
logger = logging.getLogger()
def append_data(file, data):
    """Append every item of *data* to *file*, one item per line."""
    with open(file, "a") as out:
        out.writelines(item + '\n' for item in data)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# Root click command group; sub-commands (e.g. check_consistency) attach to
# it via @cli.command.  The docstring below is user-visible --help text.
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version='1.0.0')
def cli(*args, **kwargs):
"""
EOSS catalog
consistency checker
check if registered external URLs exist (e.g. quicklooks, metadata or zip archives
"""
@click.option('--api_endpoint', nargs=1, default='http://api.eoss.cloud')
@click.argument('sensor', nargs=1)
@click.argument('year', nargs=1, type=click.INT)
# NOTE(review): this short_help looks copy-pasted from another command --
# this command checks URL consistency, it does not import sentinel2
# metadata; confirm and reword.
@cli.command('check_consistency', short_help='update catalog with exported sentinel2 metadata file')
def main(sensor, year, api_endpoint):
# Walk one day at a time (day 293 of `year` back to day 1) and verify that
# every catalog entry's external resources (zip archive, metadata,
# quicklook) are registered and reachable.
api = Api(api_endpoint)
# Whole-world AOI as a closed lon/lat ring.
aoi_nw = (-180, 90)
aoi_se = (180, -90)
aoi_ne = (aoi_se[0], aoi_nw[1])
aoi_sw = (aoi_nw[0], aoi_se[1])
aoi = [aoi_nw, aoi_ne, aoi_se, aoi_sw, aoi_nw]
for delta_day in range(293, 0, -1):
start_time = time.time()
start_date = parse('%d-01-01'% year) + datetime.timedelta(days=delta_day)
end_date = start_date + datetime.timedelta(days=1)
logger.info('Checking consistencty for %s between %s and %s' % (sensor, start_date.isoformat(), end_date.isoformat()))
# Object representation
results = api.search_dataset(aoi, 100, start_date, end_date, sensor, full_objects=False)
# Registered URLs to probe, plus bookkeeping for entries whose resources
# were never registered in the catalog.
url_resources = list()
missing_urls = list()
missing_types = list()
wrong_urls = list()
for r in results:
if r['resources']['s3public']['zip'] != None:
url_resources.append(r['resources']['s3public']['zip'])
else:
missing_urls.append('%s:%s' % (r['tile_identifier'], r['entity_id']))
missing_types.append('zip')
if r['resources']['metadata']!= None:
url_resources.append(r['resources']['metadata'])
else:
missing_urls.append('%s:%s' % (r['tile_identifier'], r['entity_id']))
missing_types.append('metadata')
if r['resources']['quicklook'] != None:
url_resources.append(r['resources']['quicklook'])
else:
missing_urls.append('%s:%s' % (r['tile_identifier'], r['entity_id']))
missing_types.append('quicklook')
logger.info('total scans: %d' %len(url_resources))
logger.info('already missed resources: %d' %len(missing_urls))
# Reachability check.  The sequential branch is permanently disabled
# (if False); HEAD requests are issued concurrently in batches of 500
# via grequests instead.
if False:
for counter, res in enumerate(url_resources):
req = requests.head(res)
if req.status_code != requests.codes.ok:
print res, req.status_code
missing_urls.append(res)
print res
if (counter % 25) == 0:
print counter
else:
counter = 0
for url_parts in chunks(url_resources, 500):
counter+=1
rs = (grequests.head(u) for u in url_parts)
res = grequests.map(rs)
for req in res:
if req.status_code != requests.codes.ok:
print res, req.status_code
# NOTE(review): this appends the whole batch `res` rather than the
# failing `req`, and always tags 'zip_registered' regardless of the
# resource type -- looks suspicious; confirm intent.
wrong_urls.append(res)
missing_types.append('zip_registered')
#print missing_urls
# Persist failures under /tmp and log a per-type summary.
if len(wrong_urls) > 0:
print wrong_urls
append_data('/tmp/wrong_urls.txt', wrong_urls)
if len(missing_urls) > 0:
append_data('/tmp/missing_urls.txt', missing_urls)
if len(missing_types) > 0:
for type in ['zip_registered', 'quicklook', 'metadata', 'zip']:
logger.info('%d:%s' % (operator.countOf(missing_types, type), type))
logger.info('Executed in %f secs.' % (time.time()-start_time))
if __name__ == '__main__':
cli()
| mit | Python |
|
f8067853546a9c25716aef6bc9f255591cb65626 | Add migration to change the project results report URL | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/migrations/0125_auto_20180315_0829.py | akvo/rsr/migrations/0125_auto_20180315_0829.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
ORIGINAL_URL = '/en/reports/project_results/{project}?format={format}&download=true'
NEW_URL = ORIGINAL_URL + '&p_StartDate={start_date}&p_EndDate={end_date}'
REPORT_ID = 1
def add_start_end_dates_report_url(apps, schema):
    """Forward step: point the project-results report at NEW_URL."""
    report_model = apps.get_model('rsr', 'Report')
    report = report_model.objects.get(id=REPORT_ID)
    report.url = NEW_URL
    report.save()
def remove_start_end_dates_report_url(apps, schema):
    """Reverse step: restore the project-results report to ORIGINAL_URL."""
    report_model = apps.get_model('rsr', 'Report')
    report = report_model.objects.get(id=REPORT_ID)
    report.url = ORIGINAL_URL
    report.save()
class Migration(migrations.Migration):
    """Swap the project-results report URL via a reversible RunPython."""

    dependencies = [
        ('rsr', '0124_auto_20180309_0923'),
    ]

    operations = [
        migrations.RunPython(add_start_end_dates_report_url,
                             remove_start_end_dates_report_url)
    ]
| agpl-3.0 | Python |
|
2aa0990746b71086b4c31ee81ac8874436c63e32 | Add a few tests (close #4) | liviu-/crosslink-ml-hn | tests/test_crosslinking_bot.py | tests/test_crosslinking_bot.py | from datetime import datetime
from datetime import date, timedelta
import pytest
from crosslinking_bot import crosslinking_bot as cb
class TestParseDate:
    """cb.parse_date() renders a date relative to today."""

    def test_return_today(self):
        assert cb.parse_date(datetime.today().date()) == 'today'

    def test_return_1_day_ago(self):
        assert cb.parse_date(date.today() - timedelta(1)) == '1 day ago'

    def test_return_2_days_ago(self):
        assert cb.parse_date(date.today() - timedelta(2)) == '2 days ago'
class TestPrepareComment:
    """cb.prepare_comment() should link every HN hit it is given."""

    @pytest.fixture
    def hn_hits(self):
        return [
            {'objectID': 12135399, 'created_at_i': 1469823139},
            {'objectID': 12135398, 'created_at_i': 1469821139},
        ]

    # Bug fix: the original methods omitted ``self``, so the ``hn_hits``
    # parameter was actually bound to the instance, and the fixture
    # function was then called directly (hn_hits.hn_hits()) -- pytest
    # forbids calling fixtures directly.  Request the fixture as a proper
    # argument instead.
    def test_one_hit_contains_right_url(self, hn_hits):
        hits = [hn_hits[0]]
        hn_url = cb.HN_STORY.format(hits[0]['objectID'])
        assert hn_url in cb.prepare_comment(hits)

    def test_two_hits_contain_second_url(self, hn_hits):
        hn_url = cb.HN_STORY.format(hn_hits[1]['objectID'])
        assert hn_url in cb.prepare_comment(hn_hits)

    def test_two_hits_contain_plural_form(self, hn_hits):
        assert 'discussions' in cb.prepare_comment(hn_hits)
| mit | Python |
|
3a178c100cbf64b8ab60954a9b9ea5a01640f842 | Integrate LLVM at llvm/llvm-project@852d84e36ed7 | paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,yongtang/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflo
w,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,yongtang/tensorflow,yongtang/tensorflow,karllessard/tensorflow | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
# Pinned LLVM revision and the sha256 of the matching GitHub source
# tarball; the two values must always be updated together.
LLVM_COMMIT = "852d84e36ed7a3db0ff4719f44a12b6bc09d35f3"
LLVM_SHA256 = "3def20f54714c474910e5297b62639121116254e9e484ccee04eee6815b5d58c"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
# TensorFlow mirror first, upstream GitHub as fallback.
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
# Local patches applied on top of the pinned revision.
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "0128f8016770655fe7a40d3657f00853e6badb93"
LLVM_SHA256 = "f90705c878399b7dccca9cf9b28d695a4c6f8a0e12f2701f7762265470fa6c22"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
52f49543dd7bf01a2a24db435d8461b7c8921789 | Integrate LLVM at llvm/llvm-project@9a764ffeb6f0 | karllessard/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/
tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
# Pinned LLVM revision and the sha256 of the matching GitHub source
# tarball; the two values must always be updated together.
LLVM_COMMIT = "9a764ffeb6f06a87c7ad482ae39f8a38b3160c5e"
LLVM_SHA256 = "8f000d6541d64876de8ded39bc140176c90b74c3961b9ca755b1fed44423c56b"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
# TensorFlow mirror first, upstream GitHub as fallback.
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
# Local patches applied on top of the pinned revision.
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "72136d8ba266eea6ce30fbc0e521c7b01a13b378"
LLVM_SHA256 = "54d179116e7a79eb1fdf7819aad62b4d76bc0e15e8567871cae9b675f7dec5c1"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
45254d35def51a5e8936fe649f8c3fc089cd4a6d | add `schemas.py` | kokimoribe/todo-api | todo/schemas.py | todo/schemas.py | """Request/Response Schemas are defined here"""
# pylint: disable=invalid-name
from marshmallow import Schema, fields
from marshmallow_enum import EnumField
from todo.enums import Status
class TaskSchema(Schema):
    """Marshmallow schema serialising a task record.

    All fields are required.  NOTE(review): the original docstring said
    "Schema for api.portal.models.Panel", which looks like a copy-paste
    from another project -- confirm what model this schema maps to.
    """

    id = fields.Int(required=True)
    title = fields.Str(required=True)
    description = fields.Str(required=True)
    status = EnumField(Status, required=True)  # serialised via marshmallow_enum
    created_at = fields.DateTime(required=True)
    updated_at = fields.DateTime(required=True)
| mit | Python |
|
4f9660704445e6da62fc4e893d93fc84288303d4 | Integrate LLVM at llvm/llvm-project@aec908f9b248 | Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-e
xperimental_link_static_libraries_once,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "aec908f9b248b27cb44217081c54e2c00604dff7"
LLVM_SHA256 = "c88b75b4d60b960c7da65b7bacfdf8c5cf4c7846ab85a334f1ff18a8b50f2d98"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "5dcd6afa20881490b38f3d88c4e59b0b4ff33551"
LLVM_SHA256 = "86f64f78ba3b6c7e8400fe7f5559b3dd110b9a4fd9bfe9e5ea8a4d27301580e0"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
735dee2da41bf8df8519d516bd9b231ff440f5f9 | Create globals.system module for Python & system related settings | AntumDeluge/desktop_recorder,AntumDeluge/desktop_recorder | source/globals/system.py | source/globals/system.py | # -*- coding: utf-8 -*-
## \package globals.system
# MIT licensing
# See: LICENSE.txt
import sys

# Major, minor and micro components of the running interpreter's version.
PY_VER_MAJ, PY_VER_MIN, PY_VER_REL = sys.version_info[:3]

# Human-readable "major.minor.micro" string (u-prefixed literal kept for
# Python 2 compatibility, matching the module's original style).
PY_VER_STRING = u'{}.{}.{}'.format(PY_VER_MAJ, PY_VER_MIN, PY_VER_REL)
| mit | Python |
|
b83b09f937f91a870165d88730a36faaee8a5261 | add a parser of metadata | suyuan1203/pyrets | retsmeta.py | retsmeta.py | # -*- coding: utf-8 -*-
from xml.etree import ElementTree
class MetaParser(object):
    """Abstract interface for RETS metadata parsers.

    Concrete subclasses are expected to implement each accessor; every
    method here is a deliberate no-op stub.
    """
    def GetResources(self):
        # Should return the resources declared in the metadata.
        pass
    def GetRetsClass(self, resource):
        # Should return the classes declared under *resource*.
        pass
    def GetTables(self, resource, rets_class):
        # Should return the table (field) definitions for a class.
        pass
    def GetLookUp(self, resource, rets_class):
        # Should return the lookup definitions for a class.
        pass
class StandardXmlMetaParser(MetaParser):
    """Metadata parser backed by a standard RETS metadata XML document."""

    # Element path from the document root down to the resource container.
    _RESOURCE_PATH = ('METADATA', 'METADATA-SYSTEM', 'SYSTEM', 'METADATA-RESOURCE')

    def __init__(self, filepath):
        """Load and parse the metadata XML document at *filepath*."""
        with open(filepath, 'r') as f:
            xml_str = f.read()
        self.meta_xml = ElementTree.fromstring(xml_str)

    def _resource_elements(self):
        """Return all <Resource> elements of the document."""
        node = self.meta_xml
        for tag in self._RESOURCE_PATH:
            node = node.find(tag)
        return node.findall('Resource')

    def GetResources(self):
        """Return a RetsResource for each <Resource> element."""
        resource_list = []
        for resource_xml in self._resource_elements():
            resource = RetsResource()
            resource.resource_id = resource_xml.find('ResourceID').text
            resource_list.append(resource)
        return resource_list

    def GetRetsClass(self, resource):
        """Return the RetsClass objects declared under the resource whose
        ResourceID text equals *resource*.

        Bug fixes vs. the original: the ResourceID *text* is compared (the
        original compared an Element object to a string, which is always
        False), and the inner loop now has a body and the list is returned
        (the original ``for class_xml in class_xml_list:`` had no body,
        which is a syntax error).
        """
        class_list = []
        for resource_xml in self._resource_elements():
            if resource_xml.find('ResourceID').text == resource:
                for class_xml in resource_xml.findall('Class'):
                    rets_class = RetsClass()
                    # NOTE(review): assumes a <ClassName> child element —
                    # TODO confirm against the RETS metadata schema in use.
                    name_el = class_xml.find('ClassName')
                    rets_class.rets_classname = name_el.text if name_el is not None else None
                    class_list.append(rets_class)
        return class_list

    def GetTables(self, resource, rets_class):
        # Not implemented in the original either; kept as a stub.
        pass

    def GetLookUp(self, resource, rets_class):
        # Not implemented in the original either; kept as a stub.
        pass
class RetsResource(object):
    """Plain data holder for one RETS resource."""
    def __init__(self):
        # ResourceID text from the metadata document; filled in by the parser.
        self.resource_id = None
class RetsClass(object):
    """Plain data holder for one RETS class."""
    def __init__(self):
        # Class name from the metadata document; filled in by the parser.
        self.rets_classname = None
class RetsTable(object):
    """Plain data holder for one RETS table (field) definition."""
    def __init__(self):
        # SystemName of the field; filled in by the parser.
        self.system_name = None
| mit | Python |
|
bbb445b691f7370059c7bf9c94e2e9c6f4155273 | update to latest | babelsberg/babelsberg-r,topazproject/topaz,babelsberg/babelsberg-r,topazproject/topaz,topazproject/topaz,babelsberg/babelsberg-r,babelsberg/babelsberg-r,topazproject/topaz,babelsberg/babelsberg-r | tasks/base.py | tasks/base.py | import os
from invoke import run
class BaseTest(object):
    """Shared helpers for test tasks: fetch mspec/rubyspec via git if absent."""
    def download_mspec(self):
        # Shallow-clone mspec next to this checkout unless already present.
        if not os.path.isdir("../mspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/mspec")
    def download_rubyspec(self):
        # Shallow-clone ruby/spec and rename it to the expected 'rubyspec' dir.
        if not os.path.isdir("../rubyspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/spec")
            run("mv spec rubyspec")
| import os
from invoke import run
class BaseTest(object):
    """Shared helpers for test tasks: fetch mspec/rubyspec via git if absent."""
    def download_mspec(self):
        # Shallow-clone mspec and pin it to the v1.6.0 tag.
        if not os.path.isdir("../mspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/mspec")
            run("cd ../mspec && git checkout v1.6.0")
    def download_rubyspec(self):
        # Shallow-clone ruby/spec and rename it to the expected 'rubyspec' dir.
        if not os.path.isdir("../rubyspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/spec")
            run("mv spec rubyspec")
| bsd-3-clause | Python |
50843d6a2c93be4e05a0a2da338e4b0e0d99d294 | Add tls proxy helper | jxaas/python-client | jujuxaas/tls_proxy.py | jujuxaas/tls_proxy.py | import copy
import select
import socket
import ssl
import sys
import threading
import logging
logger = logging.getLogger(__name__)
class TlsProxyConnection(object):
    """One proxied connection: pumps bytes both ways between an accepted
    inbound socket and a freshly opened TLS-wrapped outbound socket."""

    def __init__(self, server, inbound_socket, inbound_address, outbound_address):
        self.server = server                    # owning TlsProxy; supplies SSL wrapping
        self.inbound_socket = inbound_socket
        self.inbound_address = inbound_address  # peer address, used for log context
        self.outbound_socket = None
        self.outbound_address = outbound_address
        self.thread = None

    def start(self):
        """Run the pump on a daemon thread so it never blocks interpreter exit."""
        self.thread = threading.Thread(target=self._proxy)
        self.thread.daemon = True
        self.thread.start()

    def _proxy(self):
        """Copy data in both directions until either side closes.

        Bug fix vs. the original: a zero-byte recv() (peer closed) only
        broke out of the inner ``for`` loop, so the outer ``while True``
        kept select()ing the dead socket and busy-looped forever.  A
        ``closed`` flag now terminates the outer loop too.
        """
        try:
            self.outbound_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.outbound_socket = self.server._wrap_ssl(self.outbound_socket)
            self.outbound_socket.connect(self.outbound_address)
            logger.debug("Proxy for %s: connected to remote", self.inbound_address)

            # Map each socket to its counterpart for forwarding.
            pairs = {}
            pairs[self.inbound_socket] = self.outbound_socket
            pairs[self.outbound_socket] = self.inbound_socket
            selectors = [self.inbound_socket, self.outbound_socket]

            closed = False
            while not closed:
                ready, _, _ = select.select(selectors, [], [])
                for s in ready:
                    data = s.recv(8192)
                    if len(data) == 0:
                        # Peer closed: stop pumping in both directions.
                        closed = True
                        break
                    else:
                        other = pairs[s]
                        other.send(data)
        except:
            # Intentionally broad, matching the original: any pump failure is
            # logged and the sockets are torn down in the finally block.
            logger.warn("Proxy for %s: error: %s", self.inbound_address, sys.exc_info())
        finally:
            logger.debug("Proxy for %s: closing", self.inbound_address)
            self.inbound_socket.close()
            if self.outbound_socket:
                self.outbound_socket.close()
class TlsProxy(object):
    """Accepts plain TCP connections on listen_address and forwards each one
    over TLS to forward_address, spawning a TlsProxyConnection per client."""
    def __init__(self, ssl_context, listen_address, forward_address):
        self.listen_address = listen_address
        self.forward_address = forward_address
        # Dict of keyword arguments for ssl.wrap_socket (see _wrap_ssl).
        self.ssl_context = ssl_context
        # Set once the listening socket is bound; start() blocks on it.
        self._ready = threading.Event()
    def _serve(self):
        """Accept loop; runs on the daemon thread created by start()."""
        server = None
        try:
            server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind(self.listen_address)
            server.listen(50)
            self._ready.set()
            while True:
                client, client_address = server.accept()
                proxy = TlsProxyConnection(self, client, client_address, self.forward_address)
                proxy.start()
        finally:
            if server:
                server.close()
    def start(self):
        """Start the accept loop and block until the socket is listening."""
        self.thread = threading.Thread(target=self._serve)
        self.thread.daemon = True
        self.thread.start()
        self._ready.wait()
    def _wrap_ssl(self, socket):
        # NOTE(review): the parameter shadows the imported ``socket`` module
        # inside this method; harmless here but worth renaming.
        # NOTE(review): ssl.wrap_socket is deprecated and removed in
        # Python 3.12 — verify the target interpreter version.
        options = copy.copy(self.ssl_context)
        options['sock'] = socket
        return ssl.wrap_socket(**options)
| apache-2.0 | Python |
|
420c14d38fdddc3ed5d646a99c355b707be011fc | Add tests for ansible module | open-craft/opencraft,open-craft/opencraft,open-craft/opencraft,omarkhan/opencraft,omarkhan/opencraft,brousch/opencraft,brousch/opencraft,open-craft/opencraft,open-craft/opencraft,omarkhan/opencraft,omarkhan/opencraft,brousch/opencraft | instance/tests/test_ansible.py | instance/tests/test_ansible.py | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Ansible - Tests
"""
# Imports #####################################################################
import yaml
from unittest.mock import call, patch
from django.test import TestCase
from instance import ansible
# Tests #######################################################################
class YAMLTestCase(TestCase):
    """
    Test cases for YAML helper functions
    """
    def setUp(self):
        # Two overlapping dicts: 'testb' and test_dict['foo'] collide so the
        # merge precedence (second wins) can be asserted; unicode values
        # exercise non-ASCII handling.
        self.yaml_dict1 = {
            'testa': 'firsta with unicode «ταБЬℓσ»',
            'testb': 'firstb',
            'test_dict': {
                'foo': 'firstfoo',
                'bar': 'firstbar',
            },
        }
        self.yaml_dict2 = {
            'testb': 'secondb with unicode «ταБЬℓσ2»',
            'testc': 'secondc',
            'test_dict': {
                'foo': 'secondfoo',
                'other': 'secondother',
            }
        }
        self.yaml_str1 = yaml.dump(self.yaml_dict1)
        self.yaml_str2 = yaml.dump(self.yaml_dict2)
    def test_yaml_merge(self):
        """
        Merge of two yaml strings with overlapping variables
        """
        yaml_result_str = ansible.yaml_merge(self.yaml_str1, self.yaml_str2)
        # The second document's values win; nested dicts are merged key-wise.
        self.assertEquals(yaml.load(yaml_result_str), {
            'testa': 'firsta with unicode «ταБЬℓσ»',
            'testb': 'secondb with unicode «ταБЬℓσ2»',
            'testc': 'secondc',
            'test_dict': {
                'foo': 'secondfoo',
                'bar': 'firstbar',
                'other': 'secondother',
            }
        })
    def test_yaml_merge_with_none(self):
        """
        Merge of a yaml string with None
        """
        # Merging with None must return the original string unchanged.
        self.assertEqual(ansible.yaml_merge(self.yaml_str1, None), self.yaml_str1)
class AnsibleTestCase(TestCase):
    """
    Test cases for ansible helper functions & wrappers
    """
    def test_string_to_file_path(self):
        """
        Store a string in a temporary file
        """
        test_str = 'My kewl string\nwith unicode «ταБЬℓσ», now 20% off!'
        file_path = ansible.string_to_file_path(test_str)
        # The file written on disk must round-trip the exact string.
        with open(file_path) as fp:
            self.assertEqual(fp.read(), test_str)
    # Patch out the filesystem and process spawning so only the command
    # string construction is exercised.
    @patch('subprocess.Popen')
    @patch('instance.ansible.mkdtemp')
    @patch('instance.ansible.string_to_file_path')
    def test_run_playbook(self, mock_string_to_file_path, mock_mkdtemp, mock_popen):
        """
        Run the ansible-playbook command
        """
        mock_string_to_file_path.return_value = '/test/str2path'
        mock_mkdtemp.return_value = '/test/mkdtemp'
        ansible.run_playbook(
            '/requirements/path.txt',
            "INVENTORY: 'str'",
            "VARS: 'str2'",
            '/play/book',
            'playbook_name_str',
        )
        # Expected shell pipeline: create venv, pip-install requirements,
        # then invoke ansible-playbook with inventory and extra-vars files.
        run_playbook_cmd = (
            'virtualenv -p /usr/bin/python /test/mkdtemp && '
            '/test/mkdtemp/bin/python -u /test/mkdtemp/bin/pip install -r /requirements/path.txt && '
            '/test/mkdtemp/bin/python -u /test/mkdtemp/bin/ansible-playbook -i /test/str2path '
            '-e @/test/str2path -u root playbook_name_str'
        )
        self.assertEqual(
            mock_popen.mock_calls,
            [call(run_playbook_cmd, bufsize=1, stdout=-1, cwd='/play/book', shell=True)]
        )
| agpl-3.0 | Python |
|
7d7fd5b167528654b9fed5b0c971c2b8110d93ea | Create wrapper_exploit.py | liorvh/pythonpentest,funkandwagnalls/pythonpentest,liorvh/pythonpentest,funkandwagnalls/pythonpentest,liorvh/pythonpentest,funkandwagnalls/pythonpentest | wrapper_exploit.py | wrapper_exploit.py | # Author: Chris Duffy
# Date: May 2015
# Purpose: An sample exploit for testing UDP services
# NOTE(review): 'strut' is almost certainly a typo for 'struct' (struct.pack
# is used below); as written this import raises ImportError.
import sys, socket, strut, subprocess
program_name = 'C:\exploit_writing\vulnerable.exe'
# The '####' runs below are fill-in placeholders (they read as comments in
# Python), so this file is an exploit *template*: it does not parse until an
# operator substitutes real lengths/addresses.
fill ="A"*####
eip = struct.pack('<I',0x########)
offset = "\x90"*##
available_shellcode_space = ###
shell =() #Code to insert
# NOPs to fill the remaining space
exploit = fill + eip + offset + shell
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# NOTE(review): rhost/rport are never defined in this template — they must be
# assigned before the UDP send can work.
client.sendto(exploit, (rhost, rport))
subprocess.call([program_name, exploit])
| bsd-3-clause | Python |
|
ef24797a12e8a8919ddb11c7b6763154c5c3aad1 | transform DR script to observe exceptions | fkuhn/dgd2cmdi | transform_DR.py | transform_DR.py | __author__ = 'kuhn'
__author__ = 'kuhn'  # fix: the original assigned __author__ twice in a row

from batchxslt import processor
from batchxslt import cmdiresource
import codecs
import os

# Source metadata trees (corpora / events / speakers) of the DGD repository.
dgd_corpus = "/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/corpora/extern"
dgd_events = "/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/events/extern"
dgd_speakers = "/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/speakers/extern"

# XSL stylesheets converting each DGD metadata flavour to CMDI.
corpus_xsl = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdCorpus2cmdi.xsl"
event_xsl = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdEvent2cmdi.xsl"
speaker_xsl = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdSpeaker2cmdi.xsl"

# Saxon XSLT processor used to run the transformations.
saxon_jar = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/dgd2cmdi/saxon/saxon9he.jar"

# Input paths for the 'DR' corpus and its events/speakers.
pf_corpus = os.path.join(dgd_corpus, 'DR--_extern.xml')
pf_events = os.path.join(dgd_events, 'DR')
pf_speakers = os.path.join(dgd_speakers, 'DR')

# Transform corpus, events and speakers into /tmp/cmdi/, prefixing outputs
# with 'cmdi_'. Exceptions from the processor propagate so failures are seen.
xsl_processor = processor.XSLBatchProcessor(saxon_jar)
xsl_processor.transform(corpus_xsl, pf_corpus, "cmdi_", '/tmp/cmdi/corpus/')
xsl_processor.transform(event_xsl, pf_events, "cmdi_", '/tmp/cmdi/events/DR/')
xsl_processor.transform(speaker_xsl, pf_speakers, "cmdi_", '/tmp/cmdi/speakers/DR/')
|
60b5228818c92f4d13b0a054956a5f834c7f7549 | Implement remove.py | cryptonomex/graphene,oxarbitrage/bitshares-core,abitmore/bitshares-2,peertracksinc/muse,oxarbitrage/bitshares-core,bigoc/openchain,bitsuperlab/cpp-play2,cryptonomex/graphene,oxarbitrage/bitshares-core,peertracksinc/muse,bitsuperlab/cpp-play2,bitshares/bitshares-2,oxarbitrage/bitshares-core,bigoc/openchain,bitsuperlab/cpp-play2,pmconrad/graphene,bitshares/bitshares-2,bitshares/bitshares-2,abitmore/bitshares-2,pmconrad/graphene,abitmore/bitshares-2,bitshares/bitshares-2,peertracksinc/muse,cryptonomex/graphene,peertracksinc/muse,pmconrad/graphene,bigoc/openchain,bigoc/openchain,abitmore/bitshares-2,pmconrad/graphene | programs/genesis_util/remove.py | programs/genesis_util/remove.py | #!/usr/bin/env python3
import argparse
import json
import sys
def dump_json(obj, out, pretty):
    """Serialize *obj* to the *out* stream as sorted-key JSON.

    Pretty mode uses a two-space indent; compact mode strips all
    separator whitespace.
    """
    layout = {"indent": 2} if pretty else {"separators": (",", ":")}
    json.dump(obj, out, sort_keys=True, **layout)
    return
def main():
    """CLI entry point: strip the named assets (and their balances) from a
    genesis snapshot read from --input and write the result to --output."""
    parser = argparse.ArgumentParser(description="Remove entities from snapshot")
    parser.add_argument("-o", "--output", metavar="OUT", default="-", help="output filename (default: stdout)")
    parser.add_argument("-i", "--input", metavar="IN", default="-", help="input filename (default: stdin)")
    parser.add_argument("-a", "--asset", metavar="ASSETS", nargs="+", help="list of asset(s) to delete")
    parser.add_argument("-p", "--pretty", action="store_true", default=False, help="pretty print output")
    opts = parser.parse_args()

    # '-' selects stdin/stdout, mirroring common Unix tool conventions.
    if opts.input == "-":
        genesis = json.load(sys.stdin)
    else:
        with open(opts.input, "r") as f:
            genesis = json.load(f)

    if opts.asset is None:
        opts.asset = []

    rm_asset_set = set(opts.asset)

    # Filter initial_assets, counting how many entries each symbol removes.
    removed_asset_entries = {aname : 0 for aname in opts.asset}
    new_initial_assets = []
    for asset in genesis["initial_assets"]:
        symbol = asset["symbol"]
        if symbol not in rm_asset_set:
            new_initial_assets.append(asset)
        else:
            removed_asset_entries[symbol] += 1
    genesis["initial_assets"] = new_initial_assets

    # Filter initial_balances, keeping the removed entries so totals can be
    # reported per symbol below.
    removed_balance_entries = {aname : [] for aname in opts.asset}
    new_initial_balances = []
    for balance in genesis["initial_balances"]:
        symbol = balance["asset_symbol"]
        if symbol not in rm_asset_set:
            new_initial_balances.append(balance)
        else:
            removed_balance_entries[symbol].append(balance)
    genesis["initial_balances"] = new_initial_balances
    # TODO: Remove from initial_vesting_balances

    # Per-asset removal summary goes to stderr so stdout stays clean JSON.
    for aname in opts.asset:
        sys.stderr.write(
           "Asset {sym} removed {acount} initial_assets, {bcount} initial_balances totaling {btotal}\n".format(
           sym=aname,
           acount=removed_asset_entries[aname],
           bcount=len(removed_balance_entries[aname]),
           btotal=sum(int(e["amount"]) for e in removed_balance_entries[aname]),
           ))

    if opts.output == "-":
        dump_json( genesis, sys.stdout, opts.pretty )
        sys.stdout.flush()
    else:
        with open(opts.output, "w") as f:
            dump_json( genesis, f, opts.pretty )
    return

if __name__ == "__main__":
    main()
| mit | Python |
|
6e43f611420068f0829fc64c1963ee51931b0099 | change name of data.py | 1orwell/yrs2013,1orwell/yrs2013 | node-interactions.py | node-interactions.py | import operator
from os import listdir
from os.path import isfile, join
import sys
def get_dict_of_all_contacts():
    """Read every mote file under flu-data/moteFiles and return a mapping
    {node_name: {contacted_node_id: first_contact_time}}.

    Bug fix vs. the original: a line with fewer than 5 fields triggered
    ``continue`` inside a manual ``while line:`` loop *without* reading the
    next line, so the loop spun forever on that line.  Iterating the file
    object directly removes the readline() bookkeeping entirely.
    """
    datapath = 'flu-data/moteFiles'
    datafiles = [f for f in listdir(datapath) if isfile(join(datapath, f))]
    dict_of_all_contacts = dict()
    for datafile in datafiles:
        node_contacts = dict()
        with open(join(datapath, datafile), 'r') as f:
            for line in f:
                numlist = line.split()
                if len(numlist) < 5:
                    continue
                node = numlist[0]
                time = int(numlist[-1])
                # Only the first (earliest) contact time per node is kept.
                if node not in node_contacts:
                    node_contacts[node] = time
        # Strip the filename prefix to get the node's own name.
        # NOTE(review): assumes a fixed 5-character prefix — TODO confirm.
        nodename = datafile[5:]
        dict_of_all_contacts[nodename] = node_contacts
    return dict_of_all_contacts
# Python 2 script (print statements, dict.iteritems). Simulates one hop of
# infection spread starting from node '1'.
dict_of_all_contacts = get_dict_of_all_contacts()
node1 = dict_of_all_contacts['1']
# Everyone node 1 contacted is infected at the time of first contact.
infected = {}
for k, v in node1.iteritems():
    infected[k] = v
final_infected = infected.copy()
# Second hop: contacts of the infected nodes.
# NOTE(review): the inner loop reuses the names k, v and so shadows the
# outer loop variables — confusing but functional as written.
for k,v in infected.iteritems():
    current_node = dict_of_all_contacts[k]
    for k, v in current_node.iteritems():
        if k not in infected:
            final_infected[k] = v
        else:
            # Keep the earliest recorded infection time.
            if infected[k] > v:
                final_infected[k] = v
print len(final_infected)
# Report nodes ordered by infection time.
sorted_infected = sorted(final_infected.iteritems(), key=operator.itemgetter(1))
print sorted_infected
| mit | Python |
|
4b9925a429692175ad1e0a89859a67117cbba9fe | Create pirates_of_the_caribbean.py | CSavvy/python | extras/pirates_of_the_caribbean.py | extras/pirates_of_the_caribbean.py | #This makes the coding of the song easier
def note(n):
    """Map a scale index (1-9) to its frequency in Hz.

    Unknown indices yield 0 (silence), matching the original fall-through.
    """
    frequencies = {
        1: 880,
        2: 987.77,
        3: 1046.5,
        4: 1174.66,
        5: 1318.51,
        6: 1396.91,
        7: 1567.98,
        8: 1760.00,
        9: 932.33,
    }
    return frequencies.get(n, 0)
#This is the coding for the song
def Pirates( time, octave):
    """Play the 'He's a Pirate' melody on the robot speaker.

    time   -- duration of one base note (seconds); longer notes use time*2/time*3
    octave -- frequency multiplier applied to every note() value

    Relies on Myro's beep()/wait() being in scope at call time (the script
    does ``from Myro import *`` after this definition, which is sufficient).
    """
    # First phrase.
    beep(time, note(1)*octave)
    beep(time, note(3)*octave)
    beep(time, note(4)*octave)
    wait(time)
    beep(time, note(4)*octave)
    wait(time)
    beep(time, note(4)*octave)
    beep(time, note(5)*octave)
    beep(time, note(6)*octave)
    wait(time)
    beep(time, note(6)*octave)
    wait(time)
    beep(time, note(6)*octave)
    beep(time, note(7)*octave)
    beep(time, note(5)*octave)
    wait(time)
    beep(time, note(5)*octave)
    wait(time)
    beep(time, note(4)*octave)
    beep(time, note(3)*octave)
    beep(time, note(3)*octave)
    beep(time*2, note(4)*octave)
    wait(time)
    # Second phrase (repeat with a different cadence).
    beep(time, note(1)*octave)
    beep(time, note(3)*octave)
    beep(time, note(4)*octave)
    wait(time)
    beep(time, note(4)*octave)
    wait(time)
    beep(time, note(4)*octave)
    beep(time, note(5)*octave)
    beep(time, note(6)*octave)
    wait(time)
    beep(time, note(6)*octave)
    wait(time)
    beep(time, note(6)*octave)
    beep(time, note(7)*octave)
    beep(time, note(5)*octave)
    wait(time)
    beep(time, note(5)*octave)
    wait(time)
    beep(time, note(4)*octave)
    beep(time, note(3)*octave)
    beep(time*2, note(4)*octave)
    wait(time*2)
    # Third phrase (rises into the next octave: note(x)*2*octave).
    beep(time, note(1)*octave)
    beep(time, note(3)*octave)
    beep(time, note(4)*octave)
    wait(time)
    beep(time, note(4)*octave)
    wait(time)
    beep(time, note(4)*octave)
    beep(time, note(6)*octave)
    beep(time, note(7)*octave)
    wait(time)
    beep(time, note(7)*octave)
    wait(time)
    beep(time, note(7)*octave)
    beep(time, note(1)*2*octave)
    beep(time, note(9)*2*octave)
    wait(time)
    beep(time, note(9)*2*octave)
    wait(time)
    beep(time, note(1)*2*octave)
    beep(time, note(7)*octave)
    beep(time, note(1)*2*octave)
    beep(time, note(4)*octave)
    wait(time*2)
    # Closing phrase.
    beep(time, note(4)*octave)
    beep(time, note(5)*octave)
    beep(time, note(6)*octave)
    wait(time)
    beep(time, note(6)*octave)
    wait(time)
    beep(time, note(7)*octave)
    wait(time)
    beep(time, note(1)*2*octave)
    beep(time, note(4)*octave)
    wait(time*2)
    beep(time, note(4)*octave)
    beep(time, note(6)*octave)
    beep(time, note(5)*octave)
    wait(time)
    beep(time, note(5)*octave)
    wait(time)
    beep(time, note(6)*octave)
    beep(time, note(4)*octave)
    beep(time*3, note(5)*octave)
# Bring beep()/wait()/init() into scope and start the simulator.
from Myro import *
init("sim")
#This is the song part of the program
Pirates(.15,1) #takes in duration of each note, and the octave to use
| mit | Python |
|
f8afd9d77a61f2baae15fec841817b0f97e573f9 | add redone twitterminal.py script | raehik/scripts,raehik/scripts | twitterminal.py | twitterminal.py | #!/usr/bin/env python3
#
# Tweet from the shell.
#
# Requires the following pip packages:
# * simplejson
# * twitter (NOT python-twitter, aka Python Twitter Tools)
#
import sys, os, argparse, subprocess, logging
# twitter requires a json module
# simplejson is updated more and may be faster
# see: http://stackoverflow.com/questions/712791
import simplejson
import twitter
class Twitterminal:
    """Command-line Twitter client using Python Twitter Tools OAuth."""

    # Consumer (user) OAuth token file and application key file.
    CREDS_FILE = os.getenv('HOME')+"/.twitterminal_creds"
    APP_CREDS_FILE = os.getenv('HOME')+"/.twitterminal_appcreds"

    # Process exit codes.
    ERR_ARGS = 1
    ERR_OAUTH = 2

    ## CLI-related {{{
    def __init_logging(self):
        """Attach a stderr handler named after the running script.

        Idempotent: skips attaching when a handler already exists, so that
        __init__ and run() can both call it without duplicating output.
        """
        self.logger = logging.getLogger(os.path.basename(sys.argv[0]))
        if not self.logger.handlers:
            lh = logging.StreamHandler()
            lh.setFormatter(logging.Formatter("%(name)s: %(levelname)s: %(message)s"))
            self.logger.addHandler(lh)

    def __parse_args(self):
        """Parse CLI arguments and set logger verbosity from -v/-q."""
        self.parser = argparse.ArgumentParser(description="Tweet from the shell.")
        self.parser.add_argument("-v", "--verbose", help="be verbose", action="count", default=0)
        self.parser.add_argument("-q", "--quiet", help="be quiet (overrides -v)", action="count", default=0)
        self.parser.add_argument("message", help="text to tweet")
        self.args = self.parser.parse_args()

        if self.args.verbose == 0:
            self.logger.setLevel(logging.INFO)
        elif self.args.verbose >= 1:
            self.logger.setLevel(logging.DEBUG)
        if self.args.quiet >= 1:
            # NOTE(review): NOTSET makes the logger defer to its parent
            # (effectively WARNING); it does not fully silence output.
            self.logger.setLevel(logging.NOTSET)

        if len(self.args.message) == 0:
            # Bug fix: the original called the *builtin* exit() with two
            # arguments and referenced the bare name ERR_ARGS (NameError);
            # use our exit() method and the class constant instead.
            self.exit("message needs to be longer than 0 characters",
                      Twitterminal.ERR_ARGS)

    def run(self):
        """Run from CLI: parse arguments, try to tweet."""
        self.__init_logging()
        self.__parse_args()
        self.tweet(self.args.message)
    ## }}}

    def __init__(self):
        # Bug fix: initialise logging before the client so that exit() has
        # a logger available if reading the credential files fails.
        self.__init_logging()
        self.__init_client()

    def exit(self, msg, ret):
        """Exit with explanation."""
        self.logger.error(msg)
        sys.exit(ret)

    def get_shell(self, args):
        """Run a shell command and return the exit code."""
        return subprocess.run(args).returncode

    def __init_client(self):
        """Initialise the Twitter client."""
        # get application OAuth tokens
        with open(Twitterminal.APP_CREDS_FILE) as f:
            api_tokens = [line.strip() for line in f]
        if len(api_tokens) != 2:
            # Bug fix: same builtin-exit()/undefined-name problem as above.
            self.exit("app creds key incorrectly formatted",
                      Twitterminal.ERR_OAUTH)

        # get consumer OAuth tokens
        # TODO: the oauth dance if required
        #twitter.oauth_dance("twitterminal.py", api_tokens[0], api_tokens[1], Twitterminal.CREDS_FILE)
        oauth_token, oauth_secret = twitter.read_token_file(Twitterminal.CREDS_FILE)
        self.client = twitter.Twitter(auth=twitter.OAuth(oauth_token,
            oauth_secret, api_tokens[0], api_tokens[1]))

    def tweet(self, msg):
        """Tweet a message."""
        self.client.statuses.update(status=msg)
if __name__ == "__main__":
twat = Twitterminal()
twat.run()
| mit | Python |
|
0c6becaa179aba9408def1b3cce61d5ec1509942 | Load the simul module and run a simulation | cphyc/MHD_simulation,cphyc/MHD_simulation | python/main.py | python/main.py | from simul import *
if __name__ == '__main__':
# create a new simulation
s = Simulation(Re=5)
# initial conditions psi(0) = 0, Omega(0) = 0
s.psi.initial("null")
s.omega.initial("null")
# T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz
s.T.initial(lambda n, k: T_0(n,k,s))
# main loop over time
while s.step():
s.T.step()
s.psi.step()
s.omega.step()
del s
| apache-2.0 | Python |
|
90169095a9e1adbc23e1efa35ea0e1a9a09259de | Solve Code Fights sortByHeight problem | HKuz/Test_Code | Problems/sortByHeight.py | Problems/sortByHeight.py | #!/usr/local/bin/python
# Code Fights Arcade Mode
def sortByHeight(a):
trees = [i for i, t in enumerate(a) if t == -1]
humans = sorted([h for h in a if h != -1])
for tree in trees:
humans.insert(tree, -1)
return humans
def main():
a = [-1, 150, 190, 170, -1, -1, 160, 180]
new = sortByHeight(a)
print(new)
if __name__ == '__main__':
main()
| mit | Python |
|
5820a2b6130ea7be9eb86341aa6b3b69861a9a36 | Create example.py | david-shu/lxml-mate | example.py | example.py |
from lxmlmate import ObjectifiedElementProxy
print("#To create a brand new xml:")
p = ObjectifiedElementProxy( rootag='Person' )
p.name = 'peter'
p.age = 13
print( p )
print('''
##<Person>
## <name>peter</name>
## <age>13</age>
##</Person>
''')
print('===================')
print( p.name )
print('''
##<name>peter</name>
''')
print('===================')
#To retrieve peter's name and age:
peter = p.name.pyval
age = p.age.pyval
print('#To create from xml string:')
p = ObjectifiedElementProxy( xmlStr="<Person><name>peter</name><age>13</age></Person>" )
print( p )
print('''
##<Person>
## <name>peter</name>
## <age>13</age>
##</Person>
''')
#Multiple levels' example:
r = ObjectifiedElementProxy()
r.person.name = 'jack'
r.person.age = 10
print('===================')
print('''#To insert descedants like '<person><name>peter</name><age>13</age></person>':''')
r.insert( 'person' )('name','peter')('age',13)
p = r('person').person[-1]
p.name = 'david'
p.age = 16
print( r )
print('''
##<root>
## <person>
## <name>jack</name>
## <age>10</age>
## </person>
## <person>
## <name>peter</name>
## <age>13</age>
## </person>
## <person>
## <name>david</name>
## <age>16</age>
## </person>
##</root>
''')
print('===================')
print( r.person[1].name.pyval )
print('##peter')
##To retrieve the last person:
r.person[-1]
##To insert a new tag with attrib:
r.insert( 'person', attrib={ 'height' : "185cm" } )
##To modify a tag's attrib:
r.person[0].attrib['height'] = "170cm"
##You can use lxml.ObjectifiedElement's methods directly like this:
r.addattr( 'kkk','vvv' )
##To modify tag:
r.person[-1].tag = 'person_new'
print('===================')
print( r.person[-1] )
print('''
##<person_new>
## <name>david</name>
## <age>16</age>
##</person_new>
''')
print('===================')
print('#To insert a new tag with attrib:')
r.insert( 'person', attrib={ 'height':'185cm'} )("name","joe")
print( r.person[-1] )
print('''
##<person height="185cm">
## <name>joe</name>
##</person>
''')
##To dump to xml document:
r.dump( 'person.xml' )
| mit | Python |
|
1a97d686ed5afd9a97083bc09f6c4bfb4ef124fc | Add quick helpers to get a client | ryansb/zaqar-webscraper-demo | helpers.py | helpers.py | from zaqarclient.queues import client
import os
conf = {
'auth_opts': {
'backend': 'keystone',
'options': {
'os_username': os.environ.get('OS_USERNAME'),
'os_password': os.environ.get('OS_PASSWORD'),
'os_project_name': os.environ.get('OS_PROJECT_NAME', 'admin'),
'os_auth_url': os.environ.get('OS_AUTH_URL') + '/v2.0/',
'insecure': '',
},
},
}
client = client.Client(url='http://192.168.122.58:8888', version=2, conf=conf)
| mit | Python |
|
7aee3720617aa3442245e2d0bf3de7393e4acb01 | Add lc0133_clone_graph.py | bowen0701/algorithms_data_structures | lc0133_clone_graph.py | lc0133_clone_graph.py | """Leetcode 133. Clone Graph
Medium
URL: https://leetcode.com/problems/clone-graph/
Given a reference of a node in a connected undirected graph, return a deep copy
(clone) of the graph. Each node in the graph contains a val (int) and a list
(List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},
{"$id":"3","neighbors":[{"$ref":"2"},{"$id":"4","neighbors":[{"$ref":"3"},
{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
- The number of nodes will be between 1 and 100.
- The undirected graph is a simple graph, which means no repeated edges and no
self-loops in the graph.
- Since the graph is undirected, if node p has node q as neighbor, then node q
must have node p as neighbor too.
- You must return the copy of the given node as a reference to the cloned graph.
"""
# Definition for a Node.
class Node(object):
    """Graph node: an int value plus a list of neighbor Nodes."""
    def __init__(self, val, neighbors):
        self.val = val
        self.neighbors = neighbors


class Solution(object):
    def cloneGraph(self, node):
        """Return a deep copy of the connected undirected graph reachable
        from *node* (the original left this as an unimplemented stub).

        Iterative DFS with an original->clone map so cycles and shared
        neighbors are cloned exactly once. Time/space O(V + E).

        :type node: Node
        :rtype: Node
        """
        if node is None:
            return None
        clones = {node: Node(node.val, [])}
        stack = [node]
        while stack:
            current = stack.pop()
            for neighbor in current.neighbors:
                if neighbor not in clones:
                    clones[neighbor] = Node(neighbor.val, [])
                    stack.append(neighbor)
                clones[current].neighbors.append(clones[neighbor])
        return clones[node]
def main():
    # No driver implemented yet; see Solution.cloneGraph above.
    pass

if __name__ == '__main__':
    main()
| bsd-2-clause | Python |
|
ef63c538aff066230030aaf02981933b652830e4 | Create module_posti.py | rnyberg/pyfibot,rnyberg/pyfibot | pyfibot/modules/module_posti.py | pyfibot/modules/module_posti.py | # -*- encoding: utf-8 -*-
"""
Get package tracking information from the Finnish postal service
"""
from __future__ import unicode_literals, print_function, division
from bs4 import BeautifulSoup
import requests
from datetime import datetime, timedelta
lang = 'en'
def command_posti(bot, user, channel, args):
"""Parse the package status page"""
args = args.strip()
if not args:
return bot.say(channel, 'Need a tracking ID as argument.')
url = 'http://www.itella.fi/itemtracking/itella/search_by_shipment_id'
params = {
'ShipmentId': args,
'lang': lang,
'LOTUS_hae': 'Hae',
'LOTUS_side': '1'
}
r = requests.post(url, params=params)
bs = BeautifulSoup(r.content)
try:
status_table = bs.find('table', {'id': 'shipment-event-table'}).find_all('tr')[1]
except:
if lang == 'en':
return bot.say(channel, 'Item not found.')
return bot.say(channel, 'Lähetystä ei löytynyt.')
try:
event = status_table.find('div', {'class': 'shipment-event-table-header'}).text.strip()
except:
event = '???'
location = '???'
dt = timedelta(0, 0, 0)
now = datetime.now()
for x in status_table.find_all('div', {'class': 'shipment-event-table-row'}):
try:
row_label = x.find('span', {'class': 'shipment-event-table-label'}).text.strip()
row_data = x.find('span', {'class': 'shipment-event-table-data'}).text.strip()
except:
continue
if lang == 'en':
if row_label == 'Registration:':
dt = now - datetime.strptime(row_data, '%d.%m.%Y %H:%M:%S')
if row_label == 'Location:':
location = row_data
else:
if row_label == 'Rekisteröinti:':
dt = now - datetime.strptime(row_data, '%d.%m.%Y klo %H:%M:%S')
if row_label == 'Paikka:':
location = row_data
agestr = []
if dt.days > 0:
agestr.append('%dd' % dt.days)
secs = dt.seconds
hours, minutes, seconds = secs // 3600, secs // 60 % 60, secs % 60
if hours > 0:
agestr.append('%dh' % hours)
if minutes > 0:
agestr.append('%dm' % minutes)
if lang == 'en':
return bot.say(channel, '%s - %s - %s' % (' '.join(agestr) + ' ago', event, location))
return bot.say(channel, '%s - %s - %s' % (' '.join(agestr) + ' sitten', event, location))
| bsd-3-clause | Python |
|
fbfdc979b5fbb7534a625db390b92856714dcfe1 | add basic tests for model_utils | jklenzing/pysat,rstoneback/pysat | pysat/tests/test_model_utils.py | pysat/tests/test_model_utils.py | import numpy as np
import sys
from nose.tools import assert_raises, raises
import pandas as pds
import pysat
from pysat import model_utils as mu
class TestBasics():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument(platform='pysat',
name='testing',
clean_level='clean')
self.start = pysat.datetime(2009, 1, 1)
self.stop = pysat.datetime(2009, 1, 1)
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.start, self.stop
@raises(ValueError)
def test_collect_inst_model_pairs_wo_date(self):
"""Try to run without start or stop dates"""
match = mu.collect_inst_model_pairs(inst=self.testInst)
@raises(ValueError)
def test_collect_inst_model_pairs_wo_inst(self):
"""Try to run without an instrument"""
match = mu.collect_inst_model_pairs(start=self.start, stop=self.stop)
@raises(ValueError)
def test_collect_inst_model_pairs_wo_model(self):
"""Try to run without a model"""
match = mu.collect_inst_model_pairs(start=self.start, stop=self.stop,
inst=self.testInst)
| bsd-3-clause | Python |
|
6cd8b4c733de5a4ed39e3d3ba3d06e78b04dbb4b | read a value from a file that is in ConfigObj format - no section check | matplo/rootutils,matplo/rootutils | python/2.7/read_config_value.py | python/2.7/read_config_value.py | #!/usr/bin/env python
from configobj import ConfigObj
import argparse
import os
import sys
def read_config(fname, skey):
    """Parse *fname* with ConfigObj and return the value stored under *skey*."""
    parsed = ConfigObj(fname, raise_errors=True)
    return parsed[skey]
def main():
    # CLI entry point: read one key from a ConfigObj-format file and print
    # its value on stdout; diagnostics go to stderr (Python 2 print syntax).
    parser = argparse.ArgumentParser(description='read a value from a ConfigObj file', prog=os.path.basename(__file__))
    parser.add_argument('-f', '--file', help='input file', type=str)
    parser.add_argument('-k', '--key', help='key to read', type=str)
    args = parser.parse_args()
    # Both options are effectively required; show usage when either is missing.
    if args.file is None:
        parser.print_usage()
        return
    if args.key is None:
        parser.print_usage()
        return
    if os.path.isfile(args.file):
        try:
            value = read_config(args.file, args.key)
            print value
        # NOTE(review): a bare except also hides programming errors; catching
        # KeyError (missing section/key) would be more precise.
        except:
            print >> sys.stderr, '[e] unable to read key:', args.key
            return
    else:
        print >> sys.stderr, '[e] unable to access file:', args.file
if __name__ == '__main__':
main()
| mit | Python |
|
837d1f26ad339fbe4338ef69c947f83042daba9f | add prelim script for looking at incident data | garnertb/fire-risk,garnertb/fire-risk,FireCARES/fire-risk,FireCARES/fire-risk | Scripts/fire_incident.py | Scripts/fire_incident.py | #Weinschenk
#12-14
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
# Load the Arlington incident log; rows with incident_class_code == 1 are
# counted as fires below.
incident = pd.read_csv('../Data/arlington_incidents.csv', header=0)
total_incidents = len(incident['incident_class_code'])
total_fires = 0
# Tally rows whose class code equals 1.
for i in incident['incident_class_code']:
    if i == 1:
        total_fires = total_fires + 1
# Percentage of all incidents that were fires (true division is in effect
# via the __future__ import at the top of the file).
print 100*(total_fires/total_incidents)
| mit | Python |
|
ab00f54344e4aa39503a59551e87db2ed4be9c3d | Create print_rectangle.py | set0s/learn-programming | python3/print_rectangle.py | python3/print_rectangle.py | while 1:
m, n = input().split()# m:height, n:width
if m == "0" and n == "0":
breaku
for i in range(int(m)):
print("#" * int(n))
print()
| mit | Python |
|
989a94c81f74a17707e66f126960b6bb45e9b4d5 | Add index to cover testgroup_details (previous runs) | dropbox/changes,bowlofstew/changes,dropbox/changes,dropbox/changes,bowlofstew/changes,bowlofstew/changes,wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,wfxiang08/changes,wfxiang08/changes,dropbox/changes | migrations/versions/3042d0ca43bf_index_job_project_id.py | migrations/versions/3042d0ca43bf_index_job_project_id.py | """Index Job(project_id, status, date_created) where patch_id IS NULL
Revision ID: 3042d0ca43bf
Revises: 3a3366fb7822
Create Date: 2014-01-03 15:24:39.947813
"""
# revision identifiers, used by Alembic.
revision = '3042d0ca43bf'
down_revision = '3a3366fb7822'
from alembic import op
def upgrade():
    # Partial index covering the "previous runs" lookup: jobs filtered by
    # project/status, ordered by creation date, patch builds excluded.
    op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL')
def downgrade():
    # Reverse of upgrade(): drop the partial index from the job table.
    op.drop_index('idx_job_previous_runs', 'job')
| apache-2.0 | Python |
|
b96f39b3527cef7fd9766315fbdf7b87b6315ec8 | add watch file which generated by scratch | rli9/slam,rli9/slam,rli9/slam,rli9/slam | src/car_control_manual/scratch/watch_file.py | src/car_control_manual/scratch/watch_file.py | from __future__ import print_function
"""Watch File generated by Scratch
1. save Scratch file *.sb2 into the same directory or specify with path
2. change name *.sb2 to *.zip
3. unzip *.zip file and read json data from project.json
"""
import sys, time, logging, os, zipfile
import watchdog
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
class MyFileMonitor(watchdog.events.FileSystemEventHandler):
    # Watchdog handler that invokes `callback(path)` whenever a file whose
    # extension matches `suffix` is created in the watched tree.
    def __init__(self, suffix, callback):
        super(MyFileMonitor, self).__init__()
        self.callback = callback
        # Normalise the suffix by dropping a leading dot ('.sb2' -> 'sb2').
        if suffix.startswith('.'):
            self.suffix = suffix[1:]
        else:
            self.suffix = suffix
    def on_created(self, event):
        super(MyFileMonitor, self).on_created(event)
        # Extension of the created path, i.e. the part after the last dot.
        n_suffix = event.src_path.split('.')[-1]
        if not event.is_directory and n_suffix == self.suffix:
            # Hand the new file's path to the user-supplied callback.
            self.callback(event.src_path)
class WatchFile(object):
    # Watches *path* for newly created files with *suffix* (default '*'),
    # then unpacks each one as a Scratch .sb2 (a zip) and extracts the
    # project.json data.
    # NOTE(review): kargv.has_key() makes this class Python 2 only.
    def __init__(self, *argv, **kargv):
        self.path = kargv['path'] if kargv.has_key('path') else '.'
        self.suffix = kargv['suffix'] if kargv.has_key('suffix') else '*' # star represent any file
        self.observer = Observer()
        self.event_handler = MyFileMonitor(self.suffix, callback=self.get_data)
    def run(self):
        # Block forever (1 s poll) until Ctrl-C, then shut the observer down.
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()
    def get_data(self, filename):
        # Callback invoked by MyFileMonitor for each newly created file.
        return self._unpack(filename)
    def _unpack(self, filename):
        # first rename suffix to zip file
        # may not work on linux
        # NOTE(review): the new name is built from filename.split('.')[1],
        # which only works for paths containing exactly one extension dot;
        # and project.json is open()ed from the filesystem instead of being
        # read out of the archive (zip_file.open(name)) -- verify on a real
        # .sb2 file.
        new_name = filename.split('.')[1] + '.zip'
        new_name = new_name[1:] if new_name.startswith('\\') else new_name
        os.rename(filename, new_name)
        zip_file = zipfile.ZipFile(new_name, 'r')
        json_data = ""
        for name in zip_file.namelist():
            if name == "project.json":
                file = open(name, 'r')
                json_data = "".join(file.readlines())
        return json_data
if __name__ == "__main__":
wd = WatchFile(suffix=".sb2")
wd.run()
| mit | Python |
|
46b3c0c024dd0d8dbb80911d04848571b3176be7 | add yaml config reader | cure76/misc | config.py | config.py | # -*- coding: utf-8 -*-
import os
import sys

import yaml
class Settings(dict):
    ''' base settings class

    Dict subclass whose keys are also readable/writable as attributes.
    Missing keys resolve to None instead of raising, and nested plain
    dicts are wrapped in Settings recursively.
    NOTE(review): uses dict.has_key(), so this class is Python 2 only.
    '''
    def __init__( self, data = None ):
        super( Settings, self ).__init__()
        if data:
            self.__update( data, {} )
    def __update( self, data, did ):
        # Recursively copy *data* into self, wrapping nested dicts in
        # Settings.  *did* maps id(source dict) -> wrapper so shared or
        # cyclic sub-dicts are wrapped exactly once and reused.
        dataid = id(data)
        did[ dataid ] = self
        for k in data:
            dkid = id(data[k])
            if did.has_key(dkid):
                # Already-wrapped sub-dict (shared reference or cycle).
                self[k] = did[dkid]
            elif isinstance( data[k], Settings ):
                self[k] = data[k]
            elif isinstance( data[k], dict ):
                obj = Settings()
                obj.__update( data[k], did )
                self[k] = obj
                obj = None
            else:
                self[k] = data[k]
    def __getitem__(self, item):
        # Indexing is routed through __getattr__ so missing keys yield None.
        return self.__getattr__(item)
    def __getattr__( self, key ):
        # dict.get semantics: unknown keys return None rather than raising.
        return self.get( key, None )
    def __setattr__( self, key, value ):
        # Attribute writes land in the dict; plain dict values get wrapped.
        if isinstance(value,dict):
            self[key] = Settings( value )
        else:
            self[key] = value
    def update( self, *args ):
        # Shallow merge of each mapping in *args*; later keys overwrite.
        # Unlike dict.update this wraps dict values and returns self.
        for obj in args:
            for k in obj:
                if isinstance(obj[k],dict):
                    self[k] = Settings( obj[k] )
                else:
                    self[k] = obj[k]
        return self
    def merge( self, *args ):
        # Deep merge: lists concatenate, Settings/dicts recurse, and a
        # scalar collision becomes an [old, new] list.  Returns self.
        for obj in args:
            for k in obj:
                if self.has_key(k):
                    if isinstance(self[k],list) and isinstance(obj[k],list):
                        self[k] += obj[k]
                    elif isinstance(self[k],list):
                        self[k].append( obj[k] )
                    elif isinstance(obj[k],list):
                        self[k] = [self[k]] + obj[k]
                    elif isinstance(self[k],Settings) and isinstance(obj[k],Settings):
                        self[k].merge( obj[k] )
                    # Reached when obj[k] is a plain dict, not a Settings.
                    elif isinstance(self[k],Settings) and isinstance(obj[k],dict):
                        self[k].merge( obj[k] )
                    else:
                        self[k] = [ self[k], obj[k] ]
                else:
                    if isinstance(obj[k],dict):
                        self[k] = Settings( obj[k] )
                    else:
                        self[k] = obj[k]
        return self
def load(config_file):
    """Parse *config_file* as YAML and wrap the result in a Settings object.

    An empty file parses to None, which is replaced by an empty dict.
    """
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary objects; prefer yaml.safe_load for untrusted config files.
    with open(config_file) as fd:
        raw = fd.read()
    parsed = yaml.load(raw) or dict()
    return Settings(parsed)
if __name__ == '__main__':
''' '''
settings = load('./config.yaml')
| bsd-3-clause | Python |
|
8b9fe74976d77df32d73792f74ef4ddea1eb525f | Add Config.get() to skip KeyErrors | royrapoport/destalinator,TheConnMan/destalinator,royrapoport/destalinator,underarmour/destalinator,randsleadershipslack/destalinator,randsleadershipslack/destalinator,TheConnMan/destalinator | config.py | config.py | #! /usr/bin/env python
import os
import warnings
import yaml
class Config(object):
    """Read-only view over the YAML configuration file.

    Keys are exposed both as attributes (missing keys raise KeyError,
    mirroring dict indexing) and through get() with a fallback value.
    """

    config_fname = "configuration.yaml"

    def __init__(self, config_fname=None):
        """Load and parse the YAML file (defaults to configuration.yaml)."""
        path = config_fname if config_fname else self.config_fname
        with open(path, "r") as fo:
            blob = fo.read()
        self.config = yaml.load(blob)

    def __getattr__(self, attrname):
        """Expose every configuration key as an attribute."""
        # The slack_name key is deprecated; warn but still serve the value.
        if attrname == "slack_name":
            warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" %
                          self.config_fname, DeprecationWarning)
        return self.config[attrname]

    def get(self, attrname, fallback=None):
        """Return the configured value for *attrname*, or *fallback* if unset."""
        try:
            return self.config[attrname]
        except KeyError:
            return fallback
# This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME
SLACK_NAME = os.getenv("SLACK_NAME")
if SLACK_NAME is None:
    # Fall back to the deprecated config key (emits the DeprecationWarning).
    SLACK_NAME = Config().slack_name
| #! /usr/bin/env python
import os
import warnings
import yaml
class Config(object):
    # Loads configuration.yaml (or a caller-supplied path) once at
    # construction time and exposes every top-level key as an attribute.
    config_fname = "configuration.yaml"
    def __init__(self, config_fname=None):
        config_fname = config_fname or self.config_fname
        fo = open(config_fname, "r")
        blob = fo.read()
        fo.close()
        self.config = yaml.load(blob)
    def __getattr__(self, attrname):
        # The slack_name key is deprecated in favour of the SLACK_NAME env var.
        if attrname == "slack_name":
            warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" %
                          self.config_fname, DeprecationWarning)
        # Missing keys surface as KeyError, mirroring dict indexing.
        return self.config[attrname]
# This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME
SLACK_NAME = os.getenv("SLACK_NAME")
if SLACK_NAME is None:
SLACK_NAME = Config().slack_name
| apache-2.0 | Python |
7dde102dd51db08f9021234fa3d8f11ab165b210 | add custom_preprocess.py | JasonWayne/avazu-nn | src/custom_preprocess.py | src/custom_preprocess.py | import unittest
import csv
from datetime import datetime, timedelta
def load_raw_data_and_split_by_dt(path, output_dir):
    """Split the raw click-log CSV at *path* into one file per day.

    The third column holds a YYMMDDHH timestamp; each row is routed to
    ``<output_dir>/<YYMMDD>.csv`` (dates 141021..141030) with the hour kept
    as its own column.  The input header line is dropped.

    Fixes vs. the original: output files are now closed (they were leaked,
    potentially losing buffered data), and the input is opened in text mode
    so csv.reader works on Python 3 as well.
    """
    base_datetime = datetime.strptime('141021', '%y%m%d')
    day_keys = [(base_datetime + timedelta(days=x)).strftime('%y%m%d')
                for x in range(10)]
    output_file_dict = {dt: open(output_dir + '/' + dt + '.csv', 'w')
                        for dt in day_keys}
    try:
        with open(path, 'r') as csvfile:
            csvfile.readline()  # skip the header row
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                hour_column = row[2]  # e.g. '14102113' -> day '141021', hour '13'
                dt = hour_column[:6]
                hour = hour_column[6:]
                output_file_dict[dt].write(
                    ",".join(row[:2] + [hour] + row[3:]) + "\n")
    finally:
        for handle in output_file_dict.values():
            handle.close()
class TestCustomPreprocess(unittest.TestCase):
    # Smoke test: runs the splitter on the checked-in fixture and only
    # checks that it does not raise; the produced per-day files are not
    # inspected.  Paths are relative, so run the tests from this directory.
    def test_load_raw_data_and_split_by_dt(self):
        load_raw_data_and_split_by_dt('../fixtures/train.thumb', '../fixtures')
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
1f5134b36846cf0e5e936888a4fe51a2012e0d78 | Create alternate_disjoint_set.py (#2302) | TheAlgorithms/Python | data_structures/disjoint_set/alternate_disjoint_set.py | data_structures/disjoint_set/alternate_disjoint_set.py | """
Implements a disjoint set using Lists and some added heuristics for efficiency
Union by Rank Heuristic and Path Compression
"""
class DisjointSet:
    """Disjoint-set (union-find) structure over pre-sized sets.

    Uses the union-by-rank heuristic plus path compression; ``max_set``
    tracks the size of the largest set produced so far.
    """

    def __init__(self, set_counts: list) -> None:
        """Record the element count of each initial set; all start at rank 1."""
        num_sets = len(set_counts)
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        self.ranks = [1 for _ in range(num_sets)]
        self.parents = [i for i in range(num_sets)]

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing *src* and *dst* by rank.

        Returns True when two distinct sets were joined, False when the
        elements already share a set.

        >>> A = DisjointSet([1, 1, 1])
        >>> A.merge(1, 2)
        True
        >>> A.merge(0, 2)
        True
        >>> A.merge(0, 1)
        False
        """
        src_root = self.get_parent(src)
        dst_root = self.get_parent(dst)
        if src_root == dst_root:
            return False
        # The higher-ranked root absorbs the other; ties go to dst's root,
        # whose rank then grows by one.
        if self.ranks[dst_root] >= self.ranks[src_root]:
            winner, loser = dst_root, src_root
            if self.ranks[winner] == self.ranks[loser]:
                self.ranks[winner] += 1
        else:
            winner, loser = src_root, dst_root
        self.set_counts[winner] += self.set_counts[loser]
        self.set_counts[loser] = 0
        self.parents[loser] = winner
        self.max_set = max(self.max_set, self.set_counts[winner])
        return True

    def get_parent(self, disj_set: int) -> int:
        """Return the root of *disj_set*'s set, compressing the path walked.

        >>> A = DisjointSet([1, 1, 1])
        >>> A.merge(1, 2)
        True
        >>> A.get_parent(0)
        0
        >>> A.get_parent(1)
        2
        """
        parent = self.parents[disj_set]
        if parent == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(parent)
        return self.parents[disj_set]
| mit | Python |
|
4c53ffbd9b23238b3402752f33fcabb2724921f4 | Add dunder init for lowlevel. | python-astrodynamics/astrodynamics,python-astrodynamics/astrodynamics | astrodynamics/lowlevel/__init__.py | astrodynamics/lowlevel/__init__.py | # coding: utf-8
from __future__ import absolute_import, division, print_function
| mit | Python |
|
ae79ca36e3cfca362414f2293a4c6d295c6db38b | Create addroundkey.py | deekshadangwal/PyRTL,UCSBarchlab/PyRTL,nvandervoort/PyRTL,nvandervoort/PyRTL,deekshadangwal/PyRTL,UCSBarchlab/PyRTL | research/aes/addroundkey.py | research/aes/addroundkey.py | import sys
sys.path.append("../..")
import pyrtl
from pyrtl import *
import keyexpansion
from keyexpansion import *
""" AddRoundKey round of AES.
Input: 128-bit state array.
Output: 128-bit state array.
"""
def _addroundkey(state, expanded_key, key_index, wire_id):
    """XOR *state* with round key expanded_key[128*key_index : 128*(key_index+1)].

    *wire_id* keeps the generated debug-wire names ('input_wire_N' and
    'new_N') identical to the ones the original per-round functions made.
    Returns the new 128-bit state wire.
    """
    lo = 128 * key_index
    input_wire = pyrtl.WireVector(bitwidth=128, name='input_wire_%d' % wire_id)
    input_wire <<= state
    new_state = pyrtl.WireVector(bitwidth=128, name='new_%d' % wire_id)
    new_state <<= state ^ expanded_key[lo:lo + 128]
    return new_state


# The initial AddRoundKey and the ten per-round variants only differ in
# which 128-bit slice of the expanded key they use and in their debug wire
# names, so each public function below delegates to _addroundkey.

def addroundkey_initial(state, expanded_key):
    """Initial AddRoundKey (key bits [1280:1408])."""
    return _addroundkey(state, expanded_key, 10, 1)

def addroundkey_1(state, expanded_key):
    """Round 1 AddRoundKey (key bits [1152:1280])."""
    return _addroundkey(state, expanded_key, 9, 2)

def addroundkey_2(state, expanded_key):
    """Round 2 AddRoundKey (key bits [1024:1152])."""
    return _addroundkey(state, expanded_key, 8, 3)

def addroundkey_3(state, expanded_key):
    """Round 3 AddRoundKey (key bits [896:1024])."""
    return _addroundkey(state, expanded_key, 7, 4)

def addroundkey_4(state, expanded_key):
    """Round 4 AddRoundKey (key bits [768:896])."""
    return _addroundkey(state, expanded_key, 6, 5)

def addroundkey_5(state, expanded_key):
    """Round 5 AddRoundKey (key bits [640:768])."""
    return _addroundkey(state, expanded_key, 5, 6)

def addroundkey_6(state, expanded_key):
    """Round 6 AddRoundKey (key bits [512:640])."""
    return _addroundkey(state, expanded_key, 4, 7)

def addroundkey_7(state, expanded_key):
    """Round 7 AddRoundKey (key bits [384:512])."""
    return _addroundkey(state, expanded_key, 3, 8)

def addroundkey_8(state, expanded_key):
    """Round 8 AddRoundKey (key bits [256:384])."""
    return _addroundkey(state, expanded_key, 2, 9)

def addroundkey_9(state, expanded_key):
    """Round 9 AddRoundKey (key bits [128:256])."""
    return _addroundkey(state, expanded_key, 1, 10)

def addroundkey_10(state, expanded_key):
    """Round 10 AddRoundKey (key bits [0:128])."""
    return _addroundkey(state, expanded_key, 0, 11)
# Hardware build.
# NOTE(review): this harness cannot run as written -- `addroundkey_x` is not
# defined anywhere in this module, and the round functions above take
# (state, expanded_key), not a single argument.  Left unchanged pending the
# author's intent.
aes_input = pyrtl.Input(bitwidth=128, name='aes_input')
aes_output = pyrtl.Output(bitwidth=128, name='aes_output')
aes_output <<= addroundkey_x(aes_input)
print pyrtl.working_block()
print
# Simulate a single cycle with a constant input and render the waveform.
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(1):
    sim.step({aes_input: 0xff})
sim_trace.render_trace(symbol_len=5, segment_size=5)
| bsd-3-clause | Python |
|
94f922c77ee89a5b54b99e135a5045f450badb0e | add new script to dump nice looking release notes like. Borrowed from antlr. | parrt/intellij-plugin-v4,antlr/intellij-plugin-v4,antlr/intellij-plugin-v4,parrt/intellij-plugin-v4 | scripts/github_release_notes.py | scripts/github_release_notes.py | # Get github issues / PR for a release
# Exec with "python github_release_notes.py YOUR_GITHUB_API_ACCESS_TOKEN 1.19"
import sys
from collections import Counter
from github import Github
# Command-line arguments: API token and milestone title (e.g. "1.19").
TOKEN=sys.argv[1]
MILESTONE=sys.argv[2]
g = Github(login_or_token=TOKEN)
# Then play with your Github objects:
org = g.get_organization("antlr")
repo = org.get_repo("intellij-plugin-v4")
# Milestones are matched by exact title; an IndexError below means the
# requested milestone does not exist.
milestone = [x for x in repo.get_milestones() if x.title==MILESTONE]
milestone = milestone[0]
issues = repo.get_issues(state="closed", milestone=milestone, sort="created", direction="desc")
# dump bugs fixed
print()
print("## Issues fixed")
for x in issues:
    labels = [l.name for l in x.labels]
    # Plain issues (not PRs) that are not tagged as improvements/features.
    # NOTE(review): this filter checks "type:improvement" but the section
    # below checks "type:enhancement" -- confirm which label the repo uses.
    if x.pull_request is None and not ("type:improvement" in labels or "type:feature" in labels):
        print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join([l.name for l in x.labels])))
# dump improvements closed for this release (issues or pulls)
print()
print("## Improvements, features")
for x in issues:
    labels = [l.name for l in x.labels]
    if ("type:enhancement" in labels or "type:feature" in labels):
        print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join(labels)))
# dump PRs closed for this release
print()
print("## Pull requests")
for x in issues:
    labels = [l.name for l in x.labels]
    if x.pull_request is not None:
        print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join(labels)))
# dump contributors
print()
print("## Contributors")
# Rank users by how many closed items in the milestone they appear on.
user_counts = Counter([x.user.login for x in issues])
users = {x.user.login:x.user for x in issues}
for login,count in user_counts.most_common(10000):
    name = users[login].name
    logins = f" ({users[login].login})"
    if name is None:
        # Fall back to the login when the profile has no display name.
        name = users[login].login
        logins = ""
    print(f"* {count:3d} items: [{name}]({users[login].html_url}){logins}")
| bsd-3-clause | Python |
|
fe08ce77958c637539b24817ffca45587fa31a7e | Implement shared API | platformio/platformio-core,platformio/platformio-core | platformio/shared.py | platformio/shared.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-import
from platformio.device.filters.base import DeviceMonitorFilterBase
from platformio.device.list import list_serial_ports
from platformio.fs import to_unix_path
from platformio.platform.base import PlatformBase
from platformio.project.config import ProjectConfig
from platformio.project.helpers import load_build_metadata
from platformio.test.result import TestCase, TestCaseSource, TestStatus
from platformio.test.runners.base import TestRunnerBase
from platformio.test.runners.doctest import DoctestTestCaseParser
from platformio.test.runners.googletest import GoogletestTestRunner
from platformio.test.runners.unity import UnityTestRunner
from platformio.util import get_systype
| apache-2.0 | Python |
|
42297354f575e2c82346cf033202c5dfad5ddd99 | Add python class for writing out xyz files of trajectory coordinates | westpa/westpa | lib/examples/nacl_amb/utils.py | lib/examples/nacl_amb/utils.py | #!/usr/bin/env python
import numpy
class TrajWriter(object):
    '''
    A class for writing out trajectory traces as an xyz file, for subsequent
    visualization.

    *trace* must expose parallel sequences ``iteration`` and ``seg_id``;
    *w* is assumed to be a WESTPA data reader whose ``iteration`` property
    selects the current iteration and whose ``current.auxdata['coord']``
    holds per-segment coordinate arrays -- TODO confirm against the caller.
    '''
    def __init__(self, trace, w, filename='trace.xyz'):
        self.trace = trace
        self.w = w
        self.filename = filename
        # Writing happens eagerly at construction time.
        self._write()
    def _get_coords(self, iteration, seg_id):
        # Side effect: repositions *w* on *iteration* (restored in _write).
        self.w.iteration = iteration
        coords = self.w.current.auxdata['coord'][seg_id]
        return coords
    def _write(self):
        all_coords = []
        starting_iteration = self.w.iteration
        for i, iteration in enumerate(self.trace.iteration):
            seg_id = self.trace.seg_id[i]
            coords = self._get_coords(iteration, seg_id)
            # The last timepoint of one iteration is the same as the first
            # timepoint of the last, so skip the last timepoint of each
            # iteration
            coords = coords[:-1]
            all_coords.append(coords)
        # Restore the reader to the iteration the caller had selected.
        self.w.iteration = starting_iteration
        all_coords = numpy.concatenate(all_coords)
        with open(self.filename, 'w') as outfile:
            # Two-atom xyz frames: atom count line, comment line (frame
            # index), then one line each for the Na+ and Cl- positions.
            for i, frame in enumerate(all_coords):
                outfile.write("2\n")
                outfile.write("{0}\n".format(i))
                outfile.write("SOD {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[0,0]), float(frame[0,1]), float(frame[0,2])))
                outfile.write("CLA {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[1,0]), float(frame[1,1]), float(frame[1,2])))
| mit | Python |
|
d5125205801b9771115a052162ee700f64601557 | Create frequency.py | nlpub/russe-evaluation,nlpub/russe-evaluation,nlpub/russe-evaluation | frequency.py | frequency.py | import sys
import csv
csv.field_size_limit(sys.maxsize)
from pymystem3 import Mystem
import time
import cProfile
from collections import defaultdict
class CsvHandler:
    # Builds a lemma frequency list from a tab-separated Wikipedia dump
    # (article text expected in the third column), using pymystem3 for
    # lemmatisation.
    INPUTFILE = 'wiki_noxml_full.txt'
    OUTPUTFILE = 'my_frequency_list.csv'
    def __init__(self):
        self.file_name = self.INPUTFILE
        self.csvlength = 0
        self.lemmatiser = Mystem()
        #self.freq_dict = {}
        # NOTE(review): self.fd is used as an int counter below, so
        # defaultdict(int) was probably intended; defaultdict(dict) only
        # works because membership is always tested before access.
        self.fd = defaultdict(dict)
    # Profiling decorator (a plain function despite living in the class
    # body; it takes the wrapped function, not self).  Currently unused.
    def do_cprofile(func):
        def profiled_func(*args, **kwargs):
            profile = cProfile.Profile()
            try:
                profile.enable()
                result = func(*args, **kwargs)
                profile.disable()
                return result
            finally:
                profile.print_stats()
        return profiled_func
    def get_freq_dict(self, filename):
        # Count, over all articles, in how many articles each lemma occurs
        # (one increment per article, since get_lem_set returns a set).
        t0 = time.time()
        print("Start freq dict")
        counter = 0
        with open(filename, 'r') as csvfile:
            datareader = csv.reader(csvfile, delimiter='\t')
            for ln, row in enumerate(datareader):
                if ln % 100 == 0: print(ln, "articles processed")
                input_text = row[2]
                counter += 1
                #if counter > 10:
                #break
                lemmas = self.get_lem_set(input_text)
                for i,li in enumerate(lemmas):
                    self.fd[li] = 1 if li not in self.fd else self.fd[li] + 1
        t1 = time.time()
        for a,b in self.fd.items():
            print(a,b)
        print("Finished. Get input file processing time %2.2f secs, whoosh !" % (t1 - t0))
    def get_lem_set(self, text):
        # Lemmas of adjectives (A=), nouns (S,) and verbs (V=) longer than
        # one character, deduplicated via a set.
        return_set = set()
        for el in self.lemmatiser.analyze(text):
            analysis = el.get('analysis', None)
            if analysis:
                POS = ['A=', 'S,', 'V=']
                if (analysis[0].get('gr')[0:2] in POS) and (len(analysis[0].get('lex'))>1):
                    return_set.add(analysis[0].get('lex'))
        return return_set
    def output_dict(self, filename, output_dictionary, threshold):
        # Write 'first:::second' keyed counts above *threshold* as CSV rows.
        # NOTE(review): expects bigram keys ('a:::b'); the unigram keys in
        # self.fd would raise IndexError at words[1] -- confirm the
        # intended input.
        t0 = time.time()
        with open(filename, 'w', newline='', encoding="UTF-8") as csv_file:
            csv_writer = csv.writer(csv_file, dialect='excel')
            csv_writer.writerow(["First word", "Second word", "Frequency"])
            for key in output_dictionary.keys():
                if output_dictionary[key] > threshold:
                    words = key.split(':::')
                    first_word = words[0]
                    second_word = words[1]
                    csv_writer.writerow([
                        first_word,
                        second_word,
                        output_dictionary[key]
                    ])
            # flush()/close() are redundant inside the with-block, but harmless.
            csv_file.flush()
            csv_file.close()
        t1 = time.time()
        print("Finished. Get output file processing time %2.2f secs, whoosh !" % (t1 - t0))
    def process(self):
        # Pipeline entry point; the output/sorting steps are disabled.
        self.get_freq_dict(self.file_name)
        #if self.freq_dict:
        #t0 = time.time()
        #sorted_dict = self.sort_dict()
        #t1 = time.time()
        #print("Finished. Sorting - processing time %2.2f secs, whoosh !" % (t1 - t0))
        #self.output_dict(self.OUTPUTFILE, sorted_dict, 2)
        #self.output_dict(self.OUTPUTFILE, self.freq_dict, 2)
if __name__ == '__main__':
    # Command-line entry point: build the handler, run the pipeline, and
    # report total wall-clock time.
    print("Start")
    c = CsvHandler()
    t0 = time.time()
    c.process()
    t1 = time.time()
    print("Finished. Total processing time %2.2f secs, whoosh !" % (t1 - t0))
| mit | Python |
|
0c719d59b6155ed50692810fab57814370fde1bb | Create fcp_xml2csv.py | Kevo89/OpenPeelTools | fcp_xml2csv.py | fcp_xml2csv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################
# FoodCheckPeel XML2CSV Converter
#
# This script converts the FoodCheckPeel XML
# file to the Comma Separated Values file type
# so it's easier to import to common spreadsheet
# applications.
# Based on the script posted here:
# http://www.bearpanther.com/2013/09/14/converting-xml-to-csv-with-python/
#
# Programmed with Python 2.7 + lxml 3.2.4
# Kevin Farrugia
############################################
__author__ = "Kevin Farrugia"
__copyright__ = "(c) 2014 Kevin Farrugia"
__license__ = "MIT"
__credits__ = "Derek Swingley (author of Bear Panther)"
import csv
from lxml import etree
print "Welcome to the FoodCheck Peel XML to CSV Conversion utility!\n"
print "============================================================\n"
print "\tPlease make sure that the XML file is the latest\n\t version so that your data isn't out of date.\n"
print "Authored by Kevin Farrugia - Last updated January 2014\n"
print "------------------------------------------------------------\n"
outFileName = raw_input("What would you like the output file to be named (this defaults to the correct FoodCheckPeel.csv)? ")
if outFileName.strip() is "":
outFileName = "FoodCheckPeel.csv"
outputData = []
# File parser
# Recover set to True to allow it to try to work through broken XML
# remove_blank_text set to True so that it removes tailing whitespace
fileParse = etree.XMLParser(recover=True, remove_blank_text=True)
#The input XML file name - in our case, it is static and predictable so we don't allow users to change it
fileName = "FoodCheckPeel.XML"
#Parse the XML
root = etree.parse(fileName, fileParse)
#Names of elements that will be carried over (in this case, all of them)
headers = [ "FACILITY_NUMBER", "FACILITY_NAME", "FACILITY_TYPE", "STREET_NUMBER", "STREET_NAME", "STREET_DIR", "CITY", "X", "Y", "LAT", "LON", "INSPECTION_DATE", "STATUS", "INSPECTION_ID", "INSPECTION_TYPE", "INFRACTION_ID", "INFRACTION_TYPE" ]
#Here is where we grab information for each FCP location and parse it out
def getInfo(p):
rowData = []
for attribute in headers:
node = p.find(attribute)
if node == "LAT" or node == "LON": #Maybe I should change this to X & Y and 2 decimals (still sub-metre accuracy)
#This is to round off the lat and long so that the filesize isn't as large
# 5 decimal places comes out to 0.7871m of accuracy error E/W at 45 degrees N/S
rowData.append(round(float(node.text),5))
else:
rowData.append(node.text.encode("utf-8"))
else:
rowData.append("")
return rowData
print "\nReading the Food Check Peel XML..."
print "\n\t...please be patient while it reads and writes the files..."
# Each <ROW> element is one inspection record.
location = root.findall("ROW")
for p in location:
    locationStatus = getInfo(p)
    # NOTE(review): getInfo returns a list with one entry per header, so
    # this truthiness filter never drops a row while `headers` is non-empty.
    if locationStatus:
        outputData.append(locationStatus)
print "\n...finished parsing the XML, starting to write the file..."
# Binary mode, as the Python 2 csv docs recommend for writer targets.
outputFile = open(outFileName, "wb")
#This writes the CSV using Python's CSV plugin
# quoting = QUOTE_MINIMAL is used for a couple of reasons:
# (1) It quotes text only where there are special characters that would interfere with the correct use of the CSV
# (2) It keeps the file size to a minimum and allows the end user more control over how the field types are interpreted
# As an alternate, QUOTE_NONNUMERIC could be used to quote all fields that contain text. This, however, makes non-quoted fields of type float (for better or worse)
# See http://docs.python.org/2/library/csv.html for more options and info
writeCSV = csv.writer(outputFile, quoting=csv.QUOTE_MINIMAL)
writeCount = 0
for row in outputData:
    writeCSV.writerow(row)
    writeCount += 1
outputFile.close()
print "\n------------------------------------------------------------"
print "\nWrote " + str(writeCount) + " rows out to the " + str(outFileName) + " output file."
print "Great success! Double check the output, though!"
| mit | Python |
|
88bd6466940d21d52c0d5235ace10b6a97d69d46 | Create emailtoHIBP.py | SudhanshuC/Maltego-Transforms,cmlh/Maltego-Transforms | emailtoHIBP.py | emailtoHIBP.py | #!/usr/bin/python
#EmailtoHIBP.py
#Author: Sudhanshu Chauhan - @Sudhanshu_C
#This Script will retrieve the Domain(s) at which the specified account has been compromised
#It uses the API provided by https://haveibeenpwned.com/
#Special Thanks to Troy Hunt - http://www.troyhunt.com/
#For MaltegoTransform library and Installation guidelines go to http://www.paterva.com/web6/documentation/developer-local.php
from MaltegoTransform import *
import sys
import urllib2
# Parse the Maltego transform arguments to obtain the e-mail address, query
# the HIBP breachedaccount endpoint, and emit one phrase per response line.
mt = MaltegoTransform();
mt.parseArguments(sys.argv);
email=mt.getValue();
# NOTE(review): this second MaltegoTransform() discards the instance above;
# only the `email` value extracted from the first one is kept.
mt = MaltegoTransform()
hibp="https://haveibeenpwned.com/api/breachedaccount/"
getrequrl=hibp+email
response = urllib2.urlopen(getrequrl)
# NOTE(review): iterates the raw HTTP body line by line; if the API answers
# with a single JSON document each entity gets a chunk of JSON text --
# verify against the live endpoint.
for rep in response:
    mt.addEntity("maltego.Phrase","Pwned at " + rep)
mt.returnOutput()
| mit | Python |
|
450557e0bfb902de862e5fe42868d3fbf7165600 | Add lc0983_minimum_cost_for_tickets.py from Hotel Schulz Berlin | bowen0701/algorithms_data_structures | lc0983_minimum_cost_for_tickets.py | lc0983_minimum_cost_for_tickets.py | """Leetcode 983. Minimum Cost For Tickets
Medium
URL: https://leetcode.com/problems/minimum-cost-for-tickets/
In a country popular for train travel, you have planned some train travelling
one year in advance. The days of the year that you will travel is given as
an array days. Each day is an integer from 1 to 365.
Train tickets are sold in 3 different ways:
- a 1-day pass is sold for costs[0] dollars;
- a 7-day pass is sold for costs[1] dollars;
- a 30-day pass is sold for costs[2] dollars.
The passes allow that many days of consecutive travel.
For example, if we get a 7-day pass on day 2, then we can travel for 7 days:
day 2, 3, 4, 5, 6, 7, and 8.
Return the minimum number of dollars you need to travel every day in the given
list of days.
Example 1:
Input: days = [1,4,6,7,8,20], costs = [2,7,15]
Output: 11
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 1-day pass for costs[0] = $2, which covered day 1.
On day 3, you bought a 7-day pass for costs[1] = $7, which covered days 3, 4, ..., 9.
On day 20, you bought a 1-day pass for costs[0] = $2, which covered day 20.
In total you spent $11 and covered all the days of your travel.
Example 2:
Input: days = [1,2,3,4,5,6,7,8,9,10,30,31], costs = [2,7,15]
Output: 17
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 30-day pass for costs[2] = $15 which covered days 1, 2, ..., 30.
On day 31, you bought a 1-day pass for costs[0] = $2 which covered day 31.
In total you spent $17 and covered all the days of your travel.
Note:
- 1 <= days.length <= 365
- 1 <= days[i] <= 365
- days is in strictly increasing order.
- costs.length == 3
- 1 <= costs[i] <= 1000
"""
class Solution(object):
    def mincostTickets(self, days, costs):
        """Return the minimum dollars needed to cover every travel day.

        Bottom-up DP over calendar days 1..days[-1]: dp[d] is the cheapest
        way to cover all travel days <= d.  A non-travel day carries the
        previous cost forward; a travel day takes the best of ending a 1-,
        7- or 30-day pass on that day.  O(days[-1]) time and space.

        :type days: List[int]  (strictly increasing, values in 1..365)
        :type costs: List[int]  (prices of the 1-, 7- and 30-day passes)
        :rtype: int
        """
        if not days:
            return 0
        travel_days = set(days)
        last_day = days[-1]
        dp = [0] * (last_day + 1)
        for day in range(1, last_day + 1):
            if day not in travel_days:
                dp[day] = dp[day - 1]
            else:
                dp[day] = min(
                    dp[day - 1] + costs[0],           # 1-day pass
                    dp[max(0, day - 7)] + costs[1],   # 7-day pass
                    dp[max(0, day - 30)] + costs[2],  # 30-day pass
                )
        return dp[last_day]
def main():
    # Placeholder driver; the Solution class above is what the judge invokes.
    pass
if __name__ == '__main__':
    main()
| bsd-2-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.