commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang |
---|---|---|---|---|---|---|---|---|
d10505678fd5624e5e88f72ac7852109f149b264 | Add new kcov package (#14574) | LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/kcov/package.py | var/spack/repos/builtin/packages/kcov/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Kcov(CMakePackage):
"""Code coverage tool for compiled programs, Python and Bash which uses
debugging information to collect and report data without special
compilation options"""
homepage = "http://simonkagstrom.github.io/kcov/index.html"
url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz"
version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')
depends_on('cmake@2.8.4:', type='build')
depends_on('zlib')
depends_on('curl')
def cmake_args(self):
# Necessary at least on macOS, fixes linking error to LLDB
# https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb
return ['-DSPECIFY_RPATH=ON']
@run_after('install')
@on_package_attributes(run_tests=True)
def test(self):
# The help message exits with an exit code of 1
kcov = Executable(self.prefix.bin.kcov)
kcov('-h', ignore_errors=1)
| lgpl-2.1 | Python |
|
a6e65ac7378b12cc6889199cac602a8fbee4b6e8 | add nagios check on autoplot metrics | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | nagios/check_autoplot.py | nagios/check_autoplot.py | """Check autoplot stats"""
from __future__ import print_function
import sys
import psycopg2
def main():
"""Go Main Go"""
pgconn = psycopg2.connect(database='mesosite', host='iemdb',
user='nobody')
cursor = pgconn.cursor()
cursor.execute("""
select count(*), avg(timing) from autoplot_timing
where valid > now() - '4 hours'::interval
""")
(count, speed) = cursor.fetchone()
speed = 0 if speed is None else speed
print(("Autoplot cnt:%s speed:%.2f | COUNT=%s;; SPEED=%.3f;;"
) % (count, speed, count, speed))
sys.exit(0)
if __name__ == '__main__':
main()
| mit | Python |
|
b39eeea0b25e1e5bcec1d762a041e5ecf465885c | add solution for Reorder List | zhyu/leetcode,zhyu/leetcode | src/reorderList.py | src/reorderList.py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @return nothing
def reorderList(self, head):
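        # Find the middle with slow/fast pointers, reverse the second half, then interleave the two halves.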
if head is None or head.next is None:
return
slow = fast = head
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
fast, slow.next = slow.next, None
fast = self.reverseList(fast)
self.merge2Lists(head, fast)
def reverseList(self, head):
if head is None or head.next is None:
return head
pre, cur = head, head.next
while cur:
nxt, cur.next = cur.next, pre
cur, pre = nxt, cur
head.next = None
return pre
def merge2Lists(self, l1, l2):
while l2:
n1, n2 = l1.next, l2.next
l1.next, l2.next = l2, n1
l1, l2 = n1, n2
| mit | Python |
|
68e056459dd3818ebb0c5dbdc8b4f1089bec9f07 | Add a few behavior tests for selection | photoshell/photoshell,SamWhited/photoshell,campaul/photoshell | tests/selection_test.py | tests/selection_test.py | import os
import pytest
import yaml
from photoshell.selection import Selection
@pytest.fixture
def sidecar(tmpdir):
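    # Write a minimal sidecar YAML file into tmpdir and return its path.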
tmpdir.join("test.sidecar").write(yaml.dump({
'developed_path': os.path.join(tmpdir.strpath, "test.jpeg"),
'datetime': '2014-10-10 00:00'
}, default_flow_style=False))
return os.path.join(tmpdir.strpath, "test.sidecar")
@pytest.fixture
def empty_selection():
s = Selection('', '')
return s
@pytest.fixture
def selection(empty_selection):
empty_selection.images.append('image')
empty_selection.photos.append('image')
return empty_selection
def test_current_default_selection(selection):
assert selection.current()
def test_current_is_none_if_selection_empty(empty_selection):
assert empty_selection.current() is None
def test_current_photo_default_selection(selection):
assert selection.current_photo()
def test_current_photo_is_none_if_selection_empty(empty_selection):
assert empty_selection.current_photo() is None
def test_next_prev_does_nothing_single_photo(selection):
assert selection.current() == selection.next()
assert selection.current() == selection.prev()
def test_next_prev_wrap_around(selection):
selection.photos.append('photo2')
selection.images.append('image2')
assert selection.next() == 'image2'
assert selection.next() == 'image'
assert selection.prev() == 'image2'
assert selection.prev() == 'image'
| mit | Python |
|
63a34000402f4253f16221b11d620e65e1786447 | add solution for Reverse Bits | zhyu/leetcode,zhyu/leetcode | src/reverseBits.py | src/reverseBits.py | class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
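        # Zero-pad the binary form of n to 32 bits, reverse the string, and parse it back as an integer.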
return int(bin(n)[2:].zfill(32)[::-1], 2)
| mit | Python |
|
f81a612eabf5972d15a5b3f11d12897530cbf155 | Add dump-tree command (wip) | ustuehler/git-cvs,ustuehler/git-cvs | cvsgit/command/dump-tree.py | cvsgit/command/dump-tree.py | """Command to dump the full state of the source tree at a certain
point in time."""
import re
import subprocess
from subprocess import PIPE
import sys
from cvsgit.cvs import split_cvs_source
from cvsgit.i18n import _
from cvsgit.main import Command, Conduit
from cvsgit.utils import Tempdir, stripnl
class dump_tree(Command):
__doc__ = _(
"""Dump the source tree state at a certain date
Usage: %prog <date>
Computes and dumps the state of the source tree as it was at the
given <date>.
""")
def initialize_options(self):
pass
def finalize_options(self):
if len(self.args) > 0:
self.usage_error(_('too many arguments'))
def run(self):
conduit = Conduit()
cvs = conduit.cvs
for changeset in cvs.changesets():
print changeset
if __name__ == '__main__':
dump_tree()
| isc | Python |
|
ff2c4b68a5eace4451eeef4fd6ca84d37435c556 | Add fields to privatemessage for network invitations. | ProjectFacet/facet,ProjectFacet/facet,ProjectFacet/facet,ProjectFacet/facet,ProjectFacet/facet | project/editorial/migrations/0087_auto_20180226_1409.py | project/editorial/migrations/0087_auto_20180226_1409.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-26 22:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('editorial', '0086_auto_20180102_2145'),
]
operations = [
migrations.AddField(
model_name='privatemessage',
name='network_invitation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='editorial.Network'),
),
migrations.AddField(
model_name='privatemessage',
name='network_invitation_response',
field=models.NullBooleanField(),
),
]
| mit | Python |
|
136dd3f0b5dd9d8eecb6e7bc20c25d4d2c131ad6 | add new tool to list shared libraries deps | AlertMe/cerbero,jackjansen/cerbero,ford-prefect/cerbero,atsushieno/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,nirbheek/cerbero,shoreflyer/cerbero,OptoFidelity/cerbero,OptoFidelity/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,ylatuya/cerbero,brion/cerbero,justinjoy/cerbero,ford-prefect/cerbero,justinjoy/cerbero,brion/cerbero,nzjrs/cerbero,flexVDI/cerbero,multipath-rtp/cerbero,AlertMe/cerbero,EricssonResearch/cerbero,flexVDI/cerbero,superdump/cerbero,jackjansen/cerbero,davibe/cerbero,ikonst/cerbero,GStreamer/cerbero,flexVDI/cerbero,EricssonResearch/cerbero,AlertMe/cerbero,fluendo/cerbero,GStreamer/cerbero,fluendo/cerbero,fluendo/cerbero,nirbheek/cerbero-old,nirbheek/cerbero,superdump/cerbero,atsushieno/cerbero,AlertMe/cerbero,nirbheek/cerbero-old,ikonst/cerbero,brion/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,centricular/cerbero,fluendo/cerbero,sdroege/cerbero,superdump/cerbero,centricular/cerbero,EricssonResearch/cerbero,davibe/cerbero,ramaxlo/cerbero,nzjrs/cerbero,ramaxlo/cerbero,EricssonResearch/cerbero,davibe/cerbero,ylatuya/cerbero,OptoFidelity/cerbero,multipath-rtp/cerbero,brion/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,flexVDI/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,GStreamer/cerbero,lubosz/cerbero,ford-prefect/cerbero,atsushieno/cerbero,atsushieno/cerbero,flexVDI/cerbero,shoreflyer/cerbero,nzjrs/cerbero,centricular/cerbero,davibe/cerbero,shoreflyer/cerbero,jackjansen/cerbero,multipath-rtp/cerbero,lubosz/cerbero,OptoFidelity/cerbero,centricular/cerbero,atsushieno/cerbero,superdump/cerbero,ikonst/cerbero,sdroege/cerbero,ramaxlo/cerbero,sdroege/cerbero,nirbheek/cerbero-old,fluendo/cerbero,ramaxlo/cerbero,lubosz/cerbero,AlertMe/cerbero,centricular/cerbero,nirbheek/cerbero-old,nzjrs/cerbero,sdroege/cerbero,ylatuya/cerbero,GStreamer/cerbero,ikonst/cerbero,shoreflyer/cerbero,shoreflyer/cerbero,brion/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,ylatuya/cerbero,ford-prefect/cerbero,nzjrs/cerbero,multipath-rtp/cerbero,nirbheek/cerbero,justinjoy/cerbero,multipath-rtp/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,EricssonResearch/cerbero,lubosz/cerbero,nirbheek/cerbero,jackjansen/cerbero,sdroege/cerbero,justinjoy/cerbero,GStreamer/cerbero,ikonst/cerbero,ramaxlo/cerbero | cerbero/tools/depstracker.py | cerbero/tools/depstracker.py | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2013 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class RecursiveLister():
def list_file_deps(self, prefix, path):
        raise NotImplementedError()
def find_deps(self, prefix, lib, state={}, ordered=[]):
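        # Depth-first walk over the dependency graph; 'state' guards against cycles and repeat visits.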
if state.get(lib, 'clean') == 'processed':
return
if state.get(lib, 'clean') == 'in-progress':
return
state[lib] = 'in-progress'
lib_deps = self.list_file_deps(prefix, lib)
for libdep in lib_deps:
self.find_deps(prefix, libdep, state, ordered)
state[lib] = 'processed'
ordered.append(lib)
return ordered
def list_deps(self, prefix, path):
return self.find_deps(prefix, os.path.realpath(path), {}, [])
class ObjdumpLister(RecursiveLister):
def list_file_deps(self, prefix, path):
files = shell.check_call('objdump -x %s' % path).split('\n')
files = [x.split(' ')[2][:-1] for x in files if 'DLL ' in x]
files = [os.path.join(prefix, 'bin', x) for x in files if \
x.lower().endswith('dll')]
return [os.path.realpath(x) for x in files if os.path.exists(x)]
class OtoolLister(RecursiveLister):
def list_file_deps(self, prefix, path):
files = shell.check_call('otool -L %s' % path).split('\n')[1:]
return [x.split(' ')[0][1:] for x in files if prefix in x]
class LddLister():
def list_deps(self, prefix, path):
files = shell.check_call('ldd %s' % path).split('\n')
return [x.split(' ')[2] for x in files if prefix in x]
class DepsTracker():
BACKENDS = {
Platform.WINDOWS: ObjdumpLister,
Platform.LINUX: LddLister,
Platform.DARWIN: OtoolLister}
def __init__(self, platform, prefix):
self.libs_deps = {}
self.prefix = prefix
        if self.prefix[-1] != '/':
self.prefix += '/'
self.lister = self.BACKENDS[platform]()
def list_deps(self, path):
deps = self.lister.list_deps(self.prefix, path)
rdeps = []
for d in deps:
if os.path.islink(d):
rdeps.append(os.path.realpath(d))
return [x.replace(self.prefix, '') for x in deps + rdeps]
| lgpl-2.1 | Python |
|
e23b53a6326dbdb9df1e0f8d6711be1a9563c885 | Add tests for processes | jcpeterson/Dallinger,jcpeterson/Dallinger,suchow/Wallace,Dallinger/Dallinger,Dallinger/Dallinger,suchow/Wallace,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,berkeley-cocosci/Wallace,berkeley-cocosci/Wallace,berkeley-cocosci/Wallace,suchow/Wallace,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger | tests/test_processes.py | tests/test_processes.py | from wallace import processes, networks, agents, db
class TestProcesses(object):
def setup(self):
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def test_random_walk_from_source(self):
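        # Build a three-agent chain, feed it from a source, and check the message survives the walk.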
net = networks.Network(self.db)
agent1 = net.add_agent()
agent2 = net.add_agent()
agent3 = net.add_agent()
agent1.connect_to(agent2)
agent2.connect_to(agent3)
self.db.add_all([agent1, agent2, agent3])
self.db.commit()
source = agents.RandomBinaryStringSource()
net.add_local_source(source, agent1)
process = processes.RandomWalkFromSource(net)
process.step()
agent1.receive_all()
msg = agent1.ome.contents
process.step()
agent2.receive_all()
process.step()
agent3.receive_all()
assert msg == agent3.ome.contents
def test_moran_process_cultural(self):
# Create a fully-connected network.
net = networks.Network(self.db)
agent1 = net.add_agent()
agent2 = net.add_agent()
agent3 = net.add_agent()
agent1.connect_to(agent2)
agent1.connect_to(agent3)
agent2.connect_to(agent1)
agent2.connect_to(agent3)
agent3.connect_to(agent1)
agent3.connect_to(agent2)
self.db.add_all([agent1, agent2, agent3])
self.db.commit()
# Add a global source and broadcast to all the agents.
source = agents.RandomBinaryStringSource()
net.add_global_source(source)
source.broadcast()
self.db.commit()
for agent in net.agents:
agent.receive_all()
# Run a Moran process for 100 steps.
process = processes.MoranProcessCultural(net)
for i in range(100):
process.step()
for agent in net.agents:
agent.receive_all()
# Ensure that the process had reached fixation.
assert agent1.ome.contents == agent2.ome.contents
assert agent2.ome.contents == agent3.ome.contents
assert agent3.ome.contents == agent1.ome.contents
def test_moran_process_sexual(self):
# Create a fully-connected network.
net = networks.Network(self.db)
agent1 = net.add_agent()
agent2 = net.add_agent()
agent3 = net.add_agent()
agent1.connect_to(agent2)
agent1.connect_to(agent3)
agent2.connect_to(agent1)
agent2.connect_to(agent3)
agent3.connect_to(agent1)
agent3.connect_to(agent2)
self.db.add_all([agent1, agent2, agent3])
self.db.commit()
# Add a global source and broadcast to all the agents.
source = agents.RandomBinaryStringSource()
net.add_global_source(source)
source.broadcast()
self.db.commit()
for agent in net.agents:
agent.receive_all()
all_contents = [agent1.ome.contents,
agent2.ome.contents,
agent3.ome.contents]
# Run a Moran process for 100 steps.
process = processes.MoranProcessSexual(net)
for i in range(100):
process.step()
for agent in net.agents:
agent.receive_all()
# Ensure that the process had reached fixation.
assert agent1.status == "dead"
assert agent2.status == "dead"
assert agent3.status == "dead"
for agent in net.agents:
assert agent.ome.contents in all_contents
| mit | Python |
|
73084b964f964c05cb948be3acaa6ba68d62dc30 | test plotting particles | mufid/berkilau,mufid/berkilau | ws/CSUIBotClass2014/test/test_plot_particles.py | ws/CSUIBotClass2014/test/test_plot_particles.py | #!/usr/bin/python
# @author: vektor dewanto
# @obj: demonstrate how to plot particles in an occupancy grid map, _although_, for now, all positions are valid
import matplotlib.pyplot as plt
import numpy as np
import math
import matplotlib.cm as cmx
from matplotlib import colors
# Construct the occupancy grid map
grid_map = {'size': (10,10), 'res': 1.0}
grid = [1,1,1,1,1,1,1,1,1,1,\
1,0,0,1,0,1,0,0,0,1,\
1,0,0,1,0,1,0,0,0,1,\
1,0,0,0,0,1,0,1,1,1,\
1,1,1,1,0,0,0,0,0,1,\
1,0,0,1,0,0,0,0,0,1,\
1,0,0,0,0,0,0,0,0,1,\
1,0,0,1,0,0,0,0,0,1,\
1,0,0,1,0,0,0,0,1,1,\
1,1,1,1,1,1,1,1,1,1]
assert len(grid)==grid_map['size'][0]*grid_map['size'][1], 'grid size is mismatched'
grid = np.asarray(grid)
grid = grid.reshape(grid_map['size'][0], grid_map['size'][1])
grid_map['grid'] = grid
# Plot the map
plt.subplot(1,1,1)
plt.pcolormesh(grid_map['grid'], edgecolors='k', linewidths=0.1, cmap=colors.ListedColormap(['w','b']))
plt.title('The occupancy grid map with particles')
# At t=0, initiate X with n_particle particles drawn from a uniform distribution (since this is a global loc. problem)
# For now, we donot check whether the particle is on an occupied grid
n_particle = 100;
X_tmp = np.random.uniform(0.0, 10.0, n_particle)
Y_tmp = np.random.uniform(0.0, 10.0, n_particle)
THETA_tmp = np.random.uniform(0.0, math.pi*2.0, n_particle)
XYTHETA_tmp = zip(X_tmp, Y_tmp, THETA_tmp)
W = [1.0/n_particle] * n_particle# uniform
X = zip(XYTHETA_tmp, W)
# Plot positions, the color corresponds to the weight
ax = plt.axes()
ax.scatter([e[0][0] for e in X], [e[0][1] for e in X], c=[e[1] for e in X], marker='o', s=20, cmap=cmx.jet)
# Plot bearings
for e in X:
x = e[0][0]
y = e[0][1]
theta = e[0][2]
# convert polar to cartesian coord
r = 0.1
dx = r * math.cos(theta)
dy = r * math.sin(theta)
ax.arrow(x, y, dx, dy, head_width=0.05, head_length=0.1, fc='k', ec='k')
plt.show()
| mit | Python |
|
6342c6cab9b5dd0b34ca5de575ef82592474e1d5 | add mvnsite.py to build site without javadocs or test run | steveloughran/clusterconfigs,steveloughran/clusterconfigs | bin/mvnsite.py | bin/mvnsite.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import call
import sys
args = sys.argv[1 :]
# mvn site:site -Dmaven.javadoc.skip=true -DskipTests
call(["mvn.bat", "site:site", "-Dmaven.javadoc.skip=true", "-DskipTests"] + args)
| apache-2.0 | Python |
|
598d937f3f180e22a1b4793644ffdb1b9a26f261 | update crawler_main.py | CSnowstar/zhihu_crawler,SmileXie/zhihu_bigdata,SmileXie/zhihu_crawler | crawler_main.py | crawler_main.py | """
crawler study code
Author: smilexie1113@gmail.com
"""
import urllib.request
import os
import re
from collections import deque
ERROR_RETURN = "ERROR: "
def retrun_is_error(return_str):
return return_str[0 : len(ERROR_RETURN)] == ERROR_RETURN
def python_cnt(str):
return str.count("python")
def get_one_page(url):
try:
urlfd = urllib.request.urlopen(url, timeout = 2)
except Exception as ex:
return ERROR_RETURN + ("URL " + "\"" + url + "\"" + " open failed. " + str(ex))
if "html" not in urlfd.getheader("Content-Type"):
return ERROR_RETURN + ("URL " + "\"" + url + "\"" + "is not html page.")
try:
html_str = urlfd.read().decode("utf-8")
except:
return ERROR_RETURN + ("Fail to decode URL " + "\"" + url + "\"" + ".")
return html_str
if __name__ == "__main__":
start_url = "http://news.dbanotes.net/"
to_be_visited = deque()
visited = set()
cnt = 0
py_str_cnt = 0
to_be_visited.append(start_url)
while to_be_visited:
url = to_be_visited.popleft()
print(str(cnt) + "page(s) has been grabbed." + "URL " + "\"" + url + "\"" + " is being grabbed.")
html_str = get_one_page(url)
if retrun_is_error(html_str):
print(html_str)
continue
cnt += 1
visited |= {url}
py_cnt_tmp = python_cnt(html_str)
if py_cnt_tmp != 0:
py_str_cnt += py_cnt_tmp
print("Find %d \"python\" , total count %d" % (py_cnt_tmp, py_str_cnt))
#todo: parse the html_str
link_pattern = re.compile('href=\"(.+?)\"') #regular expression
for tmp_url in link_pattern.findall(html_str):
if "http" in tmp_url and tmp_url not in visited:
to_be_visited.append(tmp_url)
| """
crawler study code
Author: smilexie1113@gmail.com
"""
import urllib.request
import os
import re
from collections import deque
from filecmp import cmp
ERROR_RETURN = "ERROR:"
def retrun_is_error(return_str):
return return_str[0 : len(ERROR_RETURN)] == ERROR_RETURN
def python_cnt(str):
return str.count("python")
def get_one_page(url):
try:
urlfd = urllib.request.urlopen(url, timeout = 2)
except Exception as ex:
return ERROR_RETURN + ("URL " + "\"" + url + "\"" + " open failed. " + str(ex))
if "html" not in urlfd.getheader("Content-Type"):
return ERROR_RETURN + ("URL " + "\"" + url + "\"" + "is not html page.")
try:
html_str = urlfd.read().decode("utf-8")
except:
return ERROR_RETURN + ("Fail to decode URL " + "\"" + url + "\"" + ".")
return html_str
if __name__ == "__main__":
start_url = "http://news.dbanotes.net/"
to_be_visited = deque()
visited = set()
cnt = 0
py_str_cnt = 0
to_be_visited.append(start_url)
while to_be_visited:
url = to_be_visited.popleft()
print(str(cnt) + "page(s) has been grabbed." + "URL " + "\"" + url + "\"" + " is being grabbed.")
html_str = get_one_page(url)
if retrun_is_error(html_str):
print(html_str)
continue
cnt += 1
visited |= {url}
py_cnt_tmp = python_cnt(html_str)
if py_cnt_tmp != 0:
py_str_cnt += py_cnt_tmp
print("Find %d \"python\" , total count %d" % (py_cnt_tmp, py_str_cnt))
#todo: parse the html_str
link_pattern = re.compile('href=\"(.+?)\"') #links' regular expression
for tmp_url in link_pattern.findall(html_str):
if "http" in tmp_url and tmp_url not in visited:
to_be_visited.append(tmp_url)
| mit | Python |
e904341eb7b426ea583e345689249d7f13451dc9 | Add biome types. | mcedit/pymclevel,arruda/pymclevel,ahh2131/mchisel,mcedit/pymclevel,arruda/pymclevel,ahh2131/mchisel | biome_types.py | biome_types.py | biome_types = {
-1: "Will be computed",
0: "Ocean",
1: "Plains",
2: "Desert",
3: "Extreme Hills",
4: "Forest",
5: "Taiga",
6: "Swampland",
7: "River",
8: "Hell",
9: "Sky",
10: "FrozenOcean",
11: "FrozenRiver",
12: "Ice Plains",
13: "Ice Mountains",
14: "MushroomIsland",
15: "MushroomIslandShore",
16: "Beach",
17: "DesertHills",
18: "ForestHills",
19: "TaigaHills",
20: "Extreme Hills Edge",
21: "Jungle",
22: "JungleHills",
}
| isc | Python |
|
0a0b322ca7d42d28ba495b7786cd2bd92c0bfd34 | Add test_register.py | nineties/py-videocore | tests/test_assembler/test_register.py | tests/test_assembler/test_register.py | 'Test of videocore.Register'
from nose.tools import raises
from videocore.assembler import Register, AssembleError, REGISTERS
def test_register_names():
for name in REGISTERS:
assert name == REGISTERS[name].name
assert name == str(REGISTERS[name])
@raises(AssembleError)
def test_pack_of_accumulator():
REGISTERS['r0'].pack('nop')
@raises(AssembleError)
def test_pack_of_regfileB():
REGISTERS['rb0'].pack('nop')
@raises(AssembleError)
def test_unpack_of_regfileB():
REGISTERS['rb0'].unpack('nop')
| mit | Python |
|
12266ffcb7fcb809ec0e0a3102077581e64eb9e0 | Update migrations | petertrotman/adventurelookup,petertrotman/adventurelookup,petertrotman/adventurelookup,petertrotman/adventurelookup | server/adventures/migrations/0002_auto_20160909_1901.py | server/adventures/migrations/0002_auto_20160909_1901.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-09 19:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('adventures', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Setting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
],
),
migrations.AddField(
model_name='adventure',
name='publisher',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='adventures.Publisher'),
preserve_default=False,
),
migrations.AlterField(
model_name='adventure',
name='edition',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='adventures.Edition'),
),
migrations.AddField(
model_name='adventure',
name='setting',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='adventures.Setting'),
preserve_default=False,
),
]
| mit | Python |
|
6dc1abdb92d1226071ea84e6352389ad01d21fe6 | Create sequencer_scripting.py | kitelightning/UnrealEnginePython,kitelightning/UnrealEnginePython,kitelightning/UnrealEnginePython,getnamo/UnrealEnginePython,20tab/UnrealEnginePython,20tab/UnrealEnginePython,getnamo/UnrealEnginePython,20tab/UnrealEnginePython,20tab/UnrealEnginePython,kitelightning/UnrealEnginePython,Orav/UnrealEnginePython,getnamo/UnrealEnginePython,Orav/UnrealEnginePython,getnamo/UnrealEnginePython,kitelightning/UnrealEnginePython,20tab/UnrealEnginePython,Orav/UnrealEnginePython,Orav/UnrealEnginePython,getnamo/UnrealEnginePython | examples/sequencer_scripting.py | examples/sequencer_scripting.py | import unreal_engine as ue
from unreal_engine.classes import MovieSceneAudioTrack, LevelSequenceFactoryNew, MovieSceneSkeletalAnimationTrack, Character, SkeletalMesh, MovieScene3DTransformTrack, CineCameraActor
import time
from unreal_engine.structs import FloatRange, FloatRangeBound
from unreal_engine import FTransform, FVector
# create a new level sequence asset
factory = LevelSequenceFactoryNew()
seq = factory.factory_create_new('/Game/MovieMaster' + str(int(time.time())))
# add an audio track (without sound section ;) to the sequence
audio = seq.sequencer_add_master_track(MovieSceneAudioTrack)
# get a reference to the editor world (to spawn actors)
world = ue.get_editor_world()
# spawn a new character and modify it (post_edit_change will allow the editor/sequencer to be notified of actor updates)
character = world.actor_spawn(Character)
# notify modifications are about to happen...
character.modify()
character.Mesh.SkeletalMesh = ue.load_object(SkeletalMesh, '/Game/InfinityBladeAdversaries/Enemy/Enemy_Bear/Enemy_Bear.Enemy_Bear')
# finalize the actor
character.post_edit_change()
# add to the sequencer as a possessable (shortcut method returning the guid as string)
guid = seq.sequencer_add_actor(character)
# add an animation track mapped to the just added actor
anim = seq.sequencer_add_track(MovieSceneSkeletalAnimationTrack, guid)
# create 3 animations sections (assign AnimSequence field to set the animation to play)
anim_sequence = anim.sequencer_track_add_section()
anim_sequence.StartTime = 1
anim_sequence.EndTime = 3
anim_sequence.RowIndex = 0
anim_sequence2 = anim.sequencer_track_add_section()
anim_sequence2.RowIndex = 1
anim_sequence2.StartTime = 2
anim_sequence2.EndTime = 5
anim_sequence3 = anim.sequencer_track_add_section()
anim_sequence3.RowIndex = 1
anim_sequence3.SlotName = 'Hello'
anim_sequence3.StartTime = 0
anim_sequence3.EndTime = 30
# add a transform track/section in one shot to the actor
transform = seq.sequencer_add_track(MovieScene3DTransformTrack, guid).sequencer_track_add_section()
transform.StartTime = 0
transform.EndTime = 5
# add keyframes to the transform section
transform.sequencer_section_add_key(0, FTransform(FVector(0, 0, 17 * 100)))
transform.sequencer_section_add_key(1, FTransform(FVector(0, 0, 22 * 100)))
transform.sequencer_section_add_key(2, FTransform(FVector(0, 0, 26 * 100)))
transform.sequencer_section_add_key(2.5, FTransform(FVector(0, 0, 30 * 100)))
# set playback range
float_range = FloatRange(LowerBound=FloatRangeBound(Value=0), UpperBound=FloatRangeBound(Value=10))
seq.MovieScene.PlaybackRange = float_range
# add camera cut track (can be only one)
camera_cut_track = seq.sequencer_add_camera_cut_track()
# add two camera views
camera1 = camera_cut_track.sequencer_track_add_section()
camera2 = camera_cut_track.sequencer_track_add_section()
# spawn 2 cine cameras in the stage and posses them with the sequencer
cine_camera = world.actor_spawn(CineCameraActor)
camera_guid = seq.sequencer_add_actor(cine_camera)
cine_camera2 = world.actor_spawn(CineCameraActor)
camera2_guid = seq.sequencer_add_actor(cine_camera2)
# assign the two cameras to the camera cut sections (via guid)
camera1.CameraGuid = ue.string_to_guid(camera_guid)
camera2.CameraGuid = ue.string_to_guid(camera2_guid)
# set cameras time slots
camera1.StartTime = 0
camera1.EndTime = 3.5
camera2.StartTime = 3.5
camera2.EndTime = 5
# notify the sequence editor that something heavily changed (True will focus to the sequence editor)
seq.sequencer_changed(True)
| mit | Python |
|
df9b7cd8d1b34f8c29c372589ad9efd3a5435d0f | Implement TwitchWordsCounterBot class. | sergeymironov0001/twitch-chat-bot | twitchbot/twitch_words_counter_bot.py | twitchbot/twitch_words_counter_bot.py | import irc.bot
import irc.strings
from .words_counter import WordsCounter
class TwitchWordsCounterBot(irc.bot.SingleServerIRCBot):
def __init__(self, channel, nickname, password, server, port=6667):
irc.bot.SingleServerIRCBot.__init__(self, [(server, port, password)], nickname, nickname)
self.server = server
self.channel = channel
self.words_counter = WordsCounter()
def start(self):
print("Connecting to the server '%s'..." % self.server)
super(TwitchWordsCounterBot, self).start()
def on_welcome(self, c, e):
print("Connected to the server '%s'." % self.server)
print("Joining to the channel '%s'..." % self.channel)
c.join(self.channel)
def _on_join(self, c, e):
super(TwitchWordsCounterBot, self)._on_join(c, e)
print("Joined to the channel '%s'!" % self.channel)
def _on_disconnect(self, c, e):
super(TwitchWordsCounterBot, self)._on_disconnect(c, e)
print("Disconnected from the server '%s'." % self.server)
print(e)
def on_pubmsg(self, c, e):
message = e.arguments[0]
self.words_counter.count_words(message)
print(self.words_counter)
| mit | Python |
|
6136eef341f1ac5ce0be278c3ab78192192d0efa | check if OS is UNIX-y | marshki/pyWipe,marshki/pyWipe | posix.py | posix.py | #!/bin/py
from sys import platform
def osCheck():
# Check if OS is UNIX-y
if "darwin" or "linux" in platform.lower():
print platform
osCheck()
| mit | Python |
|
2a0724922bde4cdd5219c721cdfd5460a2e5f3ed | Create Timely_Tweeter.py | B13rg/Timely-Tweeter | Timely_Tweeter.py | Timely_Tweeter.py | #-=- Coding: Python UTF-8 -=-
import tweepy, time, sys
argfile = str(sys.argv[1])
#Twitter Account info
#Place Keys and Tokens bewteen the quotes
CONSUMER_KEY = '' #The Consumer Key (API Key)
CONSUMER_SECRET = '' #The Consumer Secret (API Secret)
ACCESS_KEY = '' #The Access Token
ACCESS_SECRET = '' #The Access Token Secret
SLEEPY_TIME = 60 #Time to wait in seconds between tweets (placeholder value; set your own interval)
#Now it checks in with Twitter and gets authenticated
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
filename=open(argfile, 'r') #Opens file
f=filename.readlines() #Pulls data from file
filename.close() #Closes file
for line in f:
api.update_status(line)
time.sleep(SLEEPY_TIME) #Time to wait
| mit | Python |
|
9c0750ef401870e0187e3b7f0e4e39cf3d7e3944 | Make sure the profile data is unmarshallable as profile data. | edisongustavo/asv,giltis/asv,airspeed-velocity/asv,ericdill/asv,giltis/asv,pv/asv,mdboom/asv,edisongustavo/asv,waylonflinn/asv,cpcloud/asv,mdboom/asv,cpcloud/asv,giltis/asv,waylonflinn/asv,spacetelescope/asv,pv/asv,qwhelan/asv,airspeed-velocity/asv,ericdill/asv,ericdill/asv,spacetelescope/asv,airspeed-velocity/asv,waylonflinn/asv,qwhelan/asv,pv/asv,mdboom/asv,airspeed-velocity/asv,spacetelescope/asv,pv/asv,qwhelan/asv,cpcloud/asv,mdboom/asv,edisongustavo/asv,ericdill/asv,spacetelescope/asv,qwhelan/asv | test/test_benchmarks.py | test/test_benchmarks.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import pstats
import pytest
import six
from asv import benchmarks
from asv import config
from asv import environment
BENCHMARK_DIR = os.path.join(os.path.dirname(__file__), 'benchmark')
INVALID_BENCHMARK_DIR = os.path.join(
os.path.dirname(__file__), 'benchmark.invalid')
ASV_CONF_JSON = {
'benchmark_dir': BENCHMARK_DIR,
'repo': 'https://github.com/spacetelescope/asv.git',
'project': 'asv'
}
def test_find_benchmarks(tmpdir):
tmpdir = six.text_type(tmpdir)
os.chdir(tmpdir)
d = {}
d.update(ASV_CONF_JSON)
d['env_dir'] = os.path.join(tmpdir, "env")
conf = config.Config.from_json(d)
b = benchmarks.Benchmarks(conf, regex='secondary')
assert len(b) == 3
b = benchmarks.Benchmarks(conf, regex='example')
assert len(b) == 3
b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
assert len(b) == 1
b = benchmarks.Benchmarks(conf)
assert len(b) == 7
envs = list(environment.get_environments(
conf.env_dir, conf.pythons, conf.matrix))
b = benchmarks.Benchmarks(conf)
times = b.run_benchmarks(envs[0], profile=True)
assert len(times) == 7
assert times[
'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
# Benchmarks that raise exceptions should have a time of "None"
assert times[
'time_secondary.TimeSecondary.time_exception']['result'] is None
assert times[
'subdir.time_subdir.time_foo']['result'] is not None
assert times[
'mem_examples.mem_list']['result'] > 2000
assert times[
'time_secondary.track_value']['result'] == 42.0
assert 'profile' in times[
'time_secondary.track_value']
profile_path = os.path.join(tmpdir, 'test.profile')
with open(profile_path, 'wb') as fd:
fd.write(times['time_secondary.track_value']['profile'])
pstats.Stats(profile_path)
def test_invalid_benchmark_tree(tmpdir):
tmpdir = six.text_type(tmpdir)
os.chdir(tmpdir)
d = {}
d.update(ASV_CONF_JSON)
d['benchmark_dir'] = INVALID_BENCHMARK_DIR
d['env_dir'] = os.path.join(tmpdir, "env")
conf = config.Config.from_json(d)
with pytest.raises(ValueError):
b = benchmarks.Benchmarks(conf)
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import pytest
import six
from asv import benchmarks
from asv import config
from asv import environment
BENCHMARK_DIR = os.path.join(os.path.dirname(__file__), 'benchmark')
INVALID_BENCHMARK_DIR = os.path.join(
os.path.dirname(__file__), 'benchmark.invalid')
ASV_CONF_JSON = {
'benchmark_dir': BENCHMARK_DIR,
'repo': 'https://github.com/spacetelescope/asv.git',
'project': 'asv'
}
def test_find_benchmarks(tmpdir):
tmpdir = six.text_type(tmpdir)
os.chdir(tmpdir)
d = {}
d.update(ASV_CONF_JSON)
d['env_dir'] = os.path.join(tmpdir, "env")
conf = config.Config.from_json(d)
b = benchmarks.Benchmarks(conf, regex='secondary')
assert len(b) == 3
b = benchmarks.Benchmarks(conf, regex='example')
assert len(b) == 3
b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
assert len(b) == 1
b = benchmarks.Benchmarks(conf)
assert len(b) == 7
envs = list(environment.get_environments(
conf.env_dir, conf.pythons, conf.matrix))
b = benchmarks.Benchmarks(conf)
times = b.run_benchmarks(envs[0], profile=True)
assert len(times) == 7
assert times[
'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
# Benchmarks that raise exceptions should have a time of "None"
assert times[
'time_secondary.TimeSecondary.time_exception']['result'] is None
assert times[
'subdir.time_subdir.time_foo']['result'] is not None
assert times[
'mem_examples.mem_list']['result'] > 2000
assert times[
'time_secondary.track_value']['result'] == 42.0
assert 'profile' in times[
'time_secondary.track_value']
def test_invalid_benchmark_tree(tmpdir):
tmpdir = six.text_type(tmpdir)
os.chdir(tmpdir)
d = {}
d.update(ASV_CONF_JSON)
d['benchmark_dir'] = INVALID_BENCHMARK_DIR
d['env_dir'] = os.path.join(tmpdir, "env")
conf = config.Config.from_json(d)
with pytest.raises(ValueError):
b = benchmarks.Benchmarks(conf)
| bsd-3-clause | Python |
d1ecc996269a801c65d3b88791f7f5546c8af1b8 | add setup.py | odanado/daria | setup.py | setup.py | from setuptools import setup
setup(
name='daria',
version='0.0.1',
description='pytorch trainer',
author='odanado',
author_email='odan3240@gmail.com',
url='https://github.com/odanado/daria',
license='MIT License',
packages=['daria'],
tests_require=['mock'],
test_suite='tests',
)
| mit | Python |
|
db92ed5e523eafb7ccba553f1ee25365cc254798 | add setup.py | pachi/epbdcalc | setup.py | setup.py | #!/usr/bin/env python
#encoding: utf-8
#
# epbdcalc program: energy efficiency calculation per ISO/DIS 52000-1:2015
#
# Copyright (C) 2015 Rafael Villar Burke <pachi@ietcc.csic.es>
# Daniel Jiménez González <danielj@ietcc.csic.es>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""epbdcalc - Cálculo de la eficiencia energética según ISO/DIS 52000-1:2015
Based on the pypa setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import codecs
import os.path
import re
from setuptools import setup, find_packages
def find_version(*file_paths, **kwargs):
with codecs.open(os.path.join(os.path.dirname(__file__), *file_paths),
encoding=kwargs.get("encoding", "utf8")) as fp:
version_file = fp.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
here = os.path.abspath(os.path.dirname(__file__))
README = codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8').read()
NEWS = codecs.open(os.path.join(here, 'NEWS.txt'), encoding='utf-8').read()
setup(
name="pyepbd",
author="Rafael Villar Burke, Daniel Jiménez González",
author_email="pachi@ietcc.csic.es",
version=find_version("pyepbd", "__init__.py"),
description="Cálculo de la eficiencia energética según ISO/DIS 52000-1:2015",
long_description=README + "\n\n" + NEWS,
url="https://github.com/pachi/epbdcalc",
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: Implementation :: CPython',
# Environment
'Environment :: Console',
'Operating System :: OS Independent'
],
keywords=[u"energía", u"edificación", u"CTE", u"energy", u"buildings"],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
include_package_data = True,
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['pandas >= 0.15', 'numpy >= 1.7'],
# dependencies for the setup script to run
setup_requires=['pytest-runner'],
# dependencies for the test command to run
tests_require=['pytest', 'pytest-cov'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'test': ['pytest', 'pytest-cov'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'epbdcalc=pyepbd.cli:main',
],},
)
| mit | Python |
|
38bf3ce6db844999fe5903dad91e991c6fea57c7 | Add setup | Axik/trafaret,rrader/trafaret,rrader/trafaret,Deepwalker/trafaret,Deepwalker/trafaret,Axik/trafaret,rrader/trafaret,Axik/trafaret,Deepwalker/trafaret | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setupconf = dict(
name = 'contract',
version = '0.3',
license = 'BSD',
url = 'https://github.com/Deepwalker/contract/',
author = 'Barbuza, Deepwalker',
author_email = 'krivushinme@gmail.com',
description = ('Validation and parsing library'),
long_description = "Place README here",
packages = find_packages(),
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
if __name__ == '__main__':
setup(**setupconf)
| bsd-2-clause | Python |
|
ea7d55fa309d592669e86dae826b7cc08323de16 | update setup.py version to 0.2 | thegooglecodearchive/mpmath,thegooglecodearchive/mpmath,upiterbarg/mpmath,tectronics/mpmath,Alwnikrotikz/mpmath,Alwnikrotikz/mpmath,upiterbarg/mpmath,tectronics/mpmath | setup.py | setup.py | from distutils.core import setup
setup(name='mpmath',
description = 'Python library for arbitrary-precision floating-point arithmetic',
version='0.2',
url='http://mpmath.googlecode.com',
author='Fredrik Johansson',
author_email='fredrik.johansson@gmail.com',
license = 'BSD',
packages=['mpmath'],
)
| from distutils.core import setup
setup(name='mpmath',
description = 'Python library for arbitrary-precision floating-point arithmetic',
version='0.1',
url='http://mpmath.googlecode.com',
author='Fredrik Johansson',
author_email='fredrik.johansson@gmail.com',
license = 'BSD',
packages=['mpmath'],
)
| bsd-3-clause | Python |
12ece36bf0355ad619635675b419d9d0e7163cf4 | Add setup.py file | lamby/django-cache-toolbox,playfire/django-cache-toolbox,lamby/django-sensible-caching,thread/django-sensible-caching | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-cache-relation',
description="Non-magical object caching for Django.",
version='0.1',
url='http://code.playfire.com/',
author='Playfire.com',
author_email='tech@playfire.com',
license='BSD',
packages=find_packages(),
)
| bsd-3-clause | Python |
|
30d3f42b4910b84b2a3419e43ea6e5e6da2ab7a0 | Add setup | shervinea/enzynet | setup.py | setup.py | from setuptools import setup
setup(name = 'enzynet',
description = 'EnzyNet: enzyme classification using 3D convolutional neural networks on spatial representation',
author = 'Afshine Amidi and Shervine Amidi',
author_email = '<author1-lastname>@mit.edu, <author2-firstname>@stanford.edu',
license = 'MIT',
packages = ['enzynet'])
| mit | Python |
|
45e624fe5176dd59b8f42636b777a1b6a6106dca | Add initial setuptools integration, required by click | georgeyk/loafer | setup.py | setup.py | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from setuptools import setup
setup(
name='loafer',
version='0.0.1',
entry_points='''
[console_scripts]
loafer=loafer.cli:cli
''',
)
| mit | Python |
|
81e7e9ed4b3b0f6840e11adc5c73648471f606ef | Add setup.py | orangain/scrapy-slotstats | setup.py | setup.py | # coding: utf-8
from __future__ import print_function, unicode_literals
import sys
from setuptools import setup
install_requires = []
if sys.version_info[0] == 2:
install_requires.append('statistics')
setup(
name='scrapy-slotstats',
version='0.1',
license='MIT License',
description='Scrapy extension to show statistics of downloader slots',
author='orangain',
author_email='orangain@gmail.com',
url='https://github.com/orangain/scrapy-slotstats',
keywords="scrapy downloader slot stats",
py_modules=['scrapy_slotstats'],
platforms=['Any'],
install_requires=install_requires,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Framework :: Scrapy',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
]
)
| mit | Python |
|
21380bcf76a8144d182166c3441d308af2eda417 | Add first pass at setup.py | JDReutt/BayesDB,poppingtonic/BayesDB,JDReutt/BayesDB,poppingtonic/BayesDB,JDReutt/BayesDB,poppingtonic/BayesDB,JDReutt/BayesDB,poppingtonic/BayesDB,poppingtonic/BayesDB,JDReutt/BayesDB | setup.py | setup.py | #!/usr/bin/python
import os
from distutils.core import setup, Extension
ext_modules = []
packages = ['bayesdb', 'bayesdb.tests']
setup(
name='BayesDB',
version='0.1',
author='MIT.PCP',
author_email = 'bayesdb@mit.edu',
url='probcomp.csail.mit.edu/bayesdb',
long_description='BayesDB',
packages=packages,
package_dir={'bayesdb':'bayesdb/'},
ext_modules=ext_modules,
)
| apache-2.0 | Python |
|
374e27087d6d432ba01a0ef65c4109be84e50dcf | Add setup.py | RJMetrics/RJMetrics-py | setup.py | setup.py | import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
path, script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(path))
# Don't import rjmetrics module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'rjmetrics'))
from version import VERSION
install_requires = ['requests >= 0.8.8']
# Get simplejson if we don't already have json
if sys.version_info < (3, 0):
try:
from util import json
except ImportError:
install_requires.append('simplejson')
setup(
name='rjmetrics',
cmdclass={'build_py': build_py},
version=VERSION,
description='Python client for RJMetrics APIs',
author='RJMetrics',
author_email='support@rjmetrics.com',
url='https://rjmetrics.com/',
packages=['rjmetrics', 'rjmetrics.test'],
package_data={'rjmetrics': ['../VERSION']},
install_requires=install_requires,
test_suite='rjmetrics.test.all',
use_2to3=True,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
])
| apache-2.0 | Python |
|
9c05031446d0d17bdc207b00ebf47d9769f96d33 | Add a setup.py for owebunit to be able to obtain ocookie via pip | p/ocookie | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='ocookie',
version='0.1',
description='Comprehensive cookie library',
author='Oleg Pudeyev',
author_email='oleg@bsdpower.com',
url='http://github.com/p/ocookie',
packages=['ocookie'],
)
| bsd-2-clause | Python |
|
431acaabf7a3e77b416a57998bfadcb2d3864555 | Add a setup.py | mozillazg/bustard-httpbin,krissman/httpbin,nkhuyu/httpbin,phouse512/httpbin,gvangool/httpbin,scottydelta/httpbin,vscarpenter/httpbin,habibmasuro/httpbin,luhkevin/httpbin,luhkevin/httpbin,SunGuo/httpbin,paranoiasystem/httpbin,marinehero/httpbin,kennethreitz/httpbin,luosam1123/httpbin,logonmy/httpbin,nkhuyu/httpbin,ewdurbin/httpbin,yangruiyou85/httpbin,vscarpenter/httpbin,Jaccorot/httpbin,krissman/httpbin,Stackato-Apps/httpbin,lioonline/httpbin,SunGuo/httpbin,logonmy/httpbin,admin-zhx/httpbin,postmanlabs/httpbin,mojaray2k/httpbin,mansilladev/httpbin,OndrejPontes/httpbin,luosam1123/httpbin,pestanko/httpbin,paranoiasystem/httpbin,fangdingjun/httpbin,mansilladev/httpbin,yemingm/httpbin,ashcoding/httpbin,Stackato-Apps/httpbin,yemingm/httpbin,hnq90/httpbin,admin-zhx/httpbin,ashcoding/httpbin,lioonline/httpbin,ewdurbin/httpbin,Jaccorot/httpbin,habibmasuro/httpbin,scottydelta/httpbin,Runscope/httpbin,mojaray2k/httpbin,Runscope/httpbin,OndrejPontes/httpbin,bradparks/httpbin__http_echo_service_for_testing_http_requests,pskrz/httpbin,pskrz/httpbin,postmanlabs/httpbin,pestanko/httpbin,shaunstanislaus/httpbin,tatsuhiro-t/httpbin,sigmavirus24/httpbin,shaunstanislaus/httpbin,fangdingjun/httpbin,gvangool/httpbin,phouse512/httpbin,kennethreitz/httpbin,yangruiyou85/httpbin,bradparks/httpbin__http_echo_service_for_testing_http_requests,sigmavirus24/httpbin,mozillazg/bustard-httpbin,marinehero/httpbin,hnq90/httpbin,Lukasa/httpbin,Lukasa/httpbin | setup.py | setup.py | from setuptools import setup, find_packages
import codecs
import os
import re
setup(
name="httpbin",
version="0.1.0",
description="HTTP Request and Response Service",
# The project URL.
url='https://github.com/kennethreitz/httpbin',
# Author details
author='Kenneth Reitz',
author_email='me@kennethreitz.com',
# Choose your license
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
install_requires=['Flask','MarkupSafe','decorator','itsdangerous','six'],
)
| isc | Python |
|
82b8651c9eed0c19224c8a7b53a0bedae81337a3 | Add a setup.py. | mikeboers/WebStar | setup.py | setup.py |
from setuptools import setup, find_packages
setup(
name = "WebStar",
version = "0.1b",
author="Mike Boers",
author_email="webstar@mikeboers.com",
license="BSD-3"
)
| bsd-3-clause | Python |
|
d157b4e1f4709b0205d5de31df65a5308f926d49 | Add setup.py | ushuz/autumn | setup.py | setup.py | #!/usr/bin/env python
# coding: utf-8
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = ""
with open("autumn.py", "r") as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError("No version information")
setup(name="autumn",
version=version,
description="A simple Pythonic MySQL ORM.",
author="ushuz",
url="https://github.com/ushuz/autumn",
py_modules=["autumn"],
license="MIT License",
)
| mit | Python |
|
a2bfe07ba67e902870dd366626b23dbb5e6e2696 | Create messageMode.py | hongfeioo/messagemodule | messageMode.py | messageMode.py |
#!/usr/bin/python
#coding=utf-8
#filename: messageMode.py
import telnetlib
import os,sys,commands,multiprocessing
import smtplib
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import urllib2
#---init---
begintime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
muti_phone='13521161000'
muti_mail='yhf@XXXX.com'
pythonlog ='/home/sms_mail.log'
sender = 'hxx@163.com'
smtpserver = 'hxx.163.com'
username = 'hxx@163.com'
password = 'password'
#----------
def sendtxtmail(_subject,_mail_off,_msg,_fuc_mail,_begintime):
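    # Send the HTML message to every address in the semicolon-separated list unless _mail_off is set.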
for mail_index in range(0, len(_fuc_mail.split(';'))):
if _mail_off == 1:
break
_receiver = _fuc_mail.split(';')[mail_index]
if _receiver.find('null') == -1:
try:
msg = MIMEText('<html>'+_msg+'</html>','html','utf-8')
msg['Subject'] = _subject
msg['to'] = _receiver
smtp = smtplib.SMTP()
smtp.connect(smtpserver)
smtp.login(username, password)
smtp.sendmail(sender,_receiver, msg.as_string())
smtp.quit()
os.system("echo "+_begintime+' '+_subject+' '+_receiver+" mail send successful >> "+pythonlog)
print "mail send successful"
except Exception,e:
print "mail send fail"
print e[1]
os.system("echo "+_begintime+' '+_subject+' '+_receiver+" mail send fail ,Code: "+str(e[0])+' '+e[1].split()[0]+'- -! >>'+pythonlog)
return 'mail func over'
def main(arg_msg):
sendtxtmail('test_subject',0,arg_msg,muti_mail,begintime)
return 'main func over'
if __name__ == "__main__":
print main(sys.argv[1])
| apache-2.0 | Python |
|
3d020f09332093807f70a1bca5360e1418633bb4 | Add setup.py. | znick/anytask,znick/anytask,znick/anytask,znick/anytask | setup.py | setup.py | from setuptools import setup, find_packages
setup(name='Anytask',
packages=find_packages(),
)
| mit | Python |
|
b38eb4f8a7b8e3400ea09c600e241d8c4a9d0846 | Add setup so sgfs can install this to test with | westernx/sgsession | setup.py | setup.py | from distutils.core import setup
setup(
name='sgsession',
version='0.1-dev',
description='Shotgun ORM/Session.',
url='http://github.com/westernx/sgsession',
packages=['sgsession'],
author='Mike Boers',
author_email='sgsession@mikeboers.com',
license='BSD-3',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
) | bsd-3-clause | Python |
|
5263a684d4bd111b903456a8da2c92ddb25e7811 | Add migration | stefanw/seriesly,stefanw/seriesly | seriesly/series/migrations/0002_auto_20180127_0718.py | seriesly/series/migrations/0002_auto_20180127_0718.py | # Generated by Django 2.0 on 2018-01-27 13:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('series', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='show',
name='added',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='show',
name='country',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='show',
name='network',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='show',
name='timezone',
field=models.CharField(blank=True, max_length=255),
),
]
| agpl-3.0 | Python |
|
874fbb6749d60ea3fcf078d25d7911d7ac314ab1 | Add a setup.py file for use with python install tools. | iestynpryce/file-validator | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'File validator',
'author': 'Iestyn Pryce',
'url': '',
'download_url': '',
'author_email': 'iestyn.pryce@gmail.com',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['validator'],
'scripts': ['bin/validate_file.py'],
'name': 'validator'
}
setup(**config)
| mit | Python |
|
3258a5ba8c748ce079082c34d13b231f157b1463 | Add experimental top-level copy of setup.py | daviddrysdale/python-phonenumbers,dongguangming/python-phonenumbers,daviddrysdale/python-phonenumbers,agentr13/python-phonenumbers,shikigit/python-phonenumbers,SergiuMir/python-phonenumbers,titansgroup/python-phonenumbers,gencer/python-phonenumbers,daodaoliang/python-phonenumbers,daviddrysdale/python-phonenumbers,roubert/python-phonenumbers | setup.py | setup.py | #!/usr/bin/env python
# Original libphonenumber Java code:
# Copyright (C) 2009-2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import distutils.core
import sys
# Importing setuptools adds some features like "setup.py test", but
# it's optional so swallow the error if it's not there.
try:
import setuptools
except ImportError:
pass
major, minor = sys.version_info[:2]
python_25 = (major > 2 or (major == 2 and minor >= 5))
if not python_25:
raise RuntimeError("Python 2.5 or newer is required")
python_3x = (major >= 3)
if python_3x:
package_name = 'phonenumbers3k'
dev_status = 'Development Status :: 3 - Alpha'
else:
package_name = 'phonenumbers'
dev_status = 'Development Status :: 4 - Beta'
# Add ./python/ subdirectory to path
sys.path.append('python')
# Discover version of phonenumbers package
from phonenumbers import __version__
distutils.core.setup(name=package_name,
version=__version__,
description="Python version of Google's common library for parsing, formatting, storing and validating international phone numbers.",
author='David Drysdale',
author_email='dmd@lurklurk.org',
url='https://github.com/daviddrysdale/python-phonenumbers',
license='Apache License 2.0',
packages=['phonenumbers', 'phonenumbers.data', 'phonenumbers.geodata'],
package_dir={'': 'python'},
test_suite="tests",
platforms='Posix; MacOS X; Windows',
classifiers=[dev_status,
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Communications :: Telephony',
],
)
| apache-2.0 | Python |
|
58dd2d188aab1fbf30ff843307eecf5ca685527c | Add setup | shudmi/ngx-task | setup.py | setup.py | from setuptools import find_packages, setup
setup(
name='ngx-task',
version='0.1',
description='Testimonial for candidates to show up their code-foo',
author='Dmitry Shulyak',
author_email='dmitri.shulyak@gmail.com',
url='https://github.com/shudmi/ngx-task',
classifiers=[
'License :: Apache License 2.0',
'Programming Language :: Python',
'Programming Language :: Python 3',
'Programming Language :: Python 3.4',
],
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[],
entry_points="""
[console_scripts]
ngx_generate=ngx_task.cli.generate_data
ngx_process=ngx_task.cli.process_data
"""
)
| apache-2.0 | Python |
|
90746eba08c67c4f62462ed74d08566cafa18724 | Add setup.py | graypawn/wrenet | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='wrenet',
version='0.1',
description='Network configurations viewer in the Windows Registry',
author='graypawn',
author_email='choi.pawn' '@gmail.com',
url='https://github.com/graypawn/wrenet',
license='Apache License (2.0)',
packages=find_packages(),
install_requires = {'python-registry >= 1.0.0'},
classifiers = ["Programming Language :: Python",
"Programming Language :: Python :: 3",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: Apache Software License"],
entry_points={
'console_scripts': [
'wrenet=wrenet.wrenet:main'
]
}
)
| apache-2.0 | Python |
|
50742b6e629e6f54a9f3784a3c1495eb9d82c238 | Add start of processed package | brightway-lca/brightway | brightway_projects/processing/processed_package.py | brightway_projects/processing/processed_package.py | from ..errors import InconsistentFields, NonUnique
def greedy_set_cover(data, exclude=None):
"""Find unique set of attributes that uniquely identifies each element in ``data``.
Feature selection is a well known problem, and is analogous to the `set cover problem <https://en.wikipedia.org/wiki/Set_cover_problem>`__, for which there is a `well known heuristic <https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm>`__.
Args:
data (iterable): List of dictionaries with the same fields.
exclude (iterable): Fields to exclude during search for uniqueness. ``id`` is Always excluded.
Returns:
Set of attributes (strings)
Raises:
NonUnique: The given fields are not enough to ensure uniqueness
"""
    data = list(data)
    exclude = {"id"} if exclude is None else set(exclude) | {"id"}

    # Greedy heuristic (a sketch): keep adding the field whose addition
    # distinguishes the most objects until every object is uniquely keyed.
    candidates = {key for obj in data for key in obj} - exclude

    def covered(fields):
        return len({tuple(obj.get(f) for f in sorted(fields)) for obj in data})

    fields = set()
    while covered(fields) < len(data):
        best = max(candidates, key=lambda f: covered(fields | {f}), default=None)
        if best is None or covered(fields | {best}) == covered(fields):
            raise NonUnique
        fields.add(best)
        candidates.discard(best)
    return fields
def as_unique_attributes(data, exclude=None, include=None):
"""Format ``data`` as unique set of attributes and values for use in ``create_processed_datapackage``.
    Note: Each element in ``data`` must have an ``id`` attribute, e.g.::

        data = [
            {"id": ...},
        ]
Args:
data (iterable): List of dictionaries with the same fields.
exclude (iterable): Fields to exclude during search for uniqueness. ``id`` is Always excluded.
include (iterable): Fields to include when returning, even if not unique
Returns:
(list of field names as strings, dictionary of data ids to values for given field names)
Raises:
InconsistentFields: Not all features provides all fields.
"""
include = set([]) if include is None else set(include)
fields = greedy_set_cover(data, exclude)
    if len({frozenset(obj.keys()) for obj in data}) > 1:
raise InconsistentFields
def formatter(obj, fields, include):
return {
key: value
for key, value in obj.items()
if (key in fields or key in include or key == "id")
}
return (fields, [formatter(obj, fields, include) for obj in data])
def create_processed_datapackage(
array,
rows,
cols,
filepath=None,
id_=None,
metadata=None,
replace=True,
compress=True,
in_memory=False,
):
"""Create a datapackage with numpy structured arrays and metadata.
Exchanging large, dense datasets like MRIO tables is not efficient if each exchange must be listed separately. Instead, we would prefer to exchange the processed arrays used to build the matrices directly. However, these arrays use integer indices which are not consistent across computers or even Brightway projects. This function includes additional metadata to solve this problem, mapping these integer ids to enough attributes to uniquely identify each feature. Separate metadata files are included for each column in the array (i.e. the row and column indices).
Args:
array (numpy structured array): The numeric data. Usually generated via ``create_numpy_structured_array``.
rows (dict): Dictionary mapping integer indices in ``row_value`` to a dictionary of attributes.
cols (dict): Dictionary mapping integer indices in ``col_value`` to a dictionary of attributes.
Returns:
Something :)
"""
pass
| bsd-3-clause | Python |
|
c68cda0549bb9c47be0580ecd43f55966e614b31 | Add Pascal's Triangle/nCr Table | PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank | mathematics/combinatorics/ncr_table/kevin.py | mathematics/combinatorics/ncr_table/kevin.py | #!/usr/bin/env python
# https://www.hackerrank.com/challenges/ncr-table
def get_number():
return int(input().strip())
def nCr(row_number):
rows = [[1], [1, 1], [1, 2, 1]]
while row_number >= len(rows):
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# .......
row = [(rows[-1][index] + rows[-1][index + 1])
for index in range(len(rows) - 1)]
rows.append([1] + row + [1])
# Spew elements with * to show the proper output
print(*rows[row_number])
# Generate this row from the nCr table
inputs = []
number_of_items = get_number()
for i in range(number_of_items):
pascals_row = get_number()
inputs.append(pascals_row)
print()
[nCr(item) for item in inputs]
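# Example: inputs 2 and 4 print "1 2 1" and "1 4 6 4 1" respectively.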
| mit | Python |
|
842869063ead9b2e6a1e22d11c9901072f2319aa | Add script to self generate docs for recurring data types | vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks | docs/generate_spec.py | docs/generate_spec.py | # -*- encoding: utf-8 -*-
#
# This script is to be used to automagically generate the recurring data types
# documentation based on the API specification.
#
# to run it just do:
#
# $ python generate_spec.py > outputfile.md
#
# :authors: Arturo Filastò
# :licence: see LICENSE
import inspect
from globaleaks.rest.messages import base
def create_spec(spec):
doc = ""
for k, v in spec.items():
doc += " %s: %s\n" % (k, v)
return doc
def create_class_doc(klass):
doc = "## %s\n" % klass.__name__
if klass.__doc__:
docstring = [line.strip() for line in klass.__doc__.split("\n")]
doc += '\n'.join(docstring)
doc += "\n"
doc += create_spec(klass.specification)
return doc
for name, klass in inspect.getmembers(base, inspect.isclass):
if issubclass(klass, base.GLTypes) and klass != base.GLTypes:
print create_class_doc(klass)
| agpl-3.0 | Python |
|
7d23ad49da0044d83f781105cb01addb1a4aa41c | Add catalog.wsgi file | caasted/aws-flask-catalog-app,caasted/aws-flask-catalog-app | catalog.wsgi | catalog.wsgi | #!/usr/bin/python
import sys
sys.path.insert(0,"/var/www/html/catalog/")
from catalog import app as application
application.secret_key = 'super_secret_key'
| mit | Python |
|
c16fae0519068e40d7b1ed988f49460198f6fd43 | Create decode_diameter.py | eriwoon/ShellScriptCollect,eriwoon/ShellScriptCollect,eriwoon/ShellScriptCollect,eriwoon/ShellScriptCollect,eriwoon/ShellScriptCollect,eriwoon/ShellScriptCollect,eriwoon/ShellScriptCollect,eriwoon/ShellScriptCollect | decode_diameter.py | decode_diameter.py | #-------------------------------------------------------------------------------
# Name: Decode Diameter
# Purpose:
#
# Author: XIAO Zhen
#
# Created: 08/10/2014
# Copyright: (c) XIAO Zhen 2014
# Licence: MIT License
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import os
import sys
def logerr(msg):
print "Error: " + msg
def loginfo(msg):
print "Info : " + msg
def output(msg):
print msg
def loadAvpDefineFile(filename):
d = dict()
try:
file = open(filename,'r')
except:
logerr("Cannot open file:" + filename)
return d
cur_avp = '-1'
detail = []
for line in file.readlines():
if(line[:4] == 'avp '):
if(cur_avp != '-1'):
d[cur_avp] = detail
detail = []
cur_avp = line.split()[1]
if(cur_avp in d):
cur_avp = '-1'
elif(line.find("VENDOR_ID") != -1 and cur_avp != '-1'):
cur_avp += ':' + line.split()[2][:-1]
elif(line.find('DATA_TYPE') != -1):
detail.append(line.split()[2][:-1])
elif(line.find('AVP_NAME') != -1):
detail.append(line.split()[2][1:-2])
file.close()
return d
def decode(avps,hex):
'''
0. Grouped
1. OctetString
2. OctetString
3. Int32
4. Int64
5. UInt32
6. UInt64
9. Address
10.Time
11.Diameter-Identify
12.DiameterURI
13.Enum
459:0
['13', 'User-Equipment-Info-Type']
'''
i = 0
if(hex[i:i + 2] != '01'):
logerr("This is not a diameter message!")
return
i += 2
offset = []
offset.append(eval('0x' + hex[i:i+6]) - 8)
def main():
#use the the directory where the script located as current work dir
os.chdir(os.path.dirname(sys.argv[0]))
#load the avp define file
file_name_avp_define = "Avpdefine.avp"
avps = loadAvpDefineFile(file_name_avp_define)
i = 0
for avp in avps:
print avp
print avps[avp]
i += 1
if(i == 10):
break
hex = '-'
decode(avps,hex)
if __name__ == '__main__':
main()
| mit | Python |
|
8968251b7e1b89171b285e377d17dae299019cd0 | Test that '--checks' accepts notebooks either before or after the check command (#887) | mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext,mwouts/jupytext | tests/test_cli_check.py | tests/test_cli_check.py | import pytest
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext import write
from jupytext.cli import jupytext
from .utils import requires_black
@pytest.fixture
def non_black_notebook(python_notebook):
return new_notebook(metadata=python_notebook.metadata, cells=[new_code_cell("1+1")])
@requires_black
def test_check_notebooks_left_or_right_black(python_notebook, tmpdir, cwd_tmpdir):
write(python_notebook, str(tmpdir / "nb1.ipynb"))
write(python_notebook, str(tmpdir / "nb2.ipynb"))
jupytext(["*.ipynb", "--check", "black --check {}"])
jupytext(["--check", "black --check {}", "*.ipynb"])
@requires_black
def test_check_notebooks_left_or_right_not_black(
non_black_notebook, tmpdir, cwd_tmpdir
):
write(non_black_notebook, str(tmpdir / "nb1.ipynb"))
write(non_black_notebook, str(tmpdir / "nb2.ipynb"))
with pytest.raises(SystemExit):
jupytext(["*.ipynb", "--check", "black --check {}"])
with pytest.raises(SystemExit):
jupytext(["--check", "black --check {}", "*.ipynb"])
| mit | Python |
|
4694f6bf2405d0aae5e6c3fc393f8a839e8aac07 | Add tests for converter.Line and converter.Generator. | rbarrois/uconf | tests/test_converter.py | tests/test_converter.py | # coding: utf-8
# Copyright (c) 2010-2012 Raphaël Barrois
import unittest
from confmgr import converter
class LineTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual("Line('foo', 'bar')",
repr(converter.Line('foo', 'bar')))
def test_equality(self):
self.assertEqual(
converter.Line('foo', 'bar'),
converter.Line('foo', 'bar'))
self.assertNotEqual(
converter.Line('foo', 'bar'),
converter.Line('foo', 'baz'))
self.assertNotEqual(
converter.Line('foo', 'bar'),
converter.Line('fo', 'bar'))
def test_compare_to_other(self):
self.assertNotEqual('foo', converter.Line('foo', 'bar'))
self.assertNotEqual(converter.Line('foo', 'bar'), 'foo')
def test_hash(self):
s = set()
for _i in range(5):
s.add(converter.Line('foo', 'bar'))
self.assertEqual(1, len(s))
self.assertEqual(set([converter.Line('foo', 'bar')]), s)
def test_fill_original_normal(self):
l = converter.Line('foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('foo', l.original)
def test_fill_original_comment(self):
l = converter.Line('#@foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('#@@foo', l.original)
l = converter.Line('"@foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('"@@foo', l.original)
l = converter.Line('!@foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('!@@foo', l.original)
class GeneratorTestCase(unittest.TestCase):
def test_no_special(self):
txt = [
'foo',
'bar',
'baz',
]
g = converter.Generator(txt, categories=[], fs=None)
expected = [converter.Line(s, s) for s in txt]
out = list(g)
self.assertItemsEqual(expected, out)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python |
|
a37640d107d1dd58ba4f9db3e043020ad76cd25d | Create cam_control.py | mic100/Raspberry_Pi | cam_control.py | cam_control.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from cv2 import *
import MySQLdb as ms
import time
import _mysql_exceptions as M
import os
def get_image():
cam1 = VideoCapture(0)
cam2 = VideoCapture(1)
s1, img1 = cam1.read()
s2, img2 = cam2.read()
    if s1:
        imwrite("test1.jpg", img1)
    if s2:
        imwrite("test2.jpg", img2)
    cam1.release()
    cam2.release()
def read_image():
    # binary mode: JPEG data must not go through text-mode translation
    fin1 = open("test1.jpg", "rb")
    fin2 = open("test2.jpg", "rb")
    img1 = fin1.read()
    img2 = fin2.read()
    fin1.close()
    fin2.close()
    return img1, img2
def query() :
try :
db = ms.connect(host="your_host_name",user="your_user_name",\
passwd="your_password",db="your_database_name")
except(M.OperationalError):
print '\n', "########ISSUE_%s_Mysqldatabase_########" % ("your_host_name")
print "########RPi_CANT_REACH_DATABASE########"
print "########CHECK_WIRES_FROM_RPI_TO_INTERNETPROVIDER'S_ROOTER(BOX)##"
os.system("sudo reboot")
data1 = read_image()[0]
data2 = read_image()[1]
try :
#set up of a cursor to be able to execute a query in database.
c = db.cursor()
date = time.strftime("%a, %d, %b %Y %H:%M:%S", time.gmtime())
c.execute("INSERT INTO images(date,cam1,cam2) VALUES (%s,%s,%s)", (date,data1,data2))
print "<--- Send image --->","--- / date / --- : ",date
except(NameError) :
#os.system("sudo reboot")
print "NameError: ", NameError
if __name__ == "__main__" :
while True :
get_image()
try :
query()
#print "Ok test.jpg image found"
except :
print "No test.jpg image found"
#cam get .jpg file and send an image \
#every 30 minutes=1800 seconds
#every 5minutes = 300 seconds
time.sleep(300)
| mit | Python |
|
d2a283856a9e2559a131c5aaa2407477be993af0 | add file to help gather all the data we need | hahnicity/ecs251-final-project,hahnicity/ecs251-final-project | collate.py | collate.py | import csv
import os
from glob import glob
def collate_from_breath_meta(cohort):
"""
Gets all breath_meta.csv files in our specific cohort and then gets all
the data from these files and stores them in a dictionary.
"""
if cohort not in ["ardscohort", "controlcohort"]:
raise Exception("Input must either be ardscohort or controlcohort")
dirs = os.listdir(cohort)
cohort_files = []
for dir in dirs:
files = glob("{}/{}/0*_breath_meta.csv".format(cohort, dir))
for f in files:
cohort_files.append(f)
data = []
for f in cohort_files:
with open(f) as meta:
reader = csv.reader(meta)
for line in reader:
data.append(line)
return data
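

def main():
    # Minimal driver (a sketch: the cohort name and output path below are
    # assumptions for illustration, not part of the collation logic).
    data = collate_from_breath_meta("ardscohort")
    with open("collated_breath_meta.csv", "w") as out:
        csv.writer(out).writerows(data)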
if __name__ == "__main__":
main()
| mit | Python |
|
d2667faded6dfdd1fb2992ec188b8fed12bb2723 | Add ncurses 5.9 | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild | packages/ncurses.py | packages/ncurses.py | class NcursesPackage (GnuPackage):
def __init__ (self):
GnuPackage.__init__ (self, 'ncurses', '5.9')
self.sources.extend ([
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/hex.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/ungetch_guard.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/configure.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/constructor_types.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/pkg_config_libdir.diff'
])
def prep (self):
Package.prep (self)
if Package.profile.name == 'darwin':
for p in range (1, len (self.sources)):
self.sh ('patch -p0 --ignore-whitespace < "%{sources[' + str (p) + ']}"')
def make (self):
self.local_make_flags.extend (['-DPKG_CONFIG_LIBDIR=%s' % self.PKG_CONFIG_PATH])
Package.make (self)
NcursesPackage ()
| mit | Python |
|
00bfd02f921a42d4f288254d1accb7546d8df2c5 | Add hbase consistency check throw hbase hbck command, easily can be added some checks like backups servers or region servers | keedio/nagios-hadoop,keedio/nagios-hadoop | check_hbase.py | check_hbase.py | #!/usr/bin/env python
# vim: ts=4:sw=4:et:sts=4:ai:tw=80
from utils import krb_wrapper,StringContext
import os
import argparse
import nagiosplugin
import re
import subprocess
html_auth = None
def parser():
version="0.1"
parser = argparse.ArgumentParser(description="Checks datanode")
parser.add_argument('-p', '--principal', action='store', dest='principal')
parser.add_argument('-s', '--secure',action='store_true')
parser.add_argument('-k', '--keytab',action='store')
parser.add_argument('--cache_file',action='store', default='/tmp/nagios.krb')
parser.add_argument('-v','--version', action='version', version='%(prog)s ' + version)
args = parser.parse_args()
if args.secure and (args.principal is None or args.keytab is None):
parser.error("if secure cluster, both of --principal and --keytab required")
return args
class Hbase(nagiosplugin.Resource):
def __init__(self):
p = subprocess.Popen(['hbase','hbck'],stdout=subprocess.PIPE,stderr=None)
output,err = p.communicate()
self.status=None
if err is None:
for line in output.splitlines():
m = re.match('^\s*Status\s*:\s*(?P<STATUS>\w+)\s*',line)
if m:
self.status=m.group('STATUS')
else:
return 2,"Critical: "+err
def probe(self):
yield nagiosplugin.Metric('status',self.status,context="status")
@nagiosplugin.guarded
def main():
args = parser()
    auth_token = None
    if args.secure:
        auth_token = krb_wrapper(args.principal, args.keytab, args.cache_file)
        os.environ['KRB5CCNAME'] = args.cache_file
check = nagiosplugin.Check(Hbase(),
StringContext('status',
'OK'))
check.main()
if auth_token: auth_token.destroy()
if __name__ == '__main__':
main()
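    # Example (hypothetical principal and keytab):
    #   ./check_hbase.py -s -p nagios/monitor@EXAMPLE.COM -k /etc/security/nagios.keytab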
| apache-2.0 | Python |
|
f865bf2d7365ccecec07be7e51e8d81676f3aae2 | Add check_cycles tests module | danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv | tests/plantcv/morphology/test_check_cycles.py | tests/plantcv/morphology/test_check_cycles.py | import cv2
from plantcv.plantcv import outputs
from plantcv.plantcv.morphology import check_cycles
def test_check_cycles(morphology_test_data):
# Clear previous outputs
outputs.clear()
mask = cv2.imread(morphology_test_data.ps_mask, -1)
_ = check_cycles(mask)
assert outputs.observations['default']['num_cycles']['value'] == 16
| mit | Python |
|
1ab296398aaa796a9a5b620c4281d9376ada8b3e | Add short script which prints the entire CMIP6 MIP experiment list #197. | goord/ece2cmor3,goord/ece2cmor3 | ece2cmor3/scripts/mip-experiment-list.py | ece2cmor3/scripts/mip-experiment-list.py | #!/usr/bin/env python
# Thomas Reerink
#
# Run example:
# python mip-experiment-list.py
#
# Looping over all MIPs and within each MIP over all its MIP experiments.
# Printing the MIP experiment list with some additional info.
#
from dreqPy import dreq
dq = dreq.loadDreq()
mip_list_file= open( 'mip-experiment-list.txt', 'w' )
# Loop over the MIPs:
for mip in dq.coll['mip'].items:
# Loop over the MIP experiments:
for u in dq.inx.iref_by_sect[mip.uid].a['experiment']:
ex = dq.inx.uid[u]
mip_list_file.write( '{:20} {:20} {:30} {:3} {}'.format(mip.label, ex.mip, ex.label, ex.tier[0], ex.title) + '\n')
#print '{:20} {:20} {:30} {:3} {}'.format(mip.label, ex.mip, ex.label, ex.tier[0], ex.title)
mip_list_file.close()
| apache-2.0 | Python |
|
97eabd4e33086c66372b0e15dd1eeda12e99f427 | Create createfile.py | thewhitetulip/SamplePythonScripts | createfile.py | createfile.py | import os
#creates file on the go on the entries of a tuple
ports=[20,21,23,25,43,49,53,69,70,79,80,109,110,115,137,139,143,161,194,389,443,444,458,546,547,1080]
path=raw_input('Enter the path you want to create the files: ')
try:
os.chdir(path)
except:
print "Invalid Path"
try:
    for port in ports:
        f = open('./' + str(port), 'w')
        f.close()
except:
    print "Could not create files, please check if you have the appropriate read/write permissions"
| mit | Python |
|
6d50dc3c266f4a1b7f517935b961cfb20602011b | add benchmark.py | bowlofstew/capstone,bughoho/capstone,pyq881120/capstone,bSr43/capstone,zuloloxi/capstone,bowlofstew/capstone,dynm/capstone,krytarowski/capstone,sigma-random/capstone,bowlofstew/capstone,xia0pin9/capstone,07151129/capstone,bughoho/capstone,zuloloxi/capstone,capturePointer/capstone,AmesianX/capstone,bSr43/capstone,AmesianX/capstone,sephiroth99/capstone,zuloloxi/capstone,zneak/capstone,pombredanne/capstone,techvoltage/capstone,fvrmatteo/capstone,nplanel/capstone,xia0pin9/capstone,NeilBryant/capstone,nplanel/capstone,bSr43/capstone,dynm/capstone,07151129/capstone,fvrmatteo/capstone,sephiroth99/capstone,sigma-random/capstone,bughoho/capstone,8l/capstone,pombredanne/capstone,angelabier1/capstone,fvrmatteo/capstone,pranith/capstone,bigendiansmalls/capstone,AmesianX/capstone,pranith/capstone,zneak/capstone,bughoho/capstone,AmesianX/capstone,pyq881120/capstone,bSr43/capstone,sephiroth99/capstone,bowlofstew/capstone,pyq881120/capstone,bowlofstew/capstone,pyq881120/capstone,pranith/capstone,code4bones/capstone,capturePointer/capstone,capturePointer/capstone,bughoho/capstone,sigma-random/capstone,sephiroth99/capstone,krytarowski/capstone,krytarowski/capstone,dynm/capstone,techvoltage/capstone,zuloloxi/capstone,krytarowski/capstone,bowlofstew/capstone,sigma-random/capstone,nplanel/capstone,fvrmatteo/capstone,krytarowski/capstone,dynm/capstone,bigendiansmalls/capstone,bSr43/capstone,code4bones/capstone,NeilBryant/capstone,NeilBryant/capstone,xia0pin9/capstone,pyq881120/capstone,pranith/capstone,krytarowski/capstone,sephiroth99/capstone,07151129/capstone,bSr43/capstone,nplanel/capstone,sephiroth99/capstone,techvoltage/capstone,angelabier1/capstone,capturePointer/capstone,zneak/capstone,capturePointer/capstone,bughoho/capstone,xia0pin9/capstone,AmesianX/capstone,fvrmatteo/capstone,bigendiansmalls/capstone,xia0pin9/capstone,bSr43/capstone,fvrmatteo/capstone,bigendiansmalls/capstone,sigma-random/capstone,8l/capstone,pyq881120/capstone,bigendiansmalls/capstone,8l/capstone,AmesianX/capstone,zneak/capstone,pombredanne/capstone,pranith/capstone,bigendiansmalls/capstone,zneak/capstone,zuloloxi/capstone,pranith/capstone,capturePointer/capstone,pyq881120/capstone,code4bones/capstone,sephiroth99/capstone,NeilBryant/capstone,capturePointer/capstone,zuloloxi/capstone,8l/capstone,krytarowski/capstone,AmesianX/capstone,techvoltage/capstone,angelabier1/capstone,angelabier1/capstone,nplanel/capstone,dynm/capstone,xia0pin9/capstone,8l/capstone,pombredanne/capstone,07151129/capstone,bigendiansmalls/capstone,pombredanne/capstone,code4bones/capstone,zneak/capstone,bughoho/capstone,techvoltage/capstone,angelabier1/capstone,8l/capstone,code4bones/capstone,zneak/capstone,07151129/capstone,dynm/capstone,dynm/capstone,sigma-random/capstone,NeilBryant/capstone,nplanel/capstone,techvoltage/capstone,techvoltage/capstone,code4bones/capstone,NeilBryant/capstone,pranith/capstone,code4bones/capstone,sigma-random/capstone,zuloloxi/capstone,bowlofstew/capstone,nplanel/capstone,xia0pin9/capstone,angelabier1/capstone,8l/capstone,NeilBryant/capstone,07151129/capstone,angelabier1/capstone,07151129/capstone,fvrmatteo/capstone,pombredanne/capstone,pombredanne/capstone | suite/benchmark.py | suite/benchmark.py | #!/usr/bin/python
# Simple benchmark for Capstone by disassembling random code. By Nguyen Anh Quynh, 2014
from capstone import *
from time import time
from random import randint
def random_str(size):
    # chr(), not str(): the benchmark wants `size` random *bytes* of code
    lst = [chr(randint(0, 255)) for _ in xrange(size)]
    return "".join(lst)
def cs(md, data):
insns = md.disasm(data, 0)
# uncomment below line to speed up this function 200 times!
# return
for i in insns:
if i.address == 0x100000:
print i
md = Cs(CS_ARCH_X86, CS_MODE_32)
md.detail = False
# warm up few times
for i in xrange(3):
data = random_str(128)
cs(md, data)
# start real benchmark
c_t = 0
for i in xrange(10000):
code = random_str(128)
t1 = time()
cs(md, code)
c_t += time() - t1
print "Capstone:", c_t, "seconds"
| bsd-3-clause | Python |
|
78aea51f508a14bb1b03b49933576c84b56a7459 | Add an example for the new dropdowns | Rapptz/discord.py,Harmon758/discord.py,Harmon758/discord.py,rapptz/discord.py | examples/views/dropdown.py | examples/views/dropdown.py | import typing
import discord
from discord.ext import commands
# Defines a custom Select containing colour options
# that the user can choose. The callback function
# of this class is called when the user changes their choice
class Dropdown(discord.ui.Select):
def __init__(self):
# Set the options that will be presented inside the dropdown
options = [
discord.SelectOption(label='Red', description='Your favourite colour is red', emoji='🟥'),
discord.SelectOption(label='Green', description='Your favourite colour is green', emoji='🟩'),
discord.SelectOption(label='Blue', description='Your favourite colour is blue', emoji='🟦')
]
# The placeholder is what will be shown when no option is chosen
# The min and max values indicate we can only pick one of the three options
# The options parameter defines the dropdown options. We defined this above
super().__init__(placeholder='Choose your favourite colour...', min_values=1, max_values=1, options=options)
async def callback(self, interaction: discord.Interaction):
# Use the interaction object to send a response message containing
# the user's favourite colour or choice. The self object refers to the
# Select object, and the values attribute gets a list of the user's
# selected options. We only want the first one.
await interaction.response.send_message(f'Your favourite colour is {self.values[0]}')
class DropdownView(discord.ui.View):
def __init__(self):
super().__init__()
# Adds the dropdown to our view object.
self.add_item(Dropdown())
class Bot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned_or('$'))
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
bot = Bot()
@bot.command()
async def colour(ctx):
"""Sends a message with our dropdown containing colours"""
# Create the view containing our dropdown
view = DropdownView()
# Sending a message containing our view
await ctx.send('Pick your favourite colour:', view=view)
bot.run('token')
| mit | Python |
|
bcb6c0780aacf77069a08f8d5b44d295881d9b9d | Create solution to swap odd even characters | laxmena/CodeKata,laxmena/CodeKata | swapOddEvenChar.py | swapOddEvenChar.py | #Python3
word = list(input().strip())
for i in range(0,len(word),2):
if(i+1>=len(word)):
break
word[i],word[i+1] = word[i+1],word[i]
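# Example: "abcdef" -> "badcfe"; a trailing unpaired character stays put: "abcde" -> "badce"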
print(''.join(word)) | mit | Python |
|
7bde47d48f4e80b4449049a8b05767b30eb2c516 | Add stupid CSV export example | SoundGoof/NIPAP,plajjan/NIPAP,SpriteLink/NIPAP,bbaja42/NIPAP,ettrig/NIPAP,bbaja42/NIPAP,fredsod/NIPAP,SoundGoof/NIPAP,fredsod/NIPAP,ettrig/NIPAP,SpriteLink/NIPAP,ettrig/NIPAP,SoundGoof/NIPAP,bbaja42/NIPAP,fredsod/NIPAP,fredsod/NIPAP,fredsod/NIPAP,garberg/NIPAP,bbaja42/NIPAP,garberg/NIPAP,SpriteLink/NIPAP,plajjan/NIPAP,garberg/NIPAP,SpriteLink/NIPAP,SoundGoof/NIPAP,plajjan/NIPAP,bbaja42/NIPAP,SoundGoof/NIPAP,garberg/NIPAP,plajjan/NIPAP,SpriteLink/NIPAP,garberg/NIPAP,ettrig/NIPAP,ettrig/NIPAP,fredsod/NIPAP,bbaja42/NIPAP,ettrig/NIPAP,SpriteLink/NIPAP,plajjan/NIPAP,garberg/NIPAP,SoundGoof/NIPAP,plajjan/NIPAP | utilities/export-csv.py | utilities/export-csv.py | #!/usr/bin/python
import os
import csv
import sys
sys.path.append('../pynipap')
import pynipap
class Export:
def __init__(self, xmlrpc_uri):
self.xmlrpc_uri = xmlrpc_uri
def write(self, output_file, schema_name):
"""
"""
f = open(output_file, "w+")
writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
pynipap.xmlrpc_uri = xmlrpc_uri
ao = pynipap.AuthOptions({ 'authoritative_source': 'nipap' })
import socket,xmlrpclib
try:
schema = pynipap.Schema.list({ 'name': schema_name })[0]
except socket.error:
print >> sys.stderr, "Connection refused, please check hostname & port"
sys.exit(1)
except xmlrpclib.ProtocolError:
print >> sys.stderr, "Authentication failed, please check your username / password"
sys.exit(1)
except IndexError:
print >> sys.stderr, "Non existing schema (", schema_name, ")"
sys.exit(1)
res = pynipap.Prefix.smart_search(schema, ' ', { 'include_all_parents': True })
for p in res['result']:
writer.writerow([p.display_prefix, p.type, p.node, p.order_id, p.description])
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('--username', default='', help="Username")
parser.add_option('--password', default='', help="Password")
parser.add_option('--host', help="NIPAP backend host")
parser.add_option('--port', default=1337, help="NIPAP backend port")
parser.add_option('--schema', help="Schema name")
parser.add_option('--file', help="Output file")
(options, args) = parser.parse_args()
if options.host is None:
print >> sys.stderr, "Please specify the NIPAP backend host to work with"
sys.exit(1)
if options.schema is None:
print >> sys.stderr, "Please specify a schema to export"
sys.exit(1)
if options.file is None:
print >> sys.stderr, "Please specify an output file"
sys.exit(1)
auth_uri = ''
if options.username:
auth_uri = "%s:%s@" % (options.username, options.password)
xmlrpc_uri = "http://%(auth_uri)s%(host)s:%(port)s" % {
'auth_uri' : auth_uri,
'host' : options.host,
'port' : options.port
}
wr = Export(xmlrpc_uri)
wr.write(options.file, options.schema)
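    # Example (hypothetical backend host):
    #   python export-csv.py --host nipap.example.com --username ro --password s3cret \
    #       --schema global --file prefixes.csv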
| mit | Python |
|
9b6eddb88f5de1b7c44d42e1d4a3dc1c90180862 | Implement deck. | cwahbong/onirim-py | onirim/deck.py | onirim/deck.py | import random
class Deck:
def __init__(self, cards):
self._undrawn = list(cards)
self._discarded = []
self._limbo = []
def draw(self, n=1):
"""Draw n cards."""
if n > len(self._undrawn) or n < 0:
raise ValueError()
drawn, self._undrawn = self._undrawn[:n], self._undrawn[n:]
return drawn
def put_discard(self, card):
"""Put a card to discard pile."""
self._discarded.append(card)
def put_limbo(self, card):
"""Put a card to Limbo pile."""
self._limbo.append(card)
def shuffle(self):
"""Shuffle the undrawn pile."""
random.shuffle(self._undrawn)
def shuffle_with_limbo(self):
"""Shuffle limbo pile back to undrawn pile."""
self._undrawn += self._limbo
self._limbo = []
random.shuffle(self._undrawn)
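

if __name__ == "__main__":
    # Usage sketch (integer "cards" are placeholders for real card objects).
    deck = Deck(range(10))
    deck.shuffle()
    hand = deck.draw(5)
    for card in hand:
        deck.put_discard(card)
    deck.put_limbo(deck.draw()[0])
    deck.shuffle_with_limbo()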
| mit | Python |
|
f5711401b79433f5b52e675cec67b63f6511836a | add tests file | aloverso/loanbot | tests.py | tests.py | #!flask/bin/python
import unittest
from server import app
def add(a, b):
return a+b
class TestCase(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
def tearDown(self):
pass
def test_add(self):
self.assertEqual(add(1, 2), 3)
self.assertEqual(add(3, 4), 7)
if __name__ == '__main__':
unittest.main() | mit | Python |
|
e9d87a087a0f0102157d7c718a048c72f655c54a | Store registered refs as plugin metadata | marshmallow-code/apispec,Nobatek/apispec,marshmallow-code/smore,gorgias/apispec,jmcarp/smore | smore/ext/marshmallow.py | smore/ext/marshmallow.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from marshmallow.compat import iteritems
from marshmallow import class_registry
from smore import swagger
from smore.apispec.core import Path
from smore.apispec.utils import load_operations_from_docstring
def schema_definition_helper(spec, name, schema, **kwargs):
"""Definition helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` to provide Swagger
metadata.
:param type schema: A marshmallow Schema class.
"""
# Store registered refs, keyed by Schema class
plug = spec.plugins['smore.ext.marshmallow']
if 'refs' not in plug:
plug['refs'] = {}
plug['refs'][schema] = name
return swagger.schema2jsonschema(schema)
def schema_path_helper(view, **kwargs):
doc_operations = load_operations_from_docstring(view.__doc__)
if not doc_operations:
return
operations = doc_operations.copy()
for method, config in iteritems(doc_operations):
if 'schema' in config:
schema_cls = class_registry.get_class(config['schema'])
if not operations[method].get('responses'):
operations[method]['responses'] = {}
operations[method]['responses']['200'] = swagger.schema2jsonschema(schema_cls)
return Path(operations=operations)
def setup(spec):
spec.register_definition_helper(schema_definition_helper)
spec.register_path_helper(schema_path_helper)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from marshmallow.compat import iteritems
from marshmallow import class_registry
from smore import swagger
from smore.apispec.core import Path
from smore.apispec.utils import load_operations_from_docstring
def schema_definition_helper(name, schema, **kwargs):
"""Definition helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` to provide Swagger
metadata.
:param type schema: A marshmallow Schema class.
"""
return swagger.schema2jsonschema(schema)
def schema_path_helper(view, **kwargs):
doc_operations = load_operations_from_docstring(view.__doc__)
if not doc_operations:
return
operations = doc_operations.copy()
for method, config in iteritems(doc_operations):
if 'schema' in config:
schema_cls = class_registry.get_class(config['schema'])
if not operations[method].get('responses'):
operations[method]['responses'] = {}
operations[method]['responses']['200'] = swagger.schema2jsonschema(schema_cls)
return Path(operations=operations)
def setup(spec):
spec.register_definition_helper(schema_definition_helper)
spec.register_path_helper(schema_path_helper)
| mit | Python |
e4734cb85458475ad4fd2cf66db456b7924d6fe0 | Add : LFI Exploit tool | m101/m101-tools | exploit-lfi.py | exploit-lfi.py | #!/usr/bin/python
import argparse
import base64
import re
import requests
import sys
def scrap_results (content):
# regexp
regexp_start = re.compile ('.*STARTSTART.*')
regexp_end = re.compile ('.*ENDEND.*')
# results
results = list()
# result start and end
found_start = False
found_end = False
# getting lines
lines = content.split ('\n')
for line in lines:
if found_start and found_end:
break
if found_start == False and len (regexp_start.findall (line)) != 0:
# print 'found STARTSTART'
line = re.sub ('.*STARTSTART', '', line)
found_start = True
if found_start == True and found_end == False and len (regexp_end.findall (line)) != 0:
# print 'found ENDEND'
line = re.sub ('ENDEND.*', '', line)
found_end = True
if found_start == True and len (line) != 0:
results.append (line)
return results
# extract all potential base64 strings
# decode correct one and store potentials
def scrap_b64str (content):
# search for base64 strings, shorter than 16 chars is refused
regexp_b64 = re.compile ('[A-Za-z0-9+/=]{16,}=+')
words = regexp_b64.findall (content)
# validate each base64
# if validated it is added to our list
results = list()
for word in words:
found = True
decoded = ''
try:
decoded = base64.b64decode (word)
except Exception:
found = False
if found == False and len (re.findall ('=+$', word)) != 0:
decoded = word
found = True
if found == True and len (decoded) != 0:
results.append (decoded)
return results
parser = argparse.ArgumentParser(description='Exploit LFI')
parser.add_argument('--url', '-u', nargs=1, type=str, help='URL to attack', required=True)
parser.add_argument('--arg', '-a', nargs=1, type=str, help='Technique argument', required=True)
parser.add_argument('--technique', '-t', nargs=1, default='env', help='input, env or read')
args = parser.parse_args ()
payload = '<?php echo "STARTSTART"; passthru ("{0}"); echo "ENDEND"; ?>'.format (args.arg[0])
if args.technique[0] == 'input':
form = {
'' : payload
}
filename = 'php://input'
url = args.url[0].replace ('PAYLOAD', filename)
req = requests.get (url, data=form)
# print result
results = scrap_results (req.text)
for result in results:
print result
elif args.technique[0] == 'read':
php_filter = 'php://filter/convert.base64-encode/resource=' + args.arg[0]
url = args.url[0].replace ('PAYLOAD', php_filter)
req = requests.get (url)
# print result
results = scrap_b64str (req.text)
for result in results:
print result
else:
headers = {
'User-Agent' : payload
}
filename = '/proc/self/environ'
url = args.url[0].replace ('PAYLOAD', filename)
print url
req = requests.get (url, headers=headers)
# print result
results = scrap_results (req.text)
for result in results:
print result
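

# Example invocations (hypothetical target; PAYLOAD marks the injection point in the URL):
#   python exploit-lfi.py -u "http://victim/index.php?page=PAYLOAD" -a "uname -a" -t env
#   python exploit-lfi.py -u "http://victim/index.php?page=PAYLOAD" -a "/etc/passwd" -t read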
| agpl-3.0 | Python |
|
3654817845e1d22a5b0e648a79d0bf6db12c2704 | add run_sql shell command | jgraham/treeherder,tojonmz/treeherder,gbrmachado/treeherder,glenn124f/treeherder,jgraham/treeherder,sylvestre/treeherder,tojon/treeherder,glenn124f/treeherder,gbrmachado/treeherder,tojonmz/treeherder,KWierso/treeherder,glenn124f/treeherder,kapy2010/treeherder,jgraham/treeherder,adusca/treeherder,akhileshpillai/treeherder,akhileshpillai/treeherder,wlach/treeherder,wlach/treeherder,akhileshpillai/treeherder,edmorley/treeherder,deathping1994/treeherder,vaishalitekale/treeherder,parkouss/treeherder,moijes12/treeherder,tojon/treeherder,vaishalitekale/treeherder,jgraham/treeherder,kapy2010/treeherder,rail/treeherder,avih/treeherder,tojon/treeherder,akhileshpillai/treeherder,akhileshpillai/treeherder,adusca/treeherder,wlach/treeherder,edmorley/treeherder,glenn124f/treeherder,KWierso/treeherder,adusca/treeherder,wlach/treeherder,avih/treeherder,avih/treeherder,jgraham/treeherder,deathping1994/treeherder,vaishalitekale/treeherder,moijes12/treeherder,kapy2010/treeherder,gbrmachado/treeherder,parkouss/treeherder,wlach/treeherder,akhileshpillai/treeherder,rail/treeherder,moijes12/treeherder,deathping1994/treeherder,rail/treeherder,sylvestre/treeherder,tojonmz/treeherder,moijes12/treeherder,parkouss/treeherder,deathping1994/treeherder,rail/treeherder,edmorley/treeherder,moijes12/treeherder,moijes12/treeherder,sylvestre/treeherder,tojonmz/treeherder,jgraham/treeherder,deathping1994/treeherder,vaishalitekale/treeherder,adusca/treeherder,vaishalitekale/treeherder,KWierso/treeherder,parkouss/treeherder,glenn124f/treeherder,gbrmachado/treeherder,parkouss/treeherder,adusca/treeherder,gbrmachado/treeherder,tojonmz/treeherder,gbrmachado/treeherder,deathping1994/treeherder,avih/treeherder,tojon/treeherder,adusca/treeherder,tojonmz/treeherder,sylvestre/treeherder,sylvestre/treeherder,wlach/treeherder,kapy2010/treeherder,glenn124f/treeherder,KWierso/treeherder,parkouss/treeherder,vaishalitekale/treeherder,avih/treeherder,rail/treeherder,avih/treeherder,kapy2010/treeherder,edmorley/treeherder,rail/treeherder,sylvestre/treeherder | treeherder/model/management/commands/run_sql.py | treeherder/model/management/commands/run_sql.py | import MySQLdb
from optparse import make_option
from django.core.management.base import BaseCommand
from treeherder.model.models import Datasource
from django.conf import settings
class Command(BaseCommand):
help = ("Runs an arbitrary sql statement or file"
" on a number of databases.")
option_list = BaseCommand.option_list + (
make_option(
'--datasources',
action='store',
dest='datasources',
default='all',
help='A comma separated list of datasources to execute the sql code on'),
make_option(
'--data-type',
action='store',
dest='data_type',
default='jobs',
choices=['jobs', 'objectstore'],
help='The target data-type of the sql code'),
make_option(
'-f', '--file',
dest='sql_file',
help='Sql source file',
metavar='FILE',
default="")
)
def handle(self, *args, **options):
if not options["sql_file"]:
self.stderr.write("No sql file provided!")
return
datasources = Datasource.objects.filter(contenttype=options['data_type'])
if options['datasources'] != 'all':
if ',' in options['datasources']:
datasources = datasources.filter(
project__in=options['datasources'].split(','))
else:
datasources = datasources.filter(
project=options['datasources'])
with open(options["sql_file"]) as sql_file:
sql_code = sql_file.read()
self.stdout.write("{0} datasource found".format(
len(datasources)
))
for datasource in datasources:
self.stdout.write("--------------------------")
db = MySQLdb.connect(
host=datasource.host,
db=datasource.name,
user=settings.TREEHERDER_DATABASE_USER,
passwd=settings.TREEHERDER_DATABASE_PASSWORD)
            cursor = None
            try:
                cursor = db.cursor()
cursor.execute(sql_code)
self.stdout.write("Sql code executed on {0}".format(datasource))
except Exception as e:
error_string = "!!! Sql code execution failed on {0} !!!"
self.stderr.write(error_string.format(datasource))
self.stderr.write("{0}".format(e))
finally:
if cursor:
cursor.close()
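
# Example (hypothetical SQL file and project):
#   ./manage.py run_sql -f sql/add_index.sql --datasources mozilla-central --data-type jobs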
| mpl-2.0 | Python |
|
31924096f82954e87b33fcb4af2e7ea46a5c6336 | add map estimate of gaussian case | catniplab/vLGP | vlgp/gmap.py | vlgp/gmap.py | import click
# import jax
import numpy as onp
import jax.numpy as np
from jax.numpy import linalg
from .evaluation import timer
from .gp import sekernel
from .preprocess import get_config, get_params, initialize, fill_params, fill_trials
from .util import cut_trials
def make_prior(trials, n_factors, dt, var, scale):
for trial in trials:
n, ydim = trial['y'].shape
t = np.arange(n) * dt
K = sekernel(t, var, scale)
trial['bigK'] = np.kron(np.eye(n_factors), K)
def em(y, C, d, R, K, max_iter):
zdim, ydim = C.shape
n = K.shape[0]
m = y.shape[0]
bigK = np.kron(np.eye(zdim), K)
bigR = np.kron(np.eye(n), R)
Y = y.reshape(-1, ydim)
for i in range(max_iter):
# E step
with timer() as e_elapsed:
bigC = np.kron(C.T, np.eye(n))
A = bigK @ bigC.T
B = bigC @ A + bigR
residual = y - d[None, :]
residual = residual.transpose((0, 2, 1)).reshape(m, -1, 1)
z = A[None, ...] @ linalg.solve(B[None, ...], residual)
z = z.reshape(m, zdim, -1).transpose((0, 2, 1))
z -= z.mean(axis=(0, 1), keepdims=True)
# M step
with timer() as m_elapsed:
Z = z.reshape(-1, zdim)
C, d, r = leastsq(Y, Z) # Y = Z C + d
R = np.diag(r ** 2)
C /= linalg.norm(C)
click.echo("Iteration {:4d}, E-step {:.2f}s, M-step {:.2f}s".format(i + 1, e_elapsed(), m_elapsed()))
return z, C, d, R
def infer(trials, C, d, R):
for trial in trials:
n, ydim = trial['y'].shape
_, zdim = trial['mu'].shape
y = trial['y'] - d[None, :]
y = y.T.reshape(-1, 1)
bigC = np.kron(C.T, np.eye(n))
bigK = trial['bigK']
bigR = np.kron(np.eye(n), R)
A = bigK @ bigC.T
z = A @ linalg.solve(bigC @ A + bigR, y)
trial['mu'] = z.reshape((zdim, -1)).T
def leastsq(Y, Z, constant=True):
if constant:
Z = np.column_stack([Z, np.ones(Z.shape[0])])
C, r, *_ = onp.linalg.lstsq(Z, Y, rcond=None)
# C = linalg.solve(Z.T @ Z, Z.T @ Y)
return C[:-1, :], C[[-1], :], r
def loglik(y, z, C, d, R, var, scale, dt):
zdim, ydim = C.shape
m, n, _ = y.shape
t = np.arange(n) * dt
K = sekernel(t, var, scale)
bigK = np.kron(np.eye(zdim), K)
r = y - z @ C - d[None, :]
r = r @ (1 / np.sqrt(R))
Z = z.transpose((0, 2, 1)).reshape(m, -1, 1)
return np.sum(r ** 2) + np.sum(Z.transpose((0, 2, 1)) @ linalg.solve(bigK[None, ...], Z)) + m * linalg.slogdet(bigK)[1]
def fit(trials, n_factors, **kwargs):
"""
:param trials: list of trials
:param n_factors: number of latent factors
:param kwargs
:return:
"""
config = get_config(**kwargs)
kwargs["omega_bound"] = config["omega_bound"]
params = get_params(trials, n_factors, **kwargs)
# initialization
click.echo("Initializing")
with timer() as elapsed:
initialize(trials, params, config)
click.secho("Initialized {:.2f}s".format(elapsed()), fg="green")
# fill arrays
fill_params(params)
params['R'] = kwargs['R']
dt = kwargs['dt']
var = kwargs['var']
scale = kwargs['scale']
fill_trials(trials)
make_prior(trials, n_factors=n_factors, dt=dt, var=var, scale=scale)
segments = cut_trials(trials, params, config)
y = np.stack([segment['y'] for segment in segments])
# fill_trials(segments)
# make_prior(segments, n_factors=n_factors, dt=kwargs['dt'], var=kwargs['var'], scale=kwargs['scale'])
# EM
click.echo("Fitting")
C, d, R = params['a'], params['b'], params['R']
n = config["window"]
t = np.arange(n) * dt
K = sekernel(t, var, scale)
z, C, d, R = em(y, C, d, R, K, config['max_iter'])
params['a'], params['b'], params['R'] = C, d, R
# Inference
# click.echo("Inferring")
# infer(trials, C, d, R)
# click.secho("Done", fg="green")
return y, z, C, d, R
| mit | Python |
|
5b20a487afa90c0d91a43d4d29526d352511316f | add utils.py with utilities | enricobacis/cineca-scopus | utils.py | utils.py | from csv import DictReader
import re
def read_csv(filename):
with open(filename) as csvfile:
return list(DictReader(csvfile, dialect='excel'))
def split_name(string):
surname, name = re.search(r'^([A-Z\'\.\s]+)\s(.+)$', string).groups()
return name, surname
def iterate_names(name, surname):
yield name, surname
while ' ' in name:
name = name.rsplit(' ', 1)[0]
yield name, surname
while ' ' in surname:
surname = surname.rsplit(' ', 1)[0]
yield name, surname
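
# Usage sketch (hypothetical author string):
#   >>> name, surname = split_name("DOE John Michael")
#   >>> list(iterate_names(name, surname))
#   [('John Michael', 'DOE'), ('John', 'DOE')]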
| mit | Python |
|
89d8e6a8a422bade352d3bf94f2c59c1d0dc601b | Create dictionary.py | joshavenue/python_notebook | dictionary.py | dictionary.py | x = {'job': 'teacher', 'color': 'blue'} // Create a dictionary, list with defination
print(x['job']) // You will see 'teacher'
y = {'emotion': 'happy', 'reason': {'action': 'playing game', 'platform': 'PC'}}
print(y['reason']['action']) // You will see 'playing game'
| unlicense | Python |
|
b08341d2822ad266e07d4104a45604ad9d5b504a | add unit test for text_analyzer | misssoft/Fan.Python | src/text_analyzer.py | src/text_analyzer.py | import os
import unittest
def analyze_text(filename):
lines = 0
chars = 0
with open(filename, 'r') as f:
for line in f:
lines += 1
chars += len(line)
return (lines, chars)
class TextAnalysisTests(unittest.TestCase):
"""Test for the ``analyze_test()`` function"""
def setUp(self):
self.filename = 'funfile.txt'
with open(self.filename, 'w') as f:
f.write('Spring is here. \n'
'As the birds sing. \n'
'And the flowers and bees. \n'
'In such a joy.')
def tearDown(self):
try:
os.remove(self.filename)
except:
pass
def test_function_runs(self):
analyze_text(self.filename)
def test_line_count(self):
self.assertEqual(analyze_text(self.filename)[0], 4)
def test_charactor_count(self):
self.assertEqual(analyze_text(self.filename)[1], 78)
def test_no_such_file(self):
with self.assertRaises(IOError):
analyze_text("foo")
def test_no_deletion(self):
analyze_text(self.filename)
self.assertTrue(os.path.exists(self.filename))
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
e1aa02badee2951f4f4aeeb09f37be030466e711 | Add pyupgrades.py | streeter/dotfiles,streeter/dotfiles | bin/pyupgrades.py | bin/pyupgrades.py | #!/usr/bin/env python
import xmlrpclib
import pip
import argparse
import re
from pkg_resources import parse_version
def version_number_compare(version1, version2):
    # compare using pkg_resources' robust version parsing
    return cmp(parse_version(version1), parse_version(version2))


def normalize(v):
    return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
package_format = '{dist.project_name} {dist.version}'
display_format = '{package:40} {message}'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-a', '--all', dest='all', action='store_true', default=False)
parser.add_argument('-m', '--mirror', dest='mirror', default='http://pypi.python.org/pypi')
args = parser.parse_args()
if not args:
exit(-1)
pypi = xmlrpclib.ServerProxy(args.mirror)
for dist in pip.get_installed_distributions():
package_str = package_format.format(dist=dist)
available = pypi.package_releases(dist.project_name)
if not available:
# Try the capitalized package name
available = pypi.package_releases(dist.project_name.capitalize())
upgrade_available = True
if not available:
print display_format.format(package=package_str, message='no releases at pypi')
continue
comparison = version_number_compare(available[0], dist.version)
if comparison == 0:
if not args.all:
continue
print display_format.format(package=package_str, message='up to date')
elif comparison < 0:
print display_format.format(package=package_str, message='older version on pypi')
else:
print display_format.format(package=package_str, message='%s available' % available[0])
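
# Illustrative output (package names and versions are examples only):
#   Django 1.4                               1.5.1 available
#   requests 1.2.0                           up to date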
| mit | Python |
|
7b09a44c7df8b2aa28e45c5382626c2f8c4bf61b | Add a script to convert from rst style files to markdown | kenhys/redpen-doc,kenhys/redpen-doc | bin/run_redpen.py | bin/run_redpen.py | #!/usr/bin/python
import os
import re
import shutil
from optparse import OptionParser
def main():
parser = OptionParser(usage="usage: %prog [options]",
version="%prog 1.0")
parser.add_option("-i", "--inputdir",
action="store",
dest="indir",
default="source",
help="specify the input directory containing rst files.")
parser.add_option("-o", "--outdir",
action="store",
dest="outdir",
default="build/mdfiles",
help="specify the output directory of markdownized files.")
(options, args) = parser.parse_args()
indir = options.indir
outdir = options.outdir
if os.path.exists(outdir) == True:
shutil.rmtree(outdir)
os.makedirs(outdir)
for root, dirs, files in os.walk(indir):
for file in files:
            rstfile_pat = re.compile(r".*\.rst")
            if not rstfile_pat.search(file):
                continue
            fileroot, ext = os.path.splitext(file)
            # read reStructuredText, write Markdown
            cmdline = "pandoc -r rst -w markdown %s -o %s" % (os.path.join(root, file),
                                                              outdir + "/" + fileroot + ".md")
os.system(cmdline)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
30c368f1794f7bbc4121f732143ac07e7148a3ca | Create KevinAndExpectation.py | tejasnikumbh/Algorithms,tejasnikumbh/Algorithms,tejasnikumbh/Algorithms | Probability/KevinAndExpectation.py | Probability/KevinAndExpectation.py | # Importing standard libraries
import sys
from math import sqrt
# Parsing functions
def parseInt(stream):
return int(stream.readline().rstrip())
'''
Dynamically precomputing the summation series for N < 10^6 so that each test case
is solved in constnat time for any N less than 10^6. There fore for Task 1, this
solution takes O(1) time
'''
# Computing the summation series
def getL(N):
L = [0]*(N + 1)
L[1] = 1.0
for i in range(2, N + 1):
L[i] = L[i - 1] + sqrt(i * 4.0 - 3.0)
return L
'''
For N greater than 10^6 we take an approximation of the series since we have not
precomputed it already. This approximation was obtained from Wolfram alpha
'''
def getAns(N):
return (4.0/3.0) * (N ** 1.5)
# Main function for the program
if __name__ == "__main__":
stream = sys.stdin
T = parseInt(stream)
L = getL(1000000)
for i in range(T):
N = parseInt(stream)
        if(N <= 1000000):
summationN = L[N]
ans = 0.5 - 1.0/N + (0.5/N) * (summationN)
print ans
else:
summationN = getAns(N)
ans = 0.5 - 1.0/N + (0.5/N) * (summationN)
print ans
| bsd-2-clause | Python |
|
53b0d93a7a29121e9d24058bfe4b7ee3bd33f7ca | Add info for version 2.16 (#3601) | skosukhin/spack,matthiasdiener/spack,lgarren/spack,iulian787/spack,TheTimmy/spack,EmreAtes/spack,lgarren/spack,krafczyk/spack,skosukhin/spack,EmreAtes/spack,mfherbst/spack,TheTimmy/spack,matthiasdiener/spack,EmreAtes/spack,skosukhin/spack,LLNL/spack,iulian787/spack,krafczyk/spack,lgarren/spack,LLNL/spack,iulian787/spack,tmerrick1/spack,EmreAtes/spack,lgarren/spack,mfherbst/spack,tmerrick1/spack,tmerrick1/spack,TheTimmy/spack,iulian787/spack,matthiasdiener/spack,skosukhin/spack,lgarren/spack,mfherbst/spack,krafczyk/spack,tmerrick1/spack,EmreAtes/spack,mfherbst/spack,krafczyk/spack,matthiasdiener/spack,skosukhin/spack,tmerrick1/spack,iulian787/spack,LLNL/spack,TheTimmy/spack,matthiasdiener/spack,LLNL/spack,mfherbst/spack,LLNL/spack,TheTimmy/spack,krafczyk/spack | var/spack/repos/builtin/packages/ack/package.py | var/spack/repos/builtin/packages/ack/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Ack(Package):
"""ack 2.14 is a tool like grep, optimized for programmers.
Designed for programmers with large heterogeneous trees of
source code, ack is written purely in portable Perl 5 and takes
advantage of the power of Perl's regular expressions."""
homepage = "http://beyondgrep.com/"
url = "http://beyondgrep.com/ack-2.14-single-file"
version('2.16', '7085b5a5c76fda43ff049410870c8535', expand=False)
version('2.14', 'e74150a1609d28a70b450ef9cc2ed56b', expand=False)
depends_on('perl')
def install(self, spec, prefix):
mkdirp(prefix.bin)
ack = 'ack-{0}-single-file'.format(self.version)
# rewrite the script's #! line to call the perl dependency
shbang = '#!' + join_path(spec['perl'].prefix.bin, 'perl')
filter_file(r'^#!/usr/bin/env perl', shbang, ack)
install(ack, join_path(prefix.bin, "ack"))
set_executable(join_path(prefix.bin, "ack"))
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Ack(Package):
"""ack 2.14 is a tool like grep, optimized for programmers.
Designed for programmers with large heterogeneous trees of
source code, ack is written purely in portable Perl 5 and takes
advantage of the power of Perl's regular expressions."""
homepage = "http://beyondgrep.com/"
url = "http://beyondgrep.com/ack-2.14-single-file"
version('2.14', 'e74150a1609d28a70b450ef9cc2ed56b', expand=False)
depends_on('perl')
def install(self, spec, prefix):
mkdirp(prefix.bin)
ack = 'ack-{0}-single-file'.format(self.version)
# rewrite the script's #! line to call the perl dependency
shbang = '#!' + join_path(spec['perl'].prefix.bin, 'perl')
filter_file(r'^#!/usr/bin/env perl', shbang, ack)
install(ack, join_path(prefix.bin, "ack"))
set_executable(join_path(prefix.bin, "ack"))
| lgpl-2.1 | Python |
d0d182605389ec73773df35b9e06455b9f9a2923 | add get_posts | Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python | facebook/get_posts.py | facebook/get_posts.py | """
A simple example script to get all posts on a user's timeline.
Originally created by Mitchell Stewart.
<https://gist.github.com/mylsb/10294040>
"""
import facebook
import requests
def some_action(post):
""" Here you might want to do something with each post. E.g. grab the
post's message (post['message']) or the post's picture (post['picture']).
In this implementation we just print the post's created time.
"""
print(post['created_time'])
# You'll need an access token here to do anything. You can get a temporary one
# here: https://developers.facebook.com/tools/explorer/
access_token = 'CAAHPNmH9dEUBAJ53c9925baOfzbjsCmaAujxZBSEBBpIKqxBwyqBTDMsQSZCsfxReqDlAIsyAWC6ZCtLMibt5G6AcHy2nDb2IC4pvFz0SMJWpnMJol3Rzvt80PKNz9IYGDHfNZBQTF3VhI36yDE8qiI2EzTK7LKuNLBEq3AugsSgXdFGtKcbP2UOtoZCZBaRSZBxHzph5yOmV5yflsJ5258'
# Look at Bill Gates's profile for this example by using his Facebook id.
user = 'BillGates'
graph = facebook.GraphAPI(access_token)
profile = graph.get_object(user)
posts = graph.get_connections(profile['id'], 'posts')
# Wrap this block in a while loop so we can keep paginating requests until
# finished.
while True:
try:
# Perform some action on each post in the collection we receive from
# Facebook.
[some_action(post=post) for post in posts['data']]
# Attempt to make a request to the next page of data, if it exists.
posts = requests.get(posts['paging']['next']).json()
except KeyError:
# When there are no more pages (['paging']['next']), break from the
# loop and end the script.
break
| mit | Python |
|
419ca7099bf47ed00ede73d9de14690a643a3943 | Add data for integration testing of basic csv and crosstab formats | henrykironde/deletedret,goelakash/retriever,henrykironde/deletedret,goelakash/retriever | test/test_integration.py | test/test_integration.py | """Integrations tests for EcoData Retriever"""
import os
import shutil
from retriever import HOME_DIR
simple_csv = {'name': 'simple_csv',
'raw_data': "a,b,c\n1,2,3\n4,5,6",
'script': "shortname: simple_csv\ntable: simple_csv, http://example.com/simple_csv.txt",
'expect_out': "a,b,c\n1,2,3\n4,5,6"}
crosstab = {'name': 'crosstab',
'raw_data': "a,b,c1,c2\n1,1,1.1,1.2\n1,2,2.1,2.2",
'script': "shortname: crosstab\ntable: crosstab, http://example.com/crosstab.txt\n*column: a, int\n*column: b, int\n*ct_column: c\n*column: val, ct-double\n*ct_names: c1,c2",
'expect_out': "a,b,c,val\n1,1,c1,1.1\n1,1,c2,1.2\n1,2,c1,2.1\n1,2,c2,2.2"}
tests = [simple_csv, crosstab]
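# A sketch of how a harness might consume these fixtures (the real runner is
# assumed to live elsewhere in the suite; the loop below is illustrative, not
# a retriever API):
#
#     for test in tests:
#         # write test['script'] and test['raw_data'] to disk under HOME_DIR,
#         # run the engine, and compare its output against test['expect_out']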
| mit | Python |
|
465fbc1657e90134323fd05ee4216da5af110ee4 | add tools | pengmeng/PyCrawler,ymero/PyCrawler,pengmeng/PyCrawler | pycrawler/utils/tools.py | pycrawler/utils/tools.py | __author__ = 'mengpeng'
import time
def gethash(string, cap=0xffffffff):
return hash(string) & cap
def timestamp():
return time.strftime("%H:%M:%S", time.localtime(time.time()))
def datastamp():
return time.strftime("%Y-%m-%d", time.localtime(time.time()))
def fullstamp():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) | mit | Python |
|
1983e84e41f6dfe8b54e4a7d7535d0b89f9dd58a | add an example of a client for a change | teepark/greenhouse | examples/parallel_client.py | examples/parallel_client.py | '''a bunch of examples of how to get a list of urls in parallel
each of them uses a different greenhouse api to retrieve a list of urls in
parallel and return a dictionary mapping urls to response bodies
'''
import urllib2
import greenhouse
# urllib2 obviously doesn't explicitly use greenhouse sockets, but we can
# override socket.socket so it uses them anyway
greenhouse.io.monkeypatch()
#
# simply schedule greenlets and use an event to signal the all clear
#
def _get_one(url, results, count, done_event):
results[url] = urllib2.urlopen(url).read()
    if len(results) == count:
done_event.set() # wake up the original greenlet
def get_urls(urls):
count = len(urls)
results = {}
alldone = greenhouse.Event()
# each url gets its own greenlet to fetch it
for index, url in enumerate(urls):
greenhouse.schedule(_get_one, args=(url, results, count, alldone))
alldone.wait()
return results
#
# create two Queue objects, one for sending urls to be processed, another for
# sending back the results.
#
# this is a little awkward for this specific use case, but is more like how you
# might do it if you don't have a bounded set of inputs but will want to
# constantly send off jobs to be run.
#
def _queue_runner(in_q, out_q, stop):
while 1:
url = in_q.get()
if url is stop:
break
out_q.put((url, urllib2.urlopen(url).read()))
def get_urls_queue(urls, parallelism=None):
in_q = greenhouse.Queue()
out_q = greenhouse.Queue()
results = {}
stop = object()
parallelism = parallelism or len(urls)
for i in xrange(parallelism):
greenhouse.schedule(_queue_runner, args=(in_q, out_q, stop))
for url in urls:
in_q.put(url)
for url in urls:
url, result = out_q.get()
results[url] = result
for i in xrange(parallelism):
in_q.put(stop)
return results
#
# the Queue example above is basically a small reimplementation of Pools
#
def _pool_job(url):
return url, urllib2.urlopen(url).read()
def get_urls_pool(urls, parallelism=None):
pool = greenhouse.Pool(_pool_job, parallelism or len(urls))
pool.start()
results = {}
for url in urls:
pool.put(url)
for url in urls:
url, result = pool.get()
results[url] = result
pool.close()
return results
#
# this one returns a list of the results in an order corresponding to the
# arguments instead of a dictionary mapping them (to show off OrderedPool)
#
def _ordered_pool_job(url):
return urllib2.urlopen(url).read()
def get_urls_ordered_pool(urls, parallelism=None):
pool = greenhouse.OrderedPool(_ordered_pool_job, parallelism or len(urls))
pool.start()
for url in urls:
pool.put(url)
# OrderedPool caches out-of-order results and produces
# them corresponding to the order in which they were put()
results = [pool.get() for url in urls]
pool.close()
return results
#
# one last version, showcasing a further abstraction of OrderedPool
#
def get_urls_ordered_map(urls, parallelism=None):
return greenhouse.pool.map(
lambda u: urllib2.urlopen(u).read(),
urls,
pool_size=parallelism or len(urls))
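#
# a minimal usage sketch (not part of the original helpers): any of the
# get_urls* functions above is interchangeable here; this assumes greenhouse
# is installed and that the listed hosts actually respond.
#
if __name__ == '__main__':
    pages = get_urls(['http://example.com/', 'http://example.org/'])
    for url, page in pages.items():
        print '%s: %d bytes' % (url, len(page))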
| bsd-3-clause | Python |
|
d8fc66417860e634bbb2a6d860628b645811d62c | Add WIP for Python example | intel-iot-devkit/upm,Propanu/upm,nitirohilla/upm,kissbac/upm,kissbac/upm,nitirohilla/upm,kissbac/upm,jontrulson/upm,skiselev/upm,kissbac/upm,pylbert/upm,nitirohilla/upm,malikabhi05/upm,g-vidal/upm,pylbert/upm,g-vidal/upm,spitfire88/upm,skiselev/upm,andreivasiliu2211/upm,Jon-ICS/upm,Propanu/upm,stefan-andritoiu/upm,g-vidal/upm,whbruce/upm,g-vidal/upm,andreivasiliu2211/upm,spitfire88/upm,whbruce/upm,Propanu/upm,skiselev/upm,sasmita/upm,sasmita/upm,jontrulson/upm,whbruce/upm,intel-iot-devkit/upm,skiselev/upm,Jon-ICS/upm,Jon-ICS/upm,skiselev/upm,andreivasiliu2211/upm,nitirohilla/upm,intel-iot-devkit/upm,andreivasiliu2211/upm,malikabhi05/upm,skiselev/upm,malikabhi05/upm,pylbert/upm,Propanu/upm,spitfire88/upm,whbruce/upm,g-vidal/upm,malikabhi05/upm,stefan-andritoiu/upm,malikabhi05/upm,nitirohilla/upm,spitfire88/upm,Jon-ICS/upm,kissbac/upm,whbruce/upm,andreivasiliu2211/upm,sasmita/upm,stefan-andritoiu/upm,pylbert/upm,g-vidal/upm,sasmita/upm,intel-iot-devkit/upm,Propanu/upm,pylbert/upm,sasmita/upm,malikabhi05/upm,jontrulson/upm,pylbert/upm,stefan-andritoiu/upm,intel-iot-devkit/upm,intel-iot-devkit/upm,Jon-ICS/upm,spitfire88/upm,jontrulson/upm,stefan-andritoiu/upm,Propanu/upm,jontrulson/upm,stefan-andritoiu/upm | examples/python/curieimu.py | examples/python/curieimu.py | #!/usr/bin/python
# Author: Ron Evans (@deadprogram)
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import time, sys, signal, atexit
import pyupm_curieimu as curieimu
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit,
# including functions from the curieimu module
def exitHandler():
print "Exiting"
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while(1):
# Get the acceleration
curieimu.updateAccel();
outputStr = "acc: gX {0} - gY {1} - gZ {2}".format(
curieimu.getAccelX(), curieimu.getAccelY(),
curieimu.getAccelZ())
print outputStr
print " "
time.sleep(1)
| mit | Python |
|
b78fb81cba34992bb84ed3814aae04ce05ef913f | Add del-uri.py example script | ufcg-lsd/python-hpOneView,HewlettPackard/python-hpOneView,miqui/python-hpOneView,andreadean5/python-hpOneView,HewlettPackard/python-hpOneView,danielreed/python-hpOneView | examples/scripts/del-uri.py | examples/scripts/del-uri.py | #!/usr/bin/env python3
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
if sys.version_info < (3, 4):
raise Exception("Must use Python 3.4 or later")
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print("EULA display needed")
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
    # Login with given credentials
try:
con.login(credential)
except:
print('Login failed')
def deluri(con, uri):
resource = con.delete(uri)
pprint(resource)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Delete resource by URI
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
parser.add_argument('-i', dest='uri', required=False,
help='''
URI of the resource to delete''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
sec = hpov.security(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
deluri(con, args.uri)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| mit | Python |
|
5cf3ff125226ddbf2edfad9d3c0d6ea2d59618ce | add missing file | mmarkov/pygraphviz,mmarkov/pygraphviz | pygraphviz/tests/test.py | pygraphviz/tests/test.py | #!/usr/bin/env python
import sys
from os import path,getcwd
def run(verbosity=1,doctest=False,numpy=True):
"""Run PyGraphviz tests.
Parameters
----------
verbosity: integer, optional
Level of detail in test reports. Higher numbers provide more detail.
doctest: bool, optional
True to run doctests in code modules
"""
try:
import nose
except ImportError:
raise ImportError(\
"The nose package is needed to run the tests.")
sys.stderr.write("Running PyGraphiz tests:")
nx_install_dir=path.join(path.dirname(__file__), path.pardir)
# stop if running from source directory
if getcwd() == path.abspath(path.join(nx_install_dir,path.pardir)):
raise RuntimeError("Can't run tests from source directory.\n"
"Run 'nosetests' from the command line.")
argv=[' ','--verbosity=%d'%verbosity,
'-w',nx_install_dir,
'-exe']
if doctest:
argv.extend(['--with-doctest','--doctest-extension=txt'])
nose.run(argv=argv)
if __name__=="__main__":
run()
| bsd-3-clause | Python |
|
2af53a39096c0eab9d95c304c802281fe3c580ae | Make JAX CompiledFunction objects pickle-able. | google/jax,google/jax,google/jax,google/jax | tests/pickle_test.py | tests/pickle_test.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interoperability between JAX and pickling libraries."""
import pickle
import unittest
from absl.testing import absltest
try:
import cloudpickle
except ImportError:
cloudpickle = None
import jax
from jax.config import config
from jax import test_util as jtu
config.parse_flags_with_absl()
class CloudpickleTest(jtu.JaxTestCase):
@unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
@unittest.skipIf(jax.lib._xla_extension_version < 31,
"Requires jaxlib 0.1.71")
def testPickleOfJittedFunctions(self):
@jax.jit
def f(x, y):
return x * y
@jax.jit
def g(z):
return f(z, z + 77) # noqa: F821
expected = g(32)
s = cloudpickle.dumps(g)
del f, g
g_unpickled = pickle.loads(s)
actual = g_unpickled(32)
self.assertEqual(expected, actual)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| apache-2.0 | Python |
|
c4ee6bb374e07a07bac8b8f52cf94d7d474e0e33 | Fix typo in test comment | rhizolab/rhizo | tests/test_config.py | tests/test_config.py | import os
from pathlib import Path
from rhizo.config import load_config
def check_config(config):
assert config.output_path == '/foo/bar'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 2
assert round(config.sub_config.c - 3.14, 4) == 0
def _load_test_config(filename, use_environ=False):
"""Load a config file from the test_data subdirectory."""
path = Path(__file__).parent / 'test_data' / filename
return load_config(str(path), use_environ)
def test_text_config():
config = _load_test_config('sample_config.txt')
check_config(config)
def test_environment_config():
os.environ['RHIZO_SUB_CONFIG'] = 'a: override\nb: 3'
os.environ['RHIZO_OTHER_SETTING'] = 'from_env'
config = _load_test_config('sample_config.json', True)
# Not overridden in environment
assert config.output_path == '/foo/bar'
# Overridden in environment; dict value in environment
assert config.sub_config == { "a": "override", "b": 3 }
# Only specified in environment
assert config.other_setting == 'from_env'
def test_json_config():
# Make sure environment override only happens if requested
os.environ['RHIZO_OUTPUT_PATH'] = 'overridden'
config = _load_test_config('sample_config.json')
check_config(config)
def test_hjson_config():
config = _load_test_config('sample_config.hjson')
check_config(config)
def test_config_update():
config = _load_test_config('sample_config.hjson')
config.update(_load_test_config('update.hjson'))
assert config.output_path == '/foo/test'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 3
| import os
from pathlib import Path
from rhizo.config import load_config
def check_config(config):
assert config.output_path == '/foo/bar'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 2
assert round(config.sub_config.c - 3.14, 4) == 0
def _load_test_config(filename, use_environ=False):
"""Load a config file from the test_data subdirectory."""
path = Path(__file__).parent / 'test_data' / filename
return load_config(str(path), use_environ)
def test_text_config():
config = _load_test_config('sample_config.txt')
check_config(config)
def test_environment_config():
os.environ['RHIZO_SUB_CONFIG'] = 'a: override\nb: 3'
os.environ['RHIZO_OTHER_SETTING'] = 'from_env'
config = _load_test_config('sample_config.json', True)
# Not overridden in environmene
assert config.output_path == '/foo/bar'
# Overridden in environment; dict value in environment
assert config.sub_config == { "a": "override", "b": 3 }
# Only specified in environment
assert config.other_setting == 'from_env'
def test_json_config():
# Make sure environment override only happens if requested
os.environ['RHIZO_OUTPUT_PATH'] = 'overridden'
config = _load_test_config('sample_config.json')
check_config(config)
def test_hjson_config():
config = _load_test_config('sample_config.hjson')
check_config(config)
def test_config_update():
config = _load_test_config('sample_config.hjson')
config.update(_load_test_config('update.hjson'))
assert config.output_path == '/foo/test'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 3
| mit | Python |
8006d142a00a6dae70850b3c9d816f745f252260 | create settings file with parent_separator setting | noxan/django-mini-cms | cms/settings.py | cms/settings.py | from django.conf import settings
PARENT_SEPARATOR = getattr(settings, 'MINICMS_PARENT_SEPARATOR', '/')
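# Usage sketch (hypothetical names): a project overrides the separator in its
# own settings module, and consumers join ancestor titles with it, e.g.
#
#     # settings.py: MINICMS_PARENT_SEPARATOR = ' > '
#     breadcrumb = PARENT_SEPARATOR.join(['Home', 'About', 'Team'])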
| bsd-3-clause | Python |
|
7c8d43b16d6b47555caeb00234590bc8d335ed71 | test markup | willmcgugan/rich | tests/test_markup.py | tests/test_markup.py | import pytest
from rich.markup import MarkupError, _parse, render
from rich.text import Span
def test_parse():
result = list(_parse("[foo]hello[/foo][bar]world[/][[escaped]]"))
expected = [
(None, "[foo]"),
("hello", None),
(None, "[/foo]"),
(None, "[bar]"),
("world", None),
(None, "[/]"),
("[", None),
("escaped", None),
("]", None),
]
assert result == expected
def test_render():
result = render("[bold]FOO[/bold]")
assert str(result) == "FOO"
assert result.spans == [Span(0, 3, "bold")]
def test_markup_error():
with pytest.raises(MarkupError):
assert render("foo[/]")
with pytest.raises(MarkupError):
assert render("foo[/bar]")
with pytest.raises(MarkupError):
assert render("[foo]hello[/bar]")
| mit | Python |
|
93b2972c41855511cddf57029ab8fce0dccd9265 | add hashtable using open addressing | haandol/algorithm_in_python | ds/hash.py | ds/hash.py | '''HashTable using open addressing'''
class HashTable(object):
def __init__(self):
self.size = 11
self.keys = [None] * self.size
self.data = [None] * self.size
def hash(self, key):
return key % self.size
def rehash(self, key):
return (key + 1) % self.size
def put(self, key, data):
slot = self.hash(key)
if self.keys[slot] is None:
self.keys[slot] = key
self.data[slot] = data
else:
while self.keys[slot] is not None:
slot = self.rehash(slot)
if self.keys[slot] == key:
self.data[slot] = data # replace
break
else:
self.keys[slot] = key
self.data[slot] = data
def get(self, key):
slot = self.hash(key)
if self.keys[slot] == key:
return self.data[slot]
else:
start_slot = slot
while self.keys[slot] != key:
slot = self.rehash(slot)
if slot == start_slot:
return None
else:
return self.data[slot]
def __setitem__(self, key, data):
self.put(key, data)
def __getitem__(self, key):
return self.get(key)
def __str__(self):
return ', '.join(map(str, enumerate(self.data)))
if __name__ == '__main__':
H = HashTable()
H[54] = "cat"
H[26] = "dog"
H[93] = "lion"
H[17] = "tiger"
H[77] = "bird"
H[31] = "cow"
H[44] = "goat"
H[55] = "pig"
H[20] = "chicken"
print(H)
H[9] = "duck"
print(H[9])
print(H)
| mit | Python |
|
256e1bb8dd543051fe51b3b669ab4a10c0556f40 | add back pytext | Kaggle/docker-python,Kaggle/docker-python | tests/test_pytext.py | tests/test_pytext.py | import unittest
from pytext.config.field_config import FeatureConfig
from pytext.data.featurizer import InputRecord, SimpleFeaturizer
class TestPyText(unittest.TestCase):
def test_tokenize(self):
featurizer = SimpleFeaturizer.from_config(
SimpleFeaturizer.Config(), FeatureConfig()
)
tokens = featurizer.featurize(InputRecord(raw_text="At eight o'clock")).tokens
self.assertEqual(['at', 'eight', "o'clock"], tokens)
| apache-2.0 | Python |
|
ea0b0e3b3ca2b3ad51ae9640f7f58d9f2737f64c | Split out runner | emonty/dox,stackforge/dox,coolsvap/dox,emonty/dox | dox/runner.py | dox/runner.py | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'Runner',
]
import sh
class Runner(object):
def __init__(self, args):
self.args = args
def run(self, image, command):
print("Going to run {0} in {1}".format(command, image))
if self.args.rebuild:
print("Need to rebuild")
sh.ls()
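# a minimal usage sketch (the CLI layer is assumed to build the args object;
# the image and command values are placeholders):
#
#     import argparse
#     args = argparse.Namespace(rebuild=False)
#     Runner(args).run('ubuntu', 'tox')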
| apache-2.0 | Python |
|
af75f727e5ec22020c8d91af6a0302ea0e4bda74 | Support for http://docs.oasis-open.org/security/saml/Post2.0/sstc-request-initiation-cd-01.html in the metadata. | tpazderka/pysaml2,tpazderka/pysaml2,Runscope/pysaml2,Runscope/pysaml2 | src/saml2/extension/reqinit.py | src/saml2/extension/reqinit.py | #!/usr/bin/env python
#
# Generated Thu May 15 13:58:36 2014 by parse_xsd.py version 0.5.
#
import saml2
from saml2 import md
NAMESPACE = 'urn:oasis:names:tc:SAML:profiles:SSO:request-init'
class RequestInitiator(md.EndpointType_):
"""The urn:oasis:names:tc:SAML:profiles:SSO:request-init:RequestInitiator
element """
c_tag = 'RequestInitiator'
c_namespace = NAMESPACE
c_children = md.EndpointType_.c_children.copy()
c_attributes = md.EndpointType_.c_attributes.copy()
c_child_order = md.EndpointType_.c_child_order[:]
c_cardinality = md.EndpointType_.c_cardinality.copy()
def request_initiator_from_string(xml_string):
return saml2.create_class_from_xml_string(RequestInitiator, xml_string)
ELEMENT_FROM_STRING = {
RequestInitiator.c_tag: request_initiator_from_string,
}
ELEMENT_BY_TAG = {
'RequestInitiator': RequestInitiator,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
| bsd-2-clause | Python |
|
cfa5b544c3d44a7440feca006c01bbd72ecc0286 | Test arena constants | ntruessel/qcgc,ntruessel/qcgc,ntruessel/qcgc | test/test_arena.py | test/test_arena.py | from support import lib,ffi
from qcgc_test import QCGCTest
class ArenaTestCase(QCGCTest):
def test_size_calculations(self):
exp = lib.QCGC_ARENA_SIZE_EXP
size = 2**exp
bitmap = size / 128
effective_cells = (size - 2 * bitmap) / 16
self.assertEqual(size, lib.qcgc_arena_size)
self.assertEqual(bitmap, lib.qcgc_arena_bitmap_size)
self.assertEqual(effective_cells, lib.qcgc_arena_cells_count)
| mit | Python |
|
12270bc14b44343b4babef3b6445074685b59bd7 | Create histogram.py | vosmann/miniutils,vosmann/miniutils,vosmann/miniutils | python/histogram.py | python/histogram.py | import sys
histogram = dict()
bin_width = 5
max_index = 0
for line in sys.stdin:
if not line:
continue
number = int(line)
bin_index = number / bin_width
if bin_index not in histogram:
histogram[bin_index] = 0
histogram[bin_index] = histogram[bin_index] + 1
if bin_index > max_index:
max_index = bin_index
# print every bin up to the last occupied one, plus one empty bin after it
for index in range(max_index + 2):
    if index not in histogram:
        histogram[index] = 0
    count = histogram[index]
print "[{0}, {1}> : {2}".format(index * bin_width, (index + 1) * bin_width, count)
| apache-2.0 | Python |
|
8b6b30997816bae1255c3e035851b8e6edb5e4c7 | add a test | h4ki/couchapp,diderson/couchapp,h4ki/couchapp,couchapp/couchapp,diderson/couchapp,flimzy/couchapp,diderson/couchapp,dustin/couchapp,couchapp/couchapp,diderson/couchapp,flimzy/couchapp,h4ki/couchapp,flimzy/couchapp,couchapp/couchapp,dustin/couchapp,benoitc/erica,couchapp/couchapp,h4ki/couchapp,dustin/couchapp,flimzy/couchapp,benoitc/erica | python/test/test.py | python/test/test.py | import unittest
import os
import couchapp.utils
class CouchAppTest(unittest.TestCase):
def testInCouchApp(self):
dir_, file_ = os.path.split(__file__)
if dir_:
os.chdir(dir_)
startdir = os.getcwd()
try:
os.chdir("in_couchapp")
os.chdir("installed")
cwd = os.getcwd()
self.assertEquals(couchapp.utils.in_couchapp(), cwd,
"in_couchapp() returns %s" %
couchapp.utils.in_couchapp())
os.chdir(os.path.pardir)
os.chdir("no_install")
self.assert_(not couchapp.utils.in_couchapp(),
"Found a couchapp at %s but didn't expect one!"
% couchapp.utils.in_couchapp())
finally:
os.chdir(startdir)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | Python |
|
952438d97fc0c96afaf505469cc7b9cb0c9f287d | Add config file with the list of relays availables | pahumadad/raspi-relay-api | relay_api/conf/config.py | relay_api/conf/config.py | # List of available relays
relays = [
{
"id": 1,
"gpio": 20,
"name": "relay 1"
},
{
"id": 2,
"gpio": 21,
"name": "relay 2"
}
]
| mit | Python |
|
b5083af1cce5fb5b9c7bb764b18edce8640bd3a1 | add utilLogger.py from toLearn/ and update to v0.4 | yasokada/python-151113-lineMonitor,yasokada/python-151113-lineMonitor | utilLogger.py | utilLogger.py | import os.path
import datetime
'''
v0.4 2015/11/30
- comment out test run
- add from sentence to import CUtilLogger
v0.3 2015/11/30
- change array declaration to those using range()
- __init__() does not take saveto arg
- automatically get file name based on the date
v0.2 2015/11/30
- update add() to handle auto save feature
v0.1 2015/11/30
- add save()
- add add()
- add __init__()
'''
class CUtilLogger:
def __init__(self):
self.idx = 0
self.bufferNum = 5
self.strs = [ 0 for idx in range(10)]
return
def clear(self):
for idx in range(0, self.idx):
self.strs[idx] = ""
self.idx = 0
def add(self,str):
self.strs[self.idx] = str
self.idx = self.idx + 1
# print self.idx
if self.idx >= self.bufferNum:
self.save()
self.clear()
def save(self):
today = datetime.date.today()
yymmdd = today.strftime("%y%m%d")
filename = yymmdd + ".log"
with open(filename, "a") as logfd:
for idx in range(0, self.idx):
text = self.strs[idx] + "\r\n"
logfd.write(text)
# Usage
'''
from utilLogger import CUtilLogger
logger = CUtilLogger()
for loop in range(0, 31):
logger.add("test")
logger.save() # to save the rest
logger = None
'''
| mit | Python |
|
99f5d264ab88573e0541c529eca905b8a1d16873 | Bump to 0.5.3 dev. | davidt/rbtools,datjwu/rbtools,reviewboard/rbtools,beol/rbtools,haosdent/rbtools,1tush/rbtools,reviewboard/rbtools,datjwu/rbtools,davidt/rbtools,haosdent/rbtools,beol/rbtools,reviewboard/rbtools,davidt/rbtools,halvorlu/rbtools,halvorlu/rbtools,haosdent/rbtools,datjwu/rbtools,beol/rbtools,halvorlu/rbtools | rbtools/__init__.py | rbtools/__init__.py | #
# __init__.py -- Basic version and package information
#
# Copyright (c) 2007-2009 Christian Hammond
# Copyright (c) 2007-2009 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The version of RBTools
#
# This is in the format of:
#
# (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (0, 5, 3, 'alpha', 0, False)
def get_version_string():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
if VERSION[3] == 'rc':
version += ' RC%s' % VERSION[4]
else:
version += ' %s %s' % (VERSION[3], VERSION[4])
if not is_release():
version += " (dev)"
return version
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
version += '%s%s' % (VERSION[3], VERSION[4])
return version
def is_release():
return VERSION[5]
__version_info__ = VERSION[:-1]
__version__ = get_package_version()
| #
# __init__.py -- Basic version and package information
#
# Copyright (c) 2007-2009 Christian Hammond
# Copyright (c) 2007-2009 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The version of RBTools
#
# This is in the format of:
#
# (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (0, 5, 2, 'final', 0, True)
def get_version_string():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
if VERSION[3] == 'rc':
version += ' RC%s' % VERSION[4]
else:
version += ' %s %s' % (VERSION[3], VERSION[4])
if not is_release():
version += " (dev)"
return version
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
version += '%s%s' % (VERSION[3], VERSION[4])
return version
def is_release():
return VERSION[5]
__version_info__ = VERSION[:-1]
__version__ = get_package_version()
| mit | Python |
90948c62d1d01800c6a75dd5f15d7fef334dc66f | Add python unittests | dragonfi/noticeboard,dragonfi/noticeboard,dragonfi/noticeboard,dragonfi/noticeboard | noticeboard/test_noticeboard.py | noticeboard/test_noticeboard.py | import os
import json
import tempfile
import unittest
from noticeboard import noticeboard
class TestNoticeboard(unittest.TestCase):
def setUp(self):
self.fd, noticeboard.app.config["DATABASE"] = tempfile.mkstemp()
noticeboard.app.config["TESTING"] = True
self.app = noticeboard.app.test_client()
noticeboard.init_db()
def tearDown(self):
os.close(self.fd)
os.unlink(noticeboard.app.config["DATABASE"])
def decode_json(self, resp):
return json.loads(resp.data.decode('utf-8'))
def test_no_note_by_default(self):
resp = self.app.get("/api/v1/notes")
data = self.decode_json(resp)
self.assertEqual(data["notes"], [])
def test_creating_note_with_text(self):
text = "Foo Bar Baz"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
data = self.decode_json(resp)
self.assertEqual(data["note"]["text"], text)
def test_created_note_can_be_retrieved(self):
text = "Hello World!"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
created_note = self.decode_json(resp)["note"]
resp = self.app.get("/api/v1/notes/{}".format(created_note["id"]))
retrieved_note = self.decode_json(resp)["note"]
self.assertEqual(retrieved_note, created_note)
def test_created_note_shows_up_in_notes(self):
text = "Hello, 世界!"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
note1 = self.decode_json(resp)["note"]
text = "This is fun!"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
note2 = self.decode_json(resp)["note"]
resp = self.app.get("/api/v1/notes")
notes = self.decode_json(resp)["notes"]
self.assertIn(note1, notes)
self.assertIn(note2, notes)
| mit | Python |
|
17fcdd9a01be24ad9562e5a558e2dd65a84d1a19 | Add missing tests/queuemock.py | privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea | tests/queuemock.py | tests/queuemock.py | # -*- coding: utf-8 -*-
#
# 2019-01-07 Friedrich Weber <friedrich.weber@netknights.it>
# Implement queue mock
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import functools
import mock
from privacyidea.lib.queue import get_job_queue
from privacyidea.config import TestingConfig
from privacyidea.lib.queue.promise import ImmediatePromise
from privacyidea.lib.queue.base import BaseQueue, QueueError
from tests.base import OverrideConfigTestCase
class FakeQueue(BaseQueue):
"""
A queue class that keeps track of enqueued jobs, for usage in unit tests.
"""
def __init__(self, options):
BaseQueue.__init__(self, options)
self._jobs = {}
self.reset()
@property
def jobs(self):
return self._jobs
def reset(self):
self.enqueued_jobs = []
def add_job(self, name, func, fire_and_forget=False):
if name in self._jobs:
raise QueueError(u"Job {!r} already exists".format(name))
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if fire_and_forget:
return None
else:
return result
self._jobs[name] = wrapper
def enqueue(self, name, args, kwargs):
if name not in self._jobs:
raise QueueError(u"Unknown job: {!r}".format(name))
self.enqueued_jobs.append((name, args, kwargs))
return ImmediatePromise(self._jobs[name](*args, **kwargs))
class MockQueueTestCase(OverrideConfigTestCase):
"""
A test case class which has a mock job queue set up.
You can check the enqueued jobs with::
queue = get_job_queue()
self.assertEqual(queue.enqueued_jobs, ...)
The ``enqueued_jobs`` attribute is reset for each test case.
"""
class Config(TestingConfig):
PI_JOB_QUEUE_CLASS = "fake"
@classmethod
def setUpClass(cls):
""" override privacyidea.config.config["testing"] with the inner config class """
with mock.patch.dict("privacyidea.lib.queue.QUEUE_CLASSES", {"fake": FakeQueue}):
super(MockQueueTestCase, cls).setUpClass()
def setUp(self):
get_job_queue().reset()
OverrideConfigTestCase.setUp(self)
| agpl-3.0 | Python |
|
6083124c110e0ce657b78f6178cd7464996a042b | add tests I want to pass | theavey/ParaTemp,theavey/ParaTemp | tests/test_geometries.py | tests/test_geometries.py | """This contains a set of tests for ParaTemp.geometries"""
########################################################################
# #
# This script was written by Thomas Heavey in 2017. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
import pytest
class TestXYZ(object):
@pytest.fixture
def xyz(self):
from ..ParaTemp.geometries import XYZ
return XYZ('tests/test-data/stil-3htmf.xyz')
def test_n_atoms(self, xyz):
assert xyz.n_atoms == 66
def test_energy(self, xyz):
assert xyz.energy == -1058630.8496721
| apache-2.0 | Python |
|
8c9034e91d82487ae34c592b369a3283b577acc8 | Add a new test for the latest RegexLexer change, multiple new states including '#pop'. | aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments | tests/test_regexlexer.py | tests/test_regexlexer.py | # -*- coding: utf-8 -*-
"""
Pygments regex lexer tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
from pygments.token import Text
from pygments.lexer import RegexLexer
class TestLexer(RegexLexer):
"""Test tuple state transitions including #pop."""
tokens = {
'root': [
('a', Text.Root, 'rag'),
('e', Text.Root),
],
'beer': [
('d', Text.Beer, ('#pop', '#pop')),
],
'rag': [
('b', Text.Rag, '#push'),
('c', Text.Rag, ('#pop', 'beer')),
],
}
class TupleTransTest(unittest.TestCase):
def test(self):
lx = TestLexer()
toks = list(lx.get_tokens_unprocessed('abcde'))
self.assertEquals(toks,
[(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
(3, Text.Beer, 'd'), (4, Text.Root, 'e')])
| bsd-2-clause | Python |
|
ba0c292753355e5ff7e8e131c61e8086f31b3b76 | Create src/task_2_0.py | fitifit/pythonintask,askras/pythonintask | src/task_2_0.py | src/task_2_0.py | # Раздел 1. Задача 2. Вариант 0.
# Write a program that will print to the screen the statement you like best, whose author is F.M. Dostoevsky. Do not forget that the author must be mentioned on a separate line.
print("Жизнь, везде жизнь, жизнь в нас самих, а не во внешнем.")
print("\n\t\t\t\t\tФ.М.Достоевский")
input("\n\nНажмите Enter для выхода.")
| apache-2.0 | Python |
|
6f00204ae2603063eafbd74a369e9da0864854ca | Create new monthly violence polls | unicefuganda/edtrac,unicefuganda/edtrac,unicefuganda/edtrac | poll/management/commands/create_new_violence_polls.py | poll/management/commands/create_new_violence_polls.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
import traceback
from poll.models import Poll
from unregister.models import Blacklist
from django.conf import settings
from optparse import make_option
from poll.forms import NewPollForm
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from rapidsms.models import Contact
from django.db.models import Q
class Command(BaseCommand):
help = "Create new violence polls"
option_list = BaseCommand.option_list + (
make_option('-n', '--name', dest='n'),
make_option('-t', '--poll_type', dest='t'),
make_option('-q', '--question', dest='q'),
make_option('-r', '--default_response', dest='r'),
make_option('-c', '--contacts', dest='c'),
make_option('-u', '--user', dest='u'),
make_option('-s', '--start_immediately', dest='s'),
make_option('-e', '--response_type', dest='e'),
make_option('-g', '--groups', dest='g'),
)
def handle(self, **options):
edtrac_violence_girls = Poll.objects.create(
name="edtrac_violence_girls",
type="n",
question="How many cases of violence against girls were recorded this month? Answer in figures e.g. 5",
default_response='',
user=User.objects.get(username='admin'),
)
edtrac_violence_girls.sites.add(Site.objects.get_current())
edtrac_violence_boys = Poll.objects.create(
name="edtrac_violence_boys",
type="n",
question="How many cases of violence against boys were recorded this month? Answer in figures e.g. 4",
default_response='',
user = User.objects.get(username='admin'),
)
edtrac_violence_boys.sites.add(Site.objects.get_current())
edtrac_violence_reported = Poll.objects.create(
name='edtrac_violence_reported',
type='n',
question='How many cases of violence were referred to the Police this month? Answer in figures e.g. 6',
default_response='',
user=User.objects.get(username='admin'),
)
edtrac_violence_reported.sites.add(Site.objects.get_current())
| bsd-3-clause | Python |