commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---|
9cce47d37f6e2d08a66b9deedfc6f2f74b02720a | add int validator | faycheng/tpl,faycheng/tpl | tpl/prompt/validator.py | tpl/prompt/validator.py | # -*- coding:utf-8 -*-
from prompt_toolkit.validation import Validator, ValidationError
class StrValidator(Validator):
def validate(self, document):
pass
class IntValidator(Validator):
def validate(self, document):
text = document.text
for index, char in enumerate(text):
if not char.isdigit():
raise ValidationError(message='Input contains non-numeric char', cursor_position=index)
| # -*- coding:utf-8 -*-
from prompt_toolkit.validation import Validator, ValidationError
class StrValidator(Validator):
def validate(self, document):
pass
| mit | Python |
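The `IntValidator` in this commit scans character-by-character so the `ValidationError` can point its cursor at the first offending character. A minimal, dependency-free sketch of that same logic (the helper name is illustrative, not part of the repo); note that `str.isdigit()` also accepts non-ASCII digit characters, so a stricter validator might test `char in '0123456789'`:

```python
def first_non_digit(text):
    """Index of the first non-digit character, or None if the text is all digits."""
    for index, char in enumerate(text):
        if not char.isdigit():
            return index
    return None

assert first_non_digit("12345") is None
assert first_non_digit("12a45") == 2  # a ValidationError would place its cursor here
```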
4f73601c843ff9507064b85ddd33179af9fed653 | Raise stderr message | hotosm/osm-export-tool2,hotosm/osm-export-tool2,hotosm/osm-export-tool2,hotosm/osm-export-tool2 | utils/unfiltered_pbf.py | utils/unfiltered_pbf.py | # -*- coding: utf-8 -*-
import logging
import os
from string import Template
from subprocess import PIPE, Popen
from .artifact import Artifact
from .osm_xml import OSM_XML
LOG = logging.getLogger(__name__)
class InvalidOsmXmlException(Exception):
pass
class UnfilteredPBF(object):
name = 'full_pbf'
description = 'Unfiltered OSM PBF'
cmd = Template('osmconvert $osm --out-pbf >$pbf')
def __init__(self, aoi_geom, output_pbf, url):
self.aoi_geom = aoi_geom
self.output_pbf = output_pbf
self.url = url
def run(self):
if self.is_complete:
LOG.debug("Skipping UnfilteredPBF, file exists")
return
osm_xml = "{}.xml".format(self.output_pbf)
osm_xml_task = OSM_XML(self.aoi_geom, osm_xml, url=self.url)
osm_xml_task.run()
convert_cmd = self.cmd.safe_substitute({
'osm': osm_xml,
'pbf': self.output_pbf
})
LOG.debug('Running: %s' % convert_cmd)
p = Popen(convert_cmd, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if stderr:
raise InvalidOsmXmlException(stderr)
LOG.debug('Osmconvert complete')
@property
def results(self):
return [Artifact([self.output_pbf], UnfilteredPBF.name)]
@property
def is_complete(self):
return os.path.isfile(self.output_pbf)
| # -*- coding: utf-8 -*-
import logging
import os
from string import Template
from subprocess import PIPE, Popen
from .artifact import Artifact
from .osm_xml import OSM_XML
LOG = logging.getLogger(__name__)
class InvalidOsmXmlException(Exception):
pass
class UnfilteredPBF(object):
name = 'full_pbf'
description = 'Unfiltered OSM PBF'
cmd = Template('osmconvert $osm --out-pbf >$pbf')
def __init__(self, aoi_geom, output_pbf, url):
self.aoi_geom = aoi_geom
self.output_pbf = output_pbf
self.url = url
def run(self):
if self.is_complete:
LOG.debug("Skipping UnfilteredPBF, file exists")
return
osm_xml = "{}.xml".format(self.output_pbf)
osm_xml_task = OSM_XML(self.aoi_geom, osm_xml, url=self.url)
osm_xml_task.run()
convert_cmd = self.cmd.safe_substitute({
'osm': osm_xml,
'pbf': self.output_pbf
})
LOG.debug('Running: %s' % convert_cmd)
p = Popen(convert_cmd, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if stderr:
LOG.warn('Failed: %s', stderr)
with open(osm_xml, 'rb') as fd:  # sample the offending XML for the error message
sample = fd.readlines(8)
raise InvalidOsmXmlException(sample)
LOG.debug('Osmconvert complete')
@property
def results(self):
return [Artifact([self.output_pbf], UnfilteredPBF.name)]
@property
def is_complete(self):
return os.path.isfile(self.output_pbf)
| bsd-3-clause | Python |
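The updated `run()` logs the stderr and raises a sample of the offending XML so the failure is debuggable. As a side note, checking the return code is often more robust than `if stderr:`, since some tools emit warnings on stderr while still succeeding; a stdlib-only sketch of that run-and-check pattern (names here are illustrative):

```python
from subprocess import PIPE, Popen

def run_or_raise(cmd):
    """Run a shell command; raise with the captured stderr if it fails."""
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        raise RuntimeError('command failed: %s' % stderr.decode('utf-8', 'replace'))
    return stdout

print(run_or_raise('echo ok').decode().strip())
```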
451e20818c7fbcc0b45500c71c5c5beee96eb316 | update jaxlib | tensorflow/probability,tensorflow/probability,google/jax,google/jax,google/jax,google/jax | jaxlib/version.py | jaxlib/version.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1.17"
| # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1.16"
| apache-2.0 | Python |
1c15d302c2a1df22b4dd89f3215decf141a4c20e | return None if there is an error during scan | abilian/abilian-core,abilian/abilian-core,abilian/abilian-core,abilian/abilian-core,abilian/abilian-core | abilian/services/antivirus/__init__.py | abilian/services/antivirus/__init__.py | # coding=utf-8
"""
"""
from __future__ import absolute_import
try:
import clamd
cd = clamd.ClamdUnixSocket()
CLAMD_AVAILABLE = True
except ImportError:
CLAMD_AVAILABLE = False
from abilian.core.models.blob import Blob
from ..base import Service
class AntiVirusService(Service):
"""
Antivirus service
"""
name = 'antivirus'
def scan(self, file_or_stream):
"""
:param file_or_stream: :class:`Blob` instance, filename or file object
:returns: True if file is 'clean', False if a virus is detected, None if
file could not be scanned.
If `file_or_stream` is a Blob, scan result is stored in
Blob.meta['antivirus'].
"""
res = self._scan(file_or_stream)
if isinstance(file_or_stream, Blob):
file_or_stream.meta['antivirus'] = res
return res
def _scan(self, file_or_stream):
if not CLAMD_AVAILABLE:
return None
content = file_or_stream
if isinstance(file_or_stream, Blob):
scan = cd.scan
# py3 compat: bytes == py2 str(). Pathlib uses os.fsencode()
content = bytes(file_or_stream.file)
elif isinstance(file_or_stream, (str, unicode)):
scan = cd.scan
else:
scan = cd.instream
res = None
try:
res = scan(content)
except clamd.ClamdError as e:
self.logger.warning('Error during content scan: %s', repr(e))
return None
if content not in res:
# may happen if the file doesn't exist
return False
res = res[content]
return res[0] == u'OK'
service = AntiVirusService()
| # coding=utf-8
"""
"""
from __future__ import absolute_import
try:
import clamd
cd = clamd.ClamdUnixSocket()
CLAMD_AVAILABLE = True
except ImportError:
CLAMD_AVAILABLE = False
from abilian.core.models.blob import Blob
from ..base import Service
class AntiVirusService(Service):
"""
Antivirus service
"""
name = 'antivirus'
def scan(self, file_or_stream):
"""
:param file_or_stream: :class:`Blob` instance, filename or file object
:returns: True if file is 'clean', False if a virus is detected, None if
file could not be scanned.
If `file_or_stream` is a Blob, scan result is stored in
Blob.meta['antivirus'].
"""
res = self._scan(file_or_stream)
if isinstance(file_or_stream, Blob):
file_or_stream.meta['antivirus'] = res
return res
def _scan(self, file_or_stream):
if not CLAMD_AVAILABLE:
return None
content = file_or_stream
if isinstance(file_or_stream, Blob):
scan = cd.scan
# py3 compat: bytes == py2 str(). Pathlib uses os.fsencode()
content = bytes(file_or_stream.file)
elif isinstance(file_or_stream, (str, unicode)):
scan = cd.scan
else:
scan = cd.instream
res = None
try:
res = scan(content)
except clamd.ClamdError as e:
self.logger.warning('Error during content scan: %s', repr(e))
if content not in res:
# may happen if the file doesn't exist
return False
res = res[content]
return res[0] == u'OK'
service = AntiVirusService()
| lgpl-2.1 | Python |
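Both versions normalize clamd's result dict into a tri-state verdict, which is the pattern the commit hardens: True for clean, False for flagged or unscannable, None when the scanner itself failed. A condensed sketch, assuming a clamd daemon on its default unix socket and the `{path: (status, signature)}` result shape used above:

```python
import clamd

cd = clamd.ClamdUnixSocket()  # assumes the daemon's default unix socket

def is_clean(path):
    """True if clean, False if flagged or not scannable, None on scanner error."""
    try:
        result = cd.scan(path)
    except clamd.ClamdError:
        return None
    if path not in result:   # e.g. the file does not exist
        return False
    status, signature = result[path]
    return status == 'OK'
```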
8e10657f94023a69967345114ee221c8d579c05d | Fix error with new issue while not logged in. | noracami/track-it,noracami/track-it | trackit/issues/views.py | trackit/issues/views.py | from django.shortcuts import render, get_object_or_404, redirect
from .models import Ticket, Label, User, Comment
import hashlib
# Create your views here.
def home(request):
issue = Ticket.objects.filter().order_by('-id')
readit = []
for i in issue:
issue_get = {}
issue_get['id'] = i.id
issue_get['title'] = i.ticket_title
issue_get['status'] = i.status
issue_get['time'] = i.time
issue_get['label'] = i.label_set.all()
readit.append(issue_get)
#pass
return render(request, 'home.html', {"readit": readit, "request": request})
def issues(request, ticket_id):
issue = get_object_or_404(Ticket, id=ticket_id)
issue_get = {}
issue_get['id'] = issue.id
issue_get['title'] = issue.ticket_title
issue_get['status'] = issue.status
issue_get['time'] = issue.time
issue_get['label'] = issue.label_set.all()
return render(request, 'issues.html', {"issue_get": issue_get, "request": request})
def newissues(request):
if "login" in request.session:
name = request.session['login']
else:
name = "default"
return render(request, 'newissues.html', {"issue_get": name, "request": request})
def add(request):
if request.method == 'POST':
if 'login' in request.session:
if request.POST['todo'] == "newissue":
title = request.POST['title']
content = request.POST['comment']
ticket = Ticket(ticket_title=title)
ticket.save()
user = get_object_or_404(User, id=1)
comment = Comment(ticket=ticket, content=content, user=user)
comment.save()
return redirect('home')
def loginhere(request):
return render(request, 'loginhere.html', {"issue_get": "", "request": request})
def login(request):
#TODO rewrite please
if request.method == 'POST':
if request.POST['login_password']:
plain = request.POST['login_password']
if hashlib.sha224(plain.encode()).hexdigest() == '71454996db126e238e278a202a7dbc49dda187ec4f8c9dfc95584900':
#login
request.session['login'] = request.POST['login_select']
return redirect('home')
def logout(request):
if request.session['login']:
del request.session['login']
return redirect('home')
| from django.shortcuts import render, get_object_or_404, redirect
from .models import Ticket, Label, User, Comment
import hashlib
# Create your views here.
def home(request):
issue = Ticket.objects.filter().order_by('-id')
readit = []
for i in issue:
issue_get = {}
issue_get['id'] = i.id
issue_get['title'] = i.ticket_title
issue_get['status'] = i.status
issue_get['time'] = i.time
issue_get['label'] = i.label_set.all()
readit.append(issue_get)
#pass
return render(request, 'home.html', {"readit": readit, "request": request})
def issues(request, ticket_id):
issue = get_object_or_404(Ticket, id=ticket_id)
issue_get = {}
issue_get['id'] = issue.id
issue_get['title'] = issue.ticket_title
issue_get['status'] = issue.status
issue_get['time'] = issue.time
issue_get['label'] = issue.label_set.all()
return render(request, 'issues.html', {"issue_get": issue_get, "request": request})
def newissues(request):
if "login" in request.session:
name = request.session['login']
else:
name = "default"
return render(request, 'newissues.html', {"issue_get": name, "request": request})
def add(request):
if request.method == 'POST':
if request.session['login']:
if request.POST['todo'] == "newissue":
title = request.POST['title']
content = request.POST['comment']
ticket = Ticket(ticket_title=title)
ticket.save()
user = get_object_or_404(User, id=1)
comment = Comment(ticket=ticket, content=content, user=user)
comment.save()
return redirect('home')
def loginhere(request):
return render(request, 'loginhere.html', {"issue_get": "", "request": request})
def login(request):
#TODO rewrite please
if request.method == 'POST':
if request.POST['login_password']:
plain = request.POST['login_password']
if hashlib.sha224(plain.encode()).hexdigest() == '71454996db126e238e278a202a7dbc49dda187ec4f8c9dfc95584900':
#login
request.session['login'] = request.POST['login_select']
return redirect('home')
def logout(request):
if request.session['login']:
del request.session['login']
return redirect('home')
| mit | Python |
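The one-line fix matters because `request.session` behaves like a dict: subscripting a missing key raises `KeyError`, which turns into a server error for every anonymous visitor. A plain-dict demonstration of the difference:

```python
session = {}  # stand-in for request.session when nobody is logged in

try:
    session['login']          # the old check
except KeyError:
    print('old check: KeyError for anonymous users')

if 'login' in session:        # the fixed check
    print('logged in')
else:
    print('new check: anonymous visitor handled gracefully')
```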
7e4b66fe3df07afa431201de7a5a76d2eeb949a1 | Fix django custom template tag importing | xuru/substrate,xuru/substrate,xuru/substrate | app/main.py | app/main.py | #!/usr/bin/env python
import env_setup; env_setup.setup(); env_setup.setup_django()
from django.template import add_to_builtins
add_to_builtins('agar.django.templatetags')
from webapp2 import RequestHandler, Route, WSGIApplication
from agar.env import on_production_server
from agar.config import Config
from agar.django.templates import render_template
class MainApplicationConfig(Config):
"""
:py:class:`~agar.config.Config` settings for the ``main`` `webapp2.WSGIApplication`_.
Settings are under the ``main_application`` namespace.
The following settings (and defaults) are provided::
main_application_NOOP = None
To override ``main`` `webapp2.WSGIApplication`_ settings, define values in the ``appengine_config.py`` file in the
root of your project.
"""
_prefix = 'main_application'
#: A no op.
NOOP = None
config = MainApplicationConfig.get_config()
class MainHandler(RequestHandler):
def get(self):
render_template(self.response, 'index.html')
application = WSGIApplication(
[
Route('/', MainHandler, name='main'),
],
debug=not on_production_server)
def main():
from google.appengine.ext.webapp import template, util
template.register_template_library('agar.django.templatetags')
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
from env_setup import setup_django
setup_django()
from env_setup import setup
setup()
from webapp2 import RequestHandler, Route, WSGIApplication
from agar.env import on_production_server
from agar.config import Config
from agar.django.templates import render_template
class MainApplicationConfig(Config):
"""
:py:class:`~agar.config.Config` settings for the ``main`` `webapp2.WSGIApplication`_.
Settings are under the ``main_application`` namespace.
The following settings (and defaults) are provided::
main_application_NOOP = None
To override ``main`` `webapp2.WSGIApplication`_ settings, define values in the ``appengine_config.py`` file in the
root of your project.
"""
_prefix = 'main_application'
#: A no op.
NOOP = None
config = MainApplicationConfig.get_config()
class MainHandler(RequestHandler):
def get(self):
render_template(self.response, 'index.html')
application = WSGIApplication(
[
Route('/', MainHandler, name='main'),
],
debug=not on_production_server)
def main():
from google.appengine.ext.webapp import template, util
template.register_template_library('agar.django.templatetags')
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| mit | Python |
35c52ecbe34611f003d8f647dafdb15c00d70212 | update doc | DennyZhang/devops_public,DennyZhang/devops_public,DennyZhang/devops_public,DennyZhang/devops_public | python/git_pull_codedir/git_pull_codedir.py | python/git_pull_codedir/git_pull_codedir.py | # -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2017 DennyZhang.com
## Licensed under MIT
## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE
##
## File : git_pull_codedir.py
## Author : Denny <denny@dennyzhang.com>
## Description :
## --
## Created : <2017-03-24>
## Updated: Time-stamp: <2017-03-27 18:10:44>
##-------------------------------------------------------------------
import os, sys
import sys
import logging
import argparse
# Notice: Need to run: pip install GitPython
import git
logger = logging.getLogger("git_pull_codedir")
formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',)
file_handler = logging.FileHandler("/var/log/git_pull_codedir.log")
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler(sys.stderr)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
def git_pull(code_dir):
logger.info("Run git pull in %s" %(code_dir))
if os.path.exists(code_dir) is False:
logger.error("Code directory(%s): doesn't exist" % (code_dir))
sys.exit(1)
os.chdir(code_dir)
g = git.cmd.Git(code_dir)
g.pull()
# Sample python git_pull_codedir.py --code_dirs "/data/code_dir/repo1,/data/code_dir/repo2"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--code_dirs', required=True, \
help="Code directories to pull. If multiple, separated by comma", type=str)
l = parser.parse_args()
code_dirs = l.code_dirs
separator = ","
for code_dir in code_dirs.split(separator):
git_pull(code_dir)
## File : git_pull_codedir.py ends
| # -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2017 DennyZhang.com
## Licensed under MIT
## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE
##
## File : git_pull_codedir.py
## Author : Denny <denny@dennyzhang.com>
## Description :
## --
## Created : <2017-03-24>
## Updated: Time-stamp: <2017-03-24 15:51:04>
##-------------------------------------------------------------------
import os, sys
import sys
import logging
import argparse
# Notice: Need to run: pip install GitPython
import git
logger = logging.getLogger("git_pull_codedir")
formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',)
file_handler = logging.FileHandler("/var/log/git_pull_codedir.log")
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler(sys.stderr)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
def git_pull(code_dir):
logger.info("Run git pull in %s" %(code_dir))
if os.path.exists(code_dir) is False:
logger.error("Code directory(%s): doesn't exist" % (code_dir))
sys.exit(1)
os.chdir(code_dir)
g = git.cmd.Git(code_dir)
g.pull()
# Sample python perform_git_pull.py --code_dirs "/data/code_dir/repo1,/data/code_dir/repo2"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--code_dirs', required=True, \
help="Code directories to pull. If multiple, separated by comma", type=str)
l = parser.parse_args()
code_dirs = l.code_dirs
separator = ","
for code_dir in code_dirs.split(separator):
git_pull(code_dir)
## File : git_pull_codedir.py ends
| mit | Python |
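`git.cmd.Git(code_dir).pull()` raises on any non-zero git exit, so a batch sync job usually wants to catch and log rather than die on the first bad repository. A sketch assuming GitPython's standard `GitCommandError` (the wrapper name is illustrative):

```python
import logging
import git

def safe_git_pull(code_dir):
    """Pull one repo; log and continue instead of aborting the whole batch."""
    try:
        output = git.cmd.Git(code_dir).pull()
        logging.info('git pull in %s: %s', code_dir, output)
        return True
    except git.GitCommandError as exc:
        logging.error('git pull failed in %s: %s', code_dir, exc)
        return False
```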
3ab5586ec4ac9ff3ac3fd7583bc9a71c7b5cd27a | fix lockedNormal, use MItMeshPolygon instead of MItMeshVertex, fix Fix() function | sol-ansano-kim/medic,sol-ansano-kim/medic,sol-ansano-kim/medic | python/medic/plugins/Tester/lockedNormal.py | python/medic/plugins/Tester/lockedNormal.py | from medic.core import testerBase
from maya import OpenMaya
class LockedNormal(testerBase.TesterBase):
Name = "LockedNormal"
Description = "vertex(s) which has locked normal"
Fixable = True
def __init__(self):
super(LockedNormal, self).__init__()
def Match(self, node):
return node.object().hasFn(OpenMaya.MFn.kMesh)
def Test(self, node):
it = None
mesh = None
try:
it = OpenMaya.MItMeshPolygon(node.object())
mesh = OpenMaya.MFnMesh(node.object())
except:
return (False, None)
vertices = OpenMaya.MIntArray()
while (not it.isDone()):
for i in range(it.polygonVertexCount()):
vi = it.vertexIndex(i)
if vi in vertices:
continue
ni = it.normalIndex(i)
if mesh.isNormalLocked(ni):
vertices.append(vi)
it.next()
if vertices.length() > 0:
comp = OpenMaya.MFnSingleIndexedComponent()
comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent)
comp.addElements(vertices)
return (True, comp_obj)
return (False, None)
def Fix(self, node, component, parameterParser):
if node.dg().isFromReferencedFile():
return False
mesh = OpenMaya.MFnMesh(node.object())
vertices = OpenMaya.MIntArray()
ver_comp = OpenMaya.MFnSingleIndexedComponent(component)
ver_comp.getElements(vertices)
mesh.unlockVertexNormals(vertices)
return True
Tester = LockedNormal
| from medic.core import testerBase
from maya import OpenMaya
class LockedNormal(testerBase.TesterBase):
Name = "LockedNormal"
Description = "vertex(s) which has locked normal"
Fixable = True
def __init__(self):
super(LockedNormal, self).__init__()
def Match(self, node):
return node.object().hasFn(OpenMaya.MFn.kMesh)
def Test(self, node):
it = None
mesh = None
try:
it = OpenMaya.MItMeshVertex(node.object())
mesh = OpenMaya.MFnMesh(node.object())
except:
return (False, None)
result = False
comp = OpenMaya.MFnSingleIndexedComponent()
comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent)
while (not it.isDone()):
normal_indices = OpenMaya.MIntArray()
it.getNormalIndices(normal_indices)
for i in range(normal_indices.length()):
if mesh.isNormalLocked(normal_indices[i]):
result = True
comp.addElement(it.index())
break
it.next()
return (result, comp_obj if result else None)
def Fix(self, node, component, parameterParser):
if node.dg().isFromReferencedFile():
return False
target_normal_indices = OpenMaya.MIntArray()
mesh = OpenMaya.MFnMesh(node.object())
it = OpenMaya.MItMeshVertex(node.getPath(), component)
while (not it.isDone()):
normal_indices = OpenMaya.MIntArray()
it.getNormalIndices(normal_indices)
for i in range(normal_indices.length()):
target_normal_indices.append(normal_indices[i])
it.next()
mesh.unlockVertexNormals(target_normal_indices)
return True
Tester = LockedNormal
| mit | Python |
e74b4867f9067e28686aecd19eb6f1d352ee28bf | fix imports | tomviner/dojo-adventure-game | game.py | game.py | import random
from characters import guests as people
from adventurelib import when, start
import rooms
from sys import exit
murder_config_people = list(people)
random.shuffle(murder_config_people)
murder_location = random.choice(list(rooms.rooms))
murderer = random.choice(list(people))
current_config_people = list(people)
random.shuffle(current_config_people)
current_location = random.choice(list(rooms.rooms))
@when('where am i')
def my_room():
print("I am in: ", current_location)
@when('go to ROOM')
@when('go to the ROOM')
def to_room(room):
global current_location
r = rooms.rooms.find(room)
if current_location == r:
print("I am already in %s" % room)
elif r:
print("I am now in %s" % room)
current_location = r
else:
print("I can't find the %s" % room)
@when('it was PERSON')
def accuse(person):
p = people.find(person)
if p == murderer:
print ("Yes, %s is the murderer!" % p)
exit()
else:
if p:
print ("%s said: 'How could you!'" % p)
else:
print ("No one has ever heard of '%s'!" % person)
start()
| import random
from characters import guests as people
from adventurelib import Item, Bag, when, start
import rooms
import characters
from sys import exit
murder_config_people = list(people)
random.shuffle(murder_config_people)
murder_location = random.choice(list(rooms.rooms))
murderer = random.choice(list(people))
current_config_people = list(people)
random.shuffle(current_config_people)
current_location = random.choice(list(rooms.rooms))
@when('where am i')
def my_room():
print("I am in: ", current_location)
@when('go to ROOM')
@when('go to the ROOM')
def to_room(room):
global current_location
r = rooms.rooms.find(room)
if current_location == r:
print("I am already in %s" % room)
elif r:
print("I am now in %s" % room)
current_location = r
else:
print("I can't find the %s" % room)
@when('it was PERSON')
def accuse(person):
p = people.find(person)
if p == murderer:
print ("Yes, %s is the murderer!" % p)
exit()
else:
if p:
print ("%s said: 'How could you!'" % p)
else:
print ("No one has ever heard of '%s'!" % person)
start()
| mit | Python |
aaba085cd2e97c8c23e6724da3313d42d12798f0 | Make sure request.user is a user | comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django | app/grandchallenge/annotations/validators.py | app/grandchallenge/annotations/validators.py | from rest_framework import serializers
from django.conf import settings
def validate_grader_is_current_retina_user(grader, context):
"""
This method checks if the passed grader equals the request.user that is passed in the context.
Only applies to users that are in the retina_graders group.
"""
request = context.get("request")
if (
request is not None
and request.user is not None
and request.user.is_authenticated
):
user = request.user
if user.groups.filter(
name=settings.RETINA_GRADERS_GROUP_NAME
).exists():
if grader != user:
raise serializers.ValidationError(
"User is not allowed to create annotation for other grader"
)
| from rest_framework import serializers
from django.conf import settings
def validate_grader_is_current_retina_user(grader, context):
"""
This method checks if the passed grader equals the request.user that is passed in the context.
Only applies to users that are in the retina_graders group.
"""
request = context.get("request")
if request and request.user.is_authenticated:
user = request.user
if user.groups.filter(
name=settings.RETINA_GRADERS_GROUP_NAME
).exists():
if grader != user:
raise serializers.ValidationError(
"User is not allowed to create annotation for other grader"
)
| apache-2.0 | Python |
9108f24183b2743647a8ed3ab354673e945d5f2a | Update release number | SpamScope/mail-parser | mailparser_version/__init__.py | mailparser_version/__init__.py | __version__ = "1.1.0"
| __version__ = "1.0.0"
| apache-2.0 | Python |
8128791c5b4cb8d185ceb916df2b6aa896f17453 | add test for custom ylabels | josesho/bootstrap_contrast | test_run.py | test_run.py | #! /usr/bin/env python
# Load Libraries
import matplotlib as mpl
mpl.use('SVG')
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set(style='ticks',context='talk')
import bootstrap_contrast as bsc
import pandas as pd
import numpy as np
import scipy as sp
# Dummy dataset
dataset=list()
for seed in [10,11,12,13,14,15]:
np.random.seed(seed) # fix the seed so we get the same numbers each time.
dataset.append(np.random.randn(40))
df=pd.DataFrame(dataset).T
cols=['Control','Group1','Group2','Group3','Group4','Group5']
df.columns=cols
# Create some upwards/downwards shifts.
df['Group2']=df['Group2']-0.1
df['Group3']=df['Group3']+0.2
df['Group4']=(df['Group4']*1.1)+4
df['Group5']=(df['Group5']*1.1)-1
# Add gender column.
df['Gender']=np.concatenate([np.repeat('Male',20),np.repeat('Female',20)])
# bsc.__version__
f,c=bsc.contrastplot(data=df,
idx=(('Group1','Group3','Group2'),
('Control','Group4')),
color_col='Gender',
custom_palette={'Male':'blue',
'Female':'red'},
float_contrast=True,
swarm_label='my swarm',
contrast_label='contrast',
show_means='bars',
means_width=0.5,
fig_size=(10,8))
f.savefig('testfig.svg',format='svg')
| #! /usr/bin/env python
# Load Libraries
import matplotlib as mpl
mpl.use('SVG')
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set(style='ticks',context='talk')
import bootstrap_contrast as bsc
import pandas as pd
import numpy as np
import scipy as sp
# Dummy dataset
dataset=list()
for seed in [10,11,12,13,14,15]:
np.random.seed(seed) # fix the seed so we get the same numbers each time.
dataset.append(np.random.randn(40))
df=pd.DataFrame(dataset).T
cols=['Control','Group1','Group2','Group3','Group4','Group5']
df.columns=cols
# Create some upwards/downwards shifts.
df['Group2']=df['Group2']-0.1
df['Group3']=df['Group3']+0.2
df['Group4']=(df['Group4']*1.1)+4
df['Group5']=(df['Group5']*1.1)-1
# Add gender column.
df['Gender']=np.concatenate([np.repeat('Male',20),np.repeat('Female',20)])
# bsc.__version__
f,c=bsc.contrastplot(data=df,
idx=(('Group1','Group3','Group2'),
('Control','Group4')),
color_col='Gender',
custom_palette={'Male':'blue',
'Female':'red'},
float_contrast=True,
show_means='bars',
means_width=0.5,
fig_size=(10,8))
f.savefig('testfig.svg',format='svg')
| mit | Python |
0ca45e92a92e71d080af6e2104f4f625e31559f0 | Tweak mysql query string in test. | ContinuumIO/blaze,ContinuumIO/blaze | blaze/compute/tests/test_mysql_compute.py | blaze/compute/tests/test_mysql_compute.py | from __future__ import absolute_import, print_function, division
from getpass import getuser
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('pymysql')
from odo import odo, drop, discover
import pandas as pd
import numpy as np
from blaze import symbol, compute
from blaze.utils import example, normalize
from blaze.interactive import iscoretype, iscorescalar, iscoresequence
@pytest.yield_fixture(scope='module')
def data():
try:
t = odo(
example('nyc.csv'),
'mysql+pymysql://%s@localhost/test::nyc' % getuser()
)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t.bind
finally:
drop(t)
@pytest.fixture
def db(data):
return symbol('test', discover(data))
def test_agg_sql(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data, return_type='native')
expected = """
select
min(alias.passenger_count) as passenger_count_min
from
(select
nyc.passenger_count as passenger_count
from
nyc
where nyc.passenger_count < %s) as alias
"""
assert normalize(str(result)) == normalize(expected)
def test_agg_compute(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data, return_type='native')
passenger_count = odo(compute(db.nyc.passenger_count, {db: data}, return_type='native'), pd.Series)
assert passenger_count[passenger_count < 4].min() == result.scalar()
def test_core_compute(db, data):
assert isinstance(compute(db.nyc, data, return_type='core'), pd.DataFrame)
assert isinstance(compute(db.nyc.passenger_count, data, return_type='core'), pd.Series)
assert iscorescalar(compute(db.nyc.passenger_count.mean(), data, return_type='core'))
assert isinstance(compute(db.nyc, data, return_type=list), list)
| from __future__ import absolute_import, print_function, division
from getpass import getuser
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('pymysql')
from odo import odo, drop, discover
import pandas as pd
import numpy as np
from blaze import symbol, compute
from blaze.utils import example, normalize
from blaze.interactive import iscoretype, iscorescalar, iscoresequence
@pytest.yield_fixture(scope='module')
def data():
try:
t = odo(
example('nyc.csv'),
'mysql+pymysql://%s@localhost/test::nyc' % getuser()
)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t.bind
finally:
drop(t)
@pytest.fixture
def db(data):
return symbol('test', discover(data))
def test_agg_sql(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data, return_type='native')
expected = """
select
min(alias.passenger_count) as passenger_count_min
from
(select
nyc.passenger_count as passenger_count
from
nyc
where nyc.passenger_count < %(passenger_count_1)s) as alias
"""
assert normalize(str(result)) == normalize(expected)
def test_agg_compute(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data, return_type='native')
passenger_count = odo(compute(db.nyc.passenger_count, {db: data}, return_type='native'), pd.Series)
assert passenger_count[passenger_count < 4].min() == result.scalar()
def test_core_compute(db, data):
assert isinstance(compute(db.nyc, data, return_type='core'), pd.DataFrame)
assert isinstance(compute(db.nyc.passenger_count, data, return_type='core'), pd.Series)
assert iscorescalar(compute(db.nyc.passenger_count.mean(), data, return_type='core'))
assert isinstance(compute(db.nyc, data, return_type=list), list)
| bsd-3-clause | Python |
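The assertion compares generated SQL through `normalize`, so the test is insensitive to whitespace and formatting drift in the compiler's output. A simplified stand-in for that idea (a sketch, not blaze's actual implementation):

```python
import re

def normalize_sql(s):
    """Collapse whitespace so equivalent SQL strings compare equal."""
    return re.sub(r'\s+', ' ', s).strip().lower()

a = 'SELECT min(x)\n    FROM   t'
b = 'select min(x) from t'
assert normalize_sql(a) == normalize_sql(b)
```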
a6c4540877e00df93fb5de3ce76e3a7393c1c587 | Change notes. | jgehrcke/timegaps,jgehrcke/timegaps | timegaps.py | timegaps.py | # -*- coding: utf-8 -*-
# Copyright 2014 Jan-Philip Gehrcke. See LICENSE file for details.
"""
Feature brainstorm:
- reference implementation with cmdline interface
- comprehensive API for systematic unit testing and library usage
- remove or move or noop mode
- extensive logging
- parse mtime from path (file/dirname)
- symlink support (elaborate specifics)
- file system entry input via positional cmdline args or via null-character
separated paths at stdin
- add a mode where time-encoding nullchar-separated strings are read as
input and then filtered. The output is a set of rejected strings (no
involvement of the file system at all, just timestamp filtering)
"""
import os
import sys
import logging
import time
from logging.handlers import RotatingFileHandler
from deletebytime import Filter, FileSystemEntry
YEARS = 1
MONTHS = 12
WEEKS = 6
DAYS = 8
HOURS = 48
ZERO_HOURS_KEEP_COUNT = 5
LOGFILE_PATH = "/mnt/two_3TB_disks/jpg_private/home/progg0rn/nas_scripts/delete_pc_backups/delete_backups.log"
def main():
backup_dirs = sys.argv[1:]
log.info("Got %s backup paths via cmdline.", len(backup_dirs))
backup_times = [time_from_dirname(d) for d in backup_dirs]
items_with_time = zip(backup_dirs, backup_times)
items_to_keep = filter_items(items_with_time)
keep_dirs = [i[0] for i in items_to_keep]
keep_dirs_str = "\n".join(keep_dirs)
log.info("Keep these %s directories:\n%s", len(keep_dirs), keep_dirs_str)
delete_paths = [p for p in backup_dirs if p not in keep_dirs]
log.info("Delete %s paths", len(delete_paths))
for p in delete_paths:
delete_backup_dir(p)
if __name__ == "__main__":
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
fh = RotatingFileHandler(
LOGFILE_PATH,
mode='a',
maxBytes=500*1024,
backupCount=30,
encoding='utf-8')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
log.addHandler(ch)
log.addHandler(fh)
main()
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
# Copyright 2014 Jan-Philip Gehrcke. See LICENSE file for details.
"""
Feature brainstorm:
- reference implementation with cmdline interface
- comprehensive API for systematic unit testing and library usage
- remove or move or noop mode
- extensive logging
- parse mtime from path (file/dirname)
- symlink support (elaborate specifics)
- file system entry input via positional cmdline args or via null-character
separated paths at stdin
TODO:
- rename to timegaps
"""
import os
import sys
import logging
import time
from logging.handlers import RotatingFileHandler
from deletebytime import Filter, FileSystemEntry
YEARS = 1
MONTHS = 12
WEEKS = 6
DAYS = 8
HOURS = 48
ZERO_HOURS_KEEP_COUNT = 5
LOGFILE_PATH = "/mnt/two_3TB_disks/jpg_private/home/progg0rn/nas_scripts/delete_pc_backups/delete_backups.log"
def main():
backup_dirs = sys.argv[1:]
log.info("Got %s backup paths via cmdline.", len(backup_dirs))
backup_times = [time_from_dirname(d) for d in backup_dirs]
items_with_time = zip(backup_dirs, backup_times)
items_to_keep = filter_items(items_with_time)
keep_dirs = [i[0] for i in items_to_keep]
keep_dirs_str = "\n".join(keep_dirs)
log.info("Keep these %s directories:\n%s", len(keep_dirs), keep_dirs_str)
delete_paths = [p for p in backup_dirs if p not in keep_dirs]
log.info("Delete %s paths", len(delete_paths))
for p in delete_paths:
delete_backup_dir(p)
if __name__ == "__main__":
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
fh = RotatingFileHandler(
LOGFILE_PATH,
mode='a',
maxBytes=500*1024,
backupCount=30,
encoding='utf-8')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
log.addHandler(ch)
log.addHandler(fh)
main()
if __name__ == "__main__":
main()
| mit | Python |
4bc436ac4d441987d602b3af10517125c78c56e0 | remove use of BeautifulSoup from parse_paragraph_as_list | StoDevX/course-data-tools,StoDevX/course-data-tools | lib/parse_paragraph_as_list.py | lib/parse_paragraph_as_list.py | def parse_paragraph_as_list(string_with_br):
paragraph = ' '.join(string_with_br.split())
lines = [s.strip() for s in paragraph.split('<br>')]
return [l for l in lines if l]
| from bs4 import BeautifulSoup
def parse_paragraph_as_list(string_with_br):
strings = BeautifulSoup(string_with_br, 'html.parser').strings
splitted = [' '.join(s.split()).strip() for s in strings]
return [s for s in splitted if s]
| mit | Python |
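The rewrite drops the BeautifulSoup dependency by treating the input as plain text split on the literal `<br>` token, so variants like `<br/>` or `<BR>` are no longer recognized. Its behavior in two quick checks:

```python
# Runs of whitespace collapse and empty segments are dropped:
assert parse_paragraph_as_list('one<br>  two  <br><br> three ') == ['one', 'two', 'three']
assert parse_paragraph_as_list('<br> <br>') == []
```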
ac6ce056e6b05531d81c550ae3e1e1d688ece4a0 | Make serializer comment more clear | codertx/lightil | jwt_auth/serializers.py | jwt_auth/serializers.py | from .models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(max_length=20, min_length=8, trim_whitespace=False, write_only=True)
class Meta:
model = User
fields = ('id', 'nickname', 'username', 'email', 'password')
# serializer's default `create` method will call `model.objects.create`
# method to create new instance, override to create user correctly.
def create(self, validated_data):
return User.objects.create_user(**validated_data)
# since the password cannot be changed directly
# override to update user correctly
def update(self, instance, validated_data):
if 'password' in validated_data:
instance.set_password(validated_data['password'])
instance.nickname = validated_data.get('nickname', instance.nickname)
instance.save()
return instance
| from .models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(max_length=20, min_length=8, trim_whitespace=False, write_only=True)
class Meta:
model = User
fields = ('id', 'nickname', 'username', 'email', 'password')
# default `create` method call `model.objects.create` method to create new instance
# override to create user correctly
def create(self, validated_data):
return User.objects.create_user(**validated_data)
# since the password cannot be changed directly
# override to update user correctly
def update(self, instance, validated_data):
if 'password' in validated_data:
instance.set_password(validated_data['password'])
instance.nickname = validated_data.get('nickname', instance.nickname)
instance.save()
return instance
| mit | Python |
b7a84ce7f0049229693fe12bf7a8bb1a7177d3b6 | convert values to float before multiplying with pi | philipkimmey/django-geo | django_geo/distances.py | django_geo/distances.py | import math
class distances:
@staticmethod
def geographic_distance(lat1, lng1, lat2, lng2):
lat1 = float(lat1)
lng1 = float(lng1)
lat2 = float(lat2)
lng2 = float(lng2)
lat1 = (lat1 * math.pi) / 180
lng1 = (lng1 * math.pi) / 180
lat2 = (lat2 * math.pi) / 180
lng2 = (lng2 * math.pi) / 180
a = (math.sin(lat1)*math.sin(lat2))+(math.cos(lat1)*math.cos(lat2)*math.cos(lng2 - lng1))
return math.acos(a) * 6371.01
@staticmethod
def max_variation_lat(distance):
max_variation = abs((180 * distance) / (6371.01 * math.pi))
return max_variation
@staticmethod
def max_variation_lon(address_latitude, distance):
top = math.sin(distance / 6371.01)
bottom = math.cos((math.pi * address_latitude)/180)
ratio = top / bottom
if -1 > ratio or ratio > 1:
max_variation = 100
else:
max_variation = abs(math.asin(ratio) * (180 / math.pi))
return max_variation
| import math
class distances:
@staticmethod
def geographic_distance(lat1, lng1, lat2, lng2):
lat1 = (lat1 * math.pi) / 180
lng1 = (lng1 * math.pi) / 180
lat2 = (lat2 * math.pi) / 180
lng2 = (lng2 * math.pi) / 180
a = (math.sin(lat1)*math.sin(lat2))+(math.cos(lat1)*math.cos(lat2)*math.cos(lng2 - lng1))
return math.acos(a) * 6371.01
@staticmethod
def max_variation_lat(distance):
max_variation = abs((180 * distance) / (6371.01 * math.pi))
return max_variation
@staticmethod
def max_variation_lon(address_latitude, distance):
top = math.sin(distance / 6371.01)
bottom = math.cos((math.pi * address_latitude)/180)
ratio = top / bottom
if -1 > ratio or ratio > 1:
max_variation = 100
else:
max_variation = abs(math.asin(ratio) * (180 / math.pi))
return max_variation
| mit | Python |
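The formula is the spherical law of cosines with an Earth radius of 6371.01 km; the added `float()` calls matter because values arriving as `decimal.Decimal` (e.g. from a Django `DecimalField`) cannot be multiplied by the float `math.pi`. A worked check, with a clamp added here (an assumption of this sketch, not the original code) to guard `acos` against rounding drift when both points coincide:

```python
import math

def geographic_distance(lat1, lng1, lat2, lng2):
    # Spherical law of cosines, Earth radius 6371.01 km.
    lat1, lng1, lat2, lng2 = (math.radians(float(v)) for v in (lat1, lng1, lat2, lng2))
    a = (math.sin(lat1) * math.sin(lat2)
         + math.cos(lat1) * math.cos(lat2) * math.cos(lng2 - lng1))
    return math.acos(max(-1.0, min(1.0, a))) * 6371.01

print(round(geographic_distance(0, 0, 0, 90), 1))  # 10007.5 km, i.e. 6371.01 * pi / 2
print(geographic_distance(51.5, -0.13, 51.5, -0.13))  # 0.0 for identical points
```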
4259019196c473431d4291f2910ab0164e319ffb | update simu.py for 0.3.0. | ryos36/polyphony-tutorial,ryos36/polyphony-tutorial,ryos36/polyphony-tutorial | bin/simu.py | bin/simu.py | #!/usr/bin/env python3
import sys
import os
import traceback
import subprocess
IVERILOG_PATH = 'iverilog'
ROOT_DIR = '.' + os.path.sep
TEST_DIR = ROOT_DIR + 'tests'
TMP_DIR = ROOT_DIR + '.tmp'
sys.path.append(ROOT_DIR)
from polyphony.compiler.__main__ import compile_main, logging_setting
from polyphony.compiler.env import env
def exec_test(casefile_path, output=True, compile_only=False):
casefile = os.path.basename(casefile_path)
casename, _ = os.path.splitext(casefile)
try:
compile_main(casefile_path, casename, TMP_DIR, debug_mode=output)
except Exception as e:
print('[COMPILE PYTHON] FAILED:' + casefile_path)
if env.dev_debug_mode:
traceback.print_exc()
print(e)
return
if compile_only:
return
for testbench in env.testbenches:
simulate_verilog(testbench.orig_name, casename, casefile_path, output)
def simulate_verilog(testname, casename, casefile_path, output):
hdl_files = ['{}{}{}.v'.format(TMP_DIR, os.path.sep, casename), '{}{}{}.v'.format(TMP_DIR, os.path.sep, testname)]
exec_name = '{}{}{}'.format(TMP_DIR, os.path.sep, testname)
args = ('{} -I {} -W all -o {} -s {}'.format(IVERILOG_PATH, TMP_DIR, exec_name, testname)).split(' ')
args += hdl_files
try:
subprocess.check_call(args)
except Exception as e:
print('[COMPILE HDL] FAILED:' + casefile_path)
return
try:
out = subprocess.check_output([exec_name])
lines = out.decode('utf-8').split('\n')
for line in lines:
if output:
print(line)
if 'FAILED' in line:
raise Exception()
except Exception as e:
print('[SIMULATION] FAILED:' + casefile_path)
print(e)
if __name__ == '__main__':
if not os.path.exists(TMP_DIR):
os.mkdir(TMP_DIR)
if len(sys.argv) > 1:
# import profile
# profile.run("exec_test(sys.argv[1])")
exec_test(sys.argv[1])
| #!/usr/bin/env python3
import sys
import os
import traceback
import logging
import profile
from subprocess import call, check_call, check_output
ROOT_DIR = './'
TEST_DIR = ROOT_DIR+'tests'
TMP_DIR = ROOT_DIR+'.tmp'
sys.path.append(ROOT_DIR)
from polyphony.compiler.__main__ import compile_main, logging_setting
from polyphony.compiler.env import env
def exec_test(test, output=True, compile_only=False):
casefile = os.path.basename(test)
casename, _ = os.path.splitext(casefile)
try:
compile_main(test, casename, TMP_DIR, debug_mode=output)
except Exception as e:
print('[COMPILE PYTHON] FAILED:'+test)
if env.dev_debug_mode:
traceback.print_exc()
print(e)
return
if compile_only:
return
hdl_files = ['{}/{}.v'.format(TMP_DIR, casename), '{}/{}_test.v'.format(TMP_DIR, casename)]
exec_name = '{}/test'.format(TMP_DIR)
args = ('iverilog -I {} -W all -o {} -s test'.format(TMP_DIR, exec_name)).split(' ')
args += hdl_files
try:
check_call(args)
except Exception as e:
print('[COMPILE HDL] FAILED:'+test)
return
try:
out = check_output([exec_name])
lines = out.decode('utf-8').split('\n')
for line in lines:
if output:
print(line)
if 'FAILED' in line:
raise Exception()
except Exception as e:
print('[SIMULATION] FAILED:'+test)
print(e)
if __name__ == '__main__':
if not os.path.exists(TMP_DIR):
os.mkdir(TMP_DIR)
if len(sys.argv) > 1:
#profile.run("exec_test(sys.argv[1])")
exec_test(sys.argv[1])
| mit | Python |
fd9a553868ce46ceef2b23e79347dd262b63ebae | fix build instructions on Linux | 0xfeedface/node_raptor,0xfeedface/node_raptor,0xfeedface/node_raptor,0xfeedface/node_raptor | binding.gyp | binding.gyp | { "targets": [ {
"target_name": "bindings",
"include_dirs": [
"<(raptor_prefix)/include/raptor2"
],
"sources": [
"src/bindings.cc",
"src/parser.cc",
"src/parser_wrapper.cc",
"src/serializer.cc",
"src/serializer_wrapper.cc",
"src/statement.cc",
"src/statement_wrapper.cc",
"src/uri.cc",
"src/world.cc",
"src/message.cc"
],
"link_settings": {
"libraries": [ "-lraptor2" ]
},
"conditions": [
[ "OS=='mac'", {
"variables": {
"raptor_prefix": "/usr/local"
},
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
"OTHER_CPLUSPLUSFLAGS": [
"-std=c++11",
"-stdlib=libc++",
"-mmacosx-version-min=10.7"
]
}
} ],
[ "OS!='win'", {
"variables": {
"raptor_prefix": "/usr"
},
"cflags_cc": [ "-std=c++11", "-fexceptions" ]
} ]
]
} ] }
| { "targets": [ {
"target_name": "bindings",
"variables": {
"raptor_prefix": "/usr/local"
},
"include_dirs": [
"<(raptor_prefix)/include/raptor2"
],
"sources": [
"src/bindings.cc",
"src/parser.cc",
"src/parser_wrapper.cc",
"src/serializer.cc",
"src/serializer_wrapper.cc",
"src/statement.cc",
"src/statement_wrapper.cc",
"src/uri.cc",
"src/world.cc",
"src/message.cc"
],
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-std=c++11", "-fno-exceptions" ],
"link_settings": {
"libraries": [ "-lraptor2" ]
},
"conditions": [ [
"OS=='mac'", {
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
"OTHER_CPLUSPLUSFLAGS": [
"-std=c++11",
"-stdlib=libc++",
"-mmacosx-version-min=10.7"
]
}
}
] ]
} ] }
| apache-2.0 | Python |
2f924fc35d0724e7638e741fd466228649077e10 | Update action_after_build destination | elaberge/node-zipfile,elaberge/node-zipfile,elaberge/node-zipfile,elaberge/node-zipfile | binding.gyp | binding.gyp | {
'includes': [ 'deps/common-libzip.gypi' ],
'variables': {
'shared_libzip%':'false',
'shared_libzip_includes%':'/usr/lib',
'shared_libzip_libpath%':'/usr/include'
},
'targets': [
{
'target_name': 'node_zipfile',
'conditions': [
['shared_libzip == "false"', {
'dependencies': [
'deps/libzip.gyp:libzip'
]
},
{
'libraries': [
'-L<@(shared_libzip_libpath)',
'-lzip'
],
'include_dirs': [
'<@(shared_libzip_includes)',
'<@(shared_libzip_libpath)/libzip/include',
]
}
]
],
'sources': [
'src/node_zipfile.cpp'
],
},
{
'target_name': 'action_after_build',
'type': 'none',
'dependencies': [ 'node_zipfile' ],
'copies': [
{
'files': [ '<(PRODUCT_DIR)/node_zipfile.node' ],
'destination': './lib/binding/'
}
],
'conditions': [
['OS=="win"', {
'copies': [
{
'files': [ '<(PRODUCT_DIR)/libzip.dll' ],
'destination': 'lib/'
}
]
}]
]
}
]
}
| {
'includes': [ 'deps/common-libzip.gypi' ],
'variables': {
'shared_libzip%':'false',
'shared_libzip_includes%':'/usr/lib',
'shared_libzip_libpath%':'/usr/include'
},
'targets': [
{
'target_name': 'node_zipfile',
'conditions': [
['shared_libzip == "false"', {
'dependencies': [
'deps/libzip.gyp:libzip'
]
},
{
'libraries': [
'-L<@(shared_libzip_libpath)',
'-lzip'
],
'include_dirs': [
'<@(shared_libzip_includes)',
'<@(shared_libzip_libpath)/libzip/include',
]
}
]
],
'sources': [
'src/node_zipfile.cpp'
],
},
{
'target_name': 'action_after_build',
'type': 'none',
'dependencies': [ 'node_zipfile' ],
'copies': [
{
'files': [ '<(PRODUCT_DIR)/node_zipfile.node' ],
'destination': './lib/'
}
],
'conditions': [
['OS=="win"', {
'copies': [
{
'files': [ '<(PRODUCT_DIR)/libzip.dll' ],
'destination': 'lib/'
}
]
}]
]
}
]
}
| bsd-3-clause | Python |
db99ecacca94f5c045d8d024dd34c96e23d828df | Adjust binding.gyp for building on Linux too | elafargue/nodhelium,elafargue/nodhelium,elafargue/nodhelium,elafargue/nodhelium | binding.gyp | binding.gyp | {
"targets": [
{
"target_name": "helium",
"sources": [ "helium.cc", "helium_wrapper.cc" ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
],
'libraries': [ '-lhelium'],
'conditions': [
[ 'OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
}
}
]]
}
]
}
| {
"targets": [
{
"target_name": "helium",
"sources": [ "helium.cc", "helium_wrapper.cc" ],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
],
'libraries': [ '-lhelium'],
'conditions': [
[ 'OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
}
}]
]
}
]
}
| mit | Python |
c2f563215fcc62d6e595446f5acbd1969484ddb7 | move end timer command to the correct location | edlongman/thescoop,edlongman/thescoop,edlongman/thescoop | clean_db.py | clean_db.py | import MySQLdb, config, urllib, cgi, datetime, time
sql = MySQLdb.connect(host="localhost",
user=config.username,
passwd=config.passwd,
db=config.db)
sql.query("SELECT `id` FROM `feedurls`")
db_feed_query=sql.store_result()
rss_urls=db_feed_query.fetch_row(0)
table_name = "stories"
date_from = datetime.datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
date_to = datetime.datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
for rss_url_data in rss_urls:
feed_id=rss_url_data[0]
i = date_from
while i <= date_to:
t0=time.clock()
whereclause="`date_added` = '" + i.strftime("%Y-%m-%d") + "'"
whereclause+=" AND `feedid`= "+ str(feed_id) +""
query="DELETE FROM stories WHERE " + whereclause
query+=" AND `url` NOT IN (SELECT * FROM (SELECT `url` FROM stories WHERE "+whereclause
query+=" ORDER BY `points` DESC LIMIT 0,20) AS TAB);"
sql.query(query)
sql.commit()
print(i.strftime("%d/%m/%Y")+","+str(time.clock()-t0))
i += datetime.timedelta(days=1)
| import MySQLdb, config, urllib, cgi, datetime, time
sql = MySQLdb.connect(host="localhost",
user=config.username,
passwd=config.passwd,
db=config.db)
sql.query("SELECT `id` FROM `feedurls`")
db_feed_query=sql.store_result()
rss_urls=db_feed_query.fetch_row(0)
table_name = "stories"
date_from = datetime.datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
date_to = datetime.datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
for rss_url_data in rss_urls:
feed_id=rss_url_data[0]
i = date_from
while i <= date_to:
t0=time.clock()
whereclause="`date_added` = '" + i.strftime("%Y-%m-%d") + "'"
whereclause+=" AND `feedid`= "+ str(feed_id) +""
query="DELETE FROM stories WHERE " + whereclause
query+=" AND `url` NOT IN (SELECT * FROM (SELECT `url` FROM stories WHERE "+whereclause
query+=" ORDER BY `points` DESC LIMIT 0,20) AS TAB);"
print(i.strftime("%d/%m/%Y")+","+str(time.clock()-t0))
sql.query(query)
sql.commit()
i += datetime.timedelta(days=1)
| apache-2.0 | Python |
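The `NOT IN (SELECT * FROM (...) AS TAB)` wrapper exists because MySQL rejects `LIMIT` directly inside an `IN` subquery; materializing the top-20 rows as a derived table sidesteps that restriction. A parameterized variant of the same statement (a sketch: `conn`, `day`, and `feed_id` stand in for the script's connection and loop variables, and letting the driver quote the values avoids the manual string concatenation above):

```python
delete_sql = """
    DELETE FROM stories
    WHERE date_added = %s AND feedid = %s
      AND url NOT IN (
        SELECT * FROM (
          SELECT url FROM stories
          WHERE date_added = %s AND feedid = %s
          ORDER BY points DESC LIMIT 20
        ) AS top20
      )
"""
cur = conn.cursor()
cur.execute(delete_sql, (day, feed_id, day, feed_id))
conn.commit()
```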
d0b457b5bde040af623b78409f778a1c39a09807 | Hide main | Mause/pytransperth,Mause/pytransperth | transperth/livetimes.py | transperth/livetimes.py | import json
from itertools import chain
from os.path import join, dirname
import requests
from lxml import etree
URL = (
'http://livetimes.transperth.wa.gov.au/LiveTimes.asmx'
'/GetSercoTimesForStation'
)
ASSETS = join(dirname(__file__), 'assets')
with open(join(ASSETS, 'train_stations.json')) as fh:
TRAIN_STATIONS = json.load(fh)
TRAIN_STATIONS_SET = (list(station.values())[0] for station in TRAIN_STATIONS)
TRAIN_STATIONS_SET = set(chain.from_iterable(TRAIN_STATIONS_SET))
def times_for_station(station_name):
if station_name not in TRAIN_STATIONS_SET:
raise Exception('Bad station')
r = requests.get(
URL,
params={
'stationname': station_name
}
)
return _parse_trips(r.content)
def _parse_trips(trips):
root = etree.fromstring(trips)
root = root.find('{http://services.pta.wa.gov.au/}Trips')
trips = root.findall('{http://services.pta.wa.gov.au/}SercoTrip')
trips = [
{
etree.QName(el).localname: el.text
for el in trip
}
for trip in trips
]
for trip in trips:
trip.update({
'PatternFullDisplay': trip['PatternFullDisplay'].split(', '),
'Pattern': trip['Pattern'].split(','),
'Cancelled': trip['Cancelled'] == 'True'
})
return trips
def _main():
from pprint import pprint
pprint(times_for_station('Perth Underground Stn'))
if __name__ == '__main__':
_main()
| import json
from itertools import chain
from os.path import join, dirname
import requests
from lxml import etree
URL = (
'http://livetimes.transperth.wa.gov.au/LiveTimes.asmx'
'/GetSercoTimesForStation'
)
ASSETS = join(dirname(__file__), 'assets')
with open(join(ASSETS, 'train_stations.json')) as fh:
TRAIN_STATIONS = json.load(fh)
TRAIN_STATIONS_SET = (list(station.values())[0] for station in TRAIN_STATIONS)
TRAIN_STATIONS_SET = set(chain.from_iterable(TRAIN_STATIONS_SET))
def times_for_station(station_name):
if station_name not in TRAIN_STATIONS_SET:
raise Exception('Bad station')
r = requests.get(
URL,
params={
'stationname': station_name
}
)
return _parse_trips(r.content)
def _parse_trips(trips):
root = etree.fromstring(trips)
root = root.find('{http://services.pta.wa.gov.au/}Trips')
trips = root.findall('{http://services.pta.wa.gov.au/}SercoTrip')
trips = [
{
etree.QName(el).localname: el.text
for el in trip
}
for trip in trips
]
for trip in trips:
trip.update({
'PatternFullDisplay': trip['PatternFullDisplay'].split(', '),
'Pattern': trip['Pattern'].split(','),
'Cancelled': trip['Cancelled'] == 'True'
})
return trips
def main():
from pprint import pprint
pprint(times_for_station('Perth Underground Stn'))
if __name__ == '__main__':
main()
| mit | Python |
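`etree.QName(el).localname` is what lets the dict comprehension ignore the `http://services.pta.wa.gov.au/` namespace that prefixes every tag. A self-contained demonstration of that trick:

```python
from lxml import etree

xml = b'''<Trip xmlns="http://services.pta.wa.gov.au/">
  <Pattern>A,B,C</Pattern>
  <Cancelled>True</Cancelled>
</Trip>'''

trip = etree.fromstring(xml)
fields = {etree.QName(el).localname: el.text for el in trip}
assert fields == {'Pattern': 'A,B,C', 'Cancelled': 'True'}
```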
3ee4d2f80f58cb0068eaeb3b7f5c4407ce8e60d0 | add text information about progress of downloading | Victoria1807/VK-Photos-Downloader | vk-photos-downloader.py | vk-photos-downloader.py | #!/usr/bin/python3.5
#-*- coding: UTF-8 -*-
import vk, os, time
from urllib.request import urlretrieve
token = input("Enter a token: ") # vk token
#Authorization
session = vk.Session(access_token=str(token))
vk_api = vk.API(session)
count = 0 # count of down. photos
perc = 0 # percent of down. photos
breaked = 0 # unsuccessful down.
time_now = time.time() # current time
url = input("Enter a URL of album: ") # url of album
folder_name = input("Enter a name of folder for download photos: ") # fold. for photo
print("-------------------------------------------")
owner_id = url.split('album')[1].split('_')[0] # id of owner
album_id = url.split('album')[1].split('_')[1][0:-1] # id of album
photos_count = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['size'] # count of ph. in albums
album_title = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['title'] # albums title
photos_information = vk_api.photos.get(owner_id=owner_id, album_id=album_id) # dictionaries of photos information
print("A title of album - {}".format(album_title))
print("Photos in album - {}".format(photos_count))
print("------------------")
if not os.path.exists(folder_name):
os.makedirs(folder_name + '/' + album_title) # creating a folder for download photos
print("Created a folder for photo.")
print("---------------------------")
else:
print("A folder with this name already exists!")
exit()
photos_link = [] # photos link
for i in photos_information:
photos_link.append(i['src_xxbig'])
photo_name = 0 # photo name
for i in photos_link:
photo_name += 1
try:
urlretrieve(i, folder_name + '/' + album_title + '/' + str(photo_name) + '.jpg') # download photos
count += 1
perc = (100 * count) / photos_count
print("Download {} of {} photos. ({}%)".format(count, photos_count, round(perc, 2)))
except:
print("An error occurred, file skipped.")
breaked += 1
minutes = int((time.time() - time_now) // 60)
seconds = int((time.time() - time_now) % 60)
print("------------------------")
print("Successful download {} photos.".format(count))
print("Skipped {} photos.".format(breaked))
print("Time spent: {}.{} minutes.".format(minutes, seconds))
| #!/usr/bin/python3.5
#-*- coding: UTF-8 -*-
import vk, os, time
from urllib.request import urlretrieve
token = input("Enter a token: ")
#Authorization
session = vk.Session(access_token=str(token))
vk_api = vk.API(session)
count = 0 # count of down. photos
perc = 0 # percent of down. photos
breaked = 0 # unsuccessful down.
time_now = time.time() # current time
url = input("Enter a URL of album: ") # url of album
folder_name = input("Enter a name of folder for download photos: ") # fold. for photo
owner_id = url.split('album')[1].split('_')[0] # id of owner
album_id = url.split('album')[1].split('_')[1][0:-1] # id of album
photos_count = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['size'] # count of ph. in albums
album_title = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['title'] # albums title
photos_information = vk_api.photos.get(owner_id=owner_id, album_id=album_id) # dictionaries of photos information
photos_link = [] # photos link
for i in photos_information:
photos_link.append(i['src_xxbig'])
if not os.path.exists(folder_name):
os.makedirs(folder_name + '/' + album_title) # creating a folder for download photos
qw = 'ok'
else:
print("A folder with this name already exists!")
exit()
photo_name = 0 # photo name
for i in photos_link:
photo_name += 1
urlretrieve(i, folder_name + '/' + album_title + '/' + str(photo_name) + '.jpg') # download photos
| mit | Python |
cc894ecf36a95d18fc84a4866c5a1902d291ccbe | Use non-lazy `gettext` where sufficient | homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps | byceps/blueprints/site/ticketing/forms.py | byceps/blueprints/site/ticketing/forms.py | """
byceps.blueprints.site.ticketing.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import g
from flask_babel import gettext, lazy_gettext
from wtforms import StringField
from wtforms.validators import InputRequired, ValidationError
from ....services.consent import (
consent_service,
subject_service as consent_subject_service,
)
from ....services.user import service as user_service
from ....util.l10n import LocalizedForm
def validate_user(form, field):
screen_name = field.data.strip()
user = user_service.find_user_by_screen_name(
screen_name, case_insensitive=True
)
if user is None:
raise ValidationError(gettext('Unknown username'))
    if (not user.initialized) or user.suspended or user.deleted:
raise ValidationError(gettext('The user account is not active.'))
user = user.to_dto()
required_consent_subjects = (
consent_subject_service.get_subjects_required_for_brand(g.brand_id)
)
required_consent_subject_ids = {
subject.id for subject in required_consent_subjects
}
if not consent_service.has_user_consented_to_all_subjects(
user.id, required_consent_subject_ids
):
raise ValidationError(
gettext(
'User "%(screen_name)s" has not yet given all necessary '
'consents. Logging in again is required.',
screen_name=user.screen_name,
)
)
field.data = user
class SpecifyUserForm(LocalizedForm):
user = StringField(
lazy_gettext('Username'), [InputRequired(), validate_user]
)
| """
byceps.blueprints.site.ticketing.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import g
from flask_babel import lazy_gettext
from wtforms import StringField
from wtforms.validators import InputRequired, ValidationError
from ....services.consent import (
consent_service,
subject_service as consent_subject_service,
)
from ....services.user import service as user_service
from ....util.l10n import LocalizedForm
def validate_user(form, field):
screen_name = field.data.strip()
user = user_service.find_user_by_screen_name(
screen_name, case_insensitive=True
)
if user is None:
raise ValidationError(lazy_gettext('Unknown username'))
    if (not user.initialized) or user.suspended or user.deleted:
raise ValidationError(lazy_gettext('The user account is not active.'))
user = user.to_dto()
required_consent_subjects = (
consent_subject_service.get_subjects_required_for_brand(g.brand_id)
)
required_consent_subject_ids = {
subject.id for subject in required_consent_subjects
}
if not consent_service.has_user_consented_to_all_subjects(
user.id, required_consent_subject_ids
):
raise ValidationError(
lazy_gettext(
'User "%(screen_name)s" has not yet given all necessary '
'consents. Logging in again is required.',
screen_name=user.screen_name,
)
)
field.data = user
class SpecifyUserForm(LocalizedForm):
user = StringField(
lazy_gettext('Username'), [InputRequired(), validate_user]
)
| bsd-3-clause | Python |
862885b5ea2b4d04c8980c257d3cdf644dd60f0c | Set the version to 0.1.6 final | xujun10110/king-phisher,drptbl/king-phisher,drptbl/king-phisher,securestate/king-phisher,hdemeyer/king-phisher,0x0mar/king-phisher,securestate/king-phisher,wolfthefallen/king-phisher,drptbl/king-phisher,securestate/king-phisher,securestate/king-phisher,securestate/king-phisher,wolfthefallen/king-phisher,zigitax/king-phisher,zigitax/king-phisher,guitarmanj/king-phisher,zigitax/king-phisher,zigitax/king-phisher,zeroSteiner/king-phisher,xujun10110/king-phisher,zeroSteiner/king-phisher,xujun10110/king-phisher,guitarmanj/king-phisher,wolfthefallen/king-phisher,zeroSteiner/king-phisher,zeroSteiner/king-phisher,zeroSteiner/king-phisher,wolfthefallen/king-phisher,hdemeyer/king-phisher,xujun10110/king-phisher,guitarmanj/king-phisher,zigitax/king-phisher,xujun10110/king-phisher,guitarmanj/king-phisher,drptbl/king-phisher,hdemeyer/king-phisher,wolfthefallen/king-phisher,0x0mar/king-phisher,hdemeyer/king-phisher | king_phisher/version.py | king_phisher/version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/version.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
version_info = collections.namedtuple('version_info', ['major', 'minor', 'micro'])(0, 1, 6)
"""A tuple representing the version information in the format ('major', 'minor', 'micro')"""
version_label = ''
"""A version lable such as alpha or beta."""
version = "{0}.{1}.{2}".format(version_info.major, version_info.minor, version_info.micro)
"""A string representing the full version information."""
# distutils_version is compatible with distutils.version classes
distutils_version = version
"""A string sutiable for being parsed by :py:mod:`distutils.version` classes."""
if version_label:
version += '-' + version_label
distutils_version += version_label[0]
if version_label[-1].isdigit():
distutils_version += version_label[-1]
else:
distutils_version += '0'
rpc_api_version = 2
"""An integer representing the current version of the RPC API, used for compatibility checks."""
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/version.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
version_info = collections.namedtuple('version_info', ['major', 'minor', 'micro'])(0, 1, 6)
"""A tuple representing the version information in the format ('major', 'minor', 'micro')"""
version_label = 'beta'
"""A version lable such as alpha or beta."""
version = "{0}.{1}.{2}".format(version_info.major, version_info.minor, version_info.micro)
"""A string representing the full version information."""
# distutils_version is compatible with distutils.version classes
distutils_version = version
"""A string sutiable for being parsed by :py:mod:`distutils.version` classes."""
if version_label:
version += '-' + version_label
distutils_version += version_label[0]
if version_label[-1].isdigit():
distutils_version += version_label[-1]
else:
distutils_version += '0'
rpc_api_version = 2
"""An integer representing the current version of the RPC API, used for compatibility checks."""
| bsd-3-clause | Python |
7c8d7a456634d15f8c13548e2cfd6be9440f7c65 | Add handler for 403 forbidden (User does not have Atmosphere access, but was correctly authenticated) | CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend | troposphere/__init__.py | troposphere/__init__.py | import logging
from flask import Flask
from flask import render_template, redirect, url_for, request, abort
import requests
from troposphere import settings
from troposphere.cas import (cas_logoutRedirect, cas_loginRedirect,
cas_validateTicket)
from troposphere.oauth import generate_access_token
logger = logging.getLogger(__name__)
app = Flask(__name__)
def get_maintenance():
"""
Returns a list of maintenance records along with a boolean to indicate
whether or not login should be disabled
"""
return ([], False)
@app.route('/')
def redirect_app():
return "Redirect"
@app.errorhandler(503)
def handle_maintenance(e):
return "We're undergoing maintenance"
@app.route('/login', methods=['GET', 'POST'])
def login():
"""
CAS Login : Phase 1/3 Call CAS Login
"""
records, disabled_login = get_maintenance()
if disabled_login:
abort(503)
#if request.method == "POST" and 'next' in request.form:
return cas_loginRedirect('/application/')
#else:
#return "Login please"
@app.route('/logout')
def logout():
#django_logout(request)
    if request.form.get('cas', False):
return cas_logoutRedirect()
return redirect(settings.REDIRECT_URL + '/login')
@app.route('/CAS_serviceValidater')
def cas_service_validator():
"""
Method expects 2 GET parameters: 'ticket' & 'sendback'
After a CAS Login:
Redirects the request based on the GET param 'ticket'
Unauthorized Users are returned a 401
Authorized Users are redirected to the GET param 'sendback'
"""
logger.debug('GET Variables:%s' % request.args)
sendback = request.args.get('sendback', None)
ticket = request.args.get('ticket', None)
if not ticket:
logger.info("No Ticket received in GET string")
abort(400)
user = cas_validateTicket(ticket, sendback)
logger.debug(user + " successfully authenticated against CAS")
# Now check Groupy
key = open(settings.OAUTH_PRIVATE_KEY, 'r').read()
try:
token = generate_access_token(key, user)
logger.debug("TOKEN: " + token)
return redirect(sendback)
except:
abort(403)
@app.errorhandler(403)
def no_user(e):
logger.debug(e)
return "You're not an Atmopshere user"
#@app.route('/CASlogin', defaults={'path': ''})
#@app.route('/CASlogin/<redirect>')
# """
# url(r'^CASlogin/(?P<redirect>.*)$', 'authentication.cas_loginRedirect'),
# """
# pass
@app.route('/application', defaults={'path': ''})
@app.route('/application/', defaults={'path': ''})
@app.route('/application/<path:path>')
def application(path):
return render_template('application.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| import logging
from flask import Flask
from flask import render_template, redirect, url_for, request
import requests
from troposphere import settings
from troposphere.cas import (cas_logoutRedirect, cas_loginRedirect,
cas_validateTicket)
from troposphere.oauth import generate_access_token
logger = logging.getLogger(__name__)
app = Flask(__name__)
def get_maintenance():
"""
Returns a list of maintenance records along with a boolean to indicate
whether or not login should be disabled
"""
return ([], False)
@app.route('/')
def redirect_app():
return "Redirect"
@app.errorhandler(503)
def handle_maintenance():
return "We're undergoing maintenance"
@app.route('/login', methods=['GET', 'POST'])
def login():
"""
CAS Login : Phase 1/3 Call CAS Login
"""
records, disabled_login = get_maintenance()
if disabled_login:
abort(503)
#if request.method == "POST" and 'next' in request.form:
return cas_loginRedirect('/application/')
#else:
#return "Login please"
@app.route('/logout')
def logout():
#django_logout(request)
if request.POST.get('cas', False):
return cas_logoutRedirect()
return redirect(settings.REDIRECT_URL + '/login')
@app.route('/CAS_serviceValidater')
def cas_service_validator():
"""
Method expects 2 GET parameters: 'ticket' & 'sendback'
After a CAS Login:
Redirects the request based on the GET param 'ticket'
Unauthorized Users are returned a 401
Authorized Users are redirected to the GET param 'sendback'
"""
logger.debug('GET Variables:%s' % request.args)
sendback = request.args.get('sendback', None)
ticket = request.args.get('ticket', None)
if not ticket:
logger.info("No Ticket received in GET string")
abort(400)
user = cas_validateTicket(ticket, sendback)
logger.debug(user + " successfully authenticated against CAS")
# Now check Groupy
key = open(settings.OAUTH_PRIVATE_KEY, 'r').read()
token = generate_access_token(key, user)
logger.debug("TOKEN: " + token)
return redirect(sendback)
@app.route('/no_user')
def no_user():
return "You're not an Atmopshere user"
#@app.route('/CASlogin', defaults={'path': ''})
#@app.route('/CASlogin/<redirect>')
# """
# url(r'^CASlogin/(?P<redirect>.*)$', 'authentication.cas_loginRedirect'),
# """
# pass
@app.route('/application', defaults={'path': ''})
@app.route('/application/', defaults={'path': ''})
@app.route('/application/<path:path>')
def application(path):
return render_template('application.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| apache-2.0 | Python |
b7be60eff8e0c82741dda674824aa748e33e7fdd | convert pui.py to pywiki framework | legoktm/legobot-old,legoktm/legobot-old | trunk/toolserver/pui.py | trunk/toolserver/pui.py | #!/usr/bin/python
# -*- coding: utf-8 -*
#
# (C) Legoktm 2008-2009, MIT License
#
import re
sys.path.append(os.environ['HOME'] + '/pywiki')
import wiki
page = wiki.Page('Wikipedia:Possibly unfree images')
wikitext = state0 = page.get()
wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext)
EditMsg = 'Adding new day to holding cell'
wiki.showDiff(state0, wikitext)
page.put(wikitext,EditMsg) | #!/usr/bin/python
# -*- coding: utf-8 -*
#
# (C) Legoktm 2008-2009, MIT License
#
import re, sys, os
sys.path.append(os.environ['HOME'] + '/pyenwiki')
import wikipedia
site = wikipedia.getSite()
page = wikipedia.Page(site, 'Wikipedia:Possibly unfree images')
wikitext = state0 = page.get()
wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext)
EditMsg = 'Adding new day to holding cell'
wikipedia.showDiff(state0, wikitext)
wikipedia.setAction(EditMsg)
page.put(wikitext) | mit | Python |
d6029a7b2e39ff6222ca3d6788d649b14bbf35e3 | add smoother to denominator as well | datamade/sunspots | trending.py | trending.py | import googleanalytics as ga
import collections
import numpy
import datetime
SMOOTHER = 20
WINDOW = 8
GROWTH_THRESHOLD = 0.02
def trend(counts) :
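    # Least-squares fit of log(pageviews) on days relative to today;
    # the slope b1 estimates the daily exponential growth rate.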
X, Y = zip(*counts)
X = numpy.array([x.toordinal() for x in X])
X -= datetime.date.today().toordinal()
A = numpy.array([numpy.ones(len(X)), X])
Y = numpy.log(numpy.array(Y))
w = numpy.linalg.lstsq(A.T,Y)[0]
return w
profile = ga.authenticate(identity='sunspot',
account='Illinois Campaign for Political Reform',
webproperty='Illinois Sunshine',
profile='Illinois Sunshine')
totals = profile.core.query.metrics('pageviews').\
daily(days=-WINDOW)
totals = {date : count for date, count in totals.rows}
pages = profile.core.query.metrics('pageviews').\
dimensions('pagepath').\
daily(days=-WINDOW)
page_counts = collections.defaultdict(dict)
normalized_page_counts = collections.defaultdict(dict)
for date, page, count in pages.rows :
page_counts[page][date] = count
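    # Additive smoothing keeps low-traffic days from producing extreme ratios.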
normalized_page_counts[page][date] = (count + SMOOTHER)/(totals[date] + SMOOTHER)
for counts in normalized_page_counts.values() :
for date in totals.keys() - counts.keys() :
counts[date] = SMOOTHER/(totals[date] + SMOOTHER)
for page, counts in normalized_page_counts.items() :
b0, b1 = trend(counts.items())
if b1 > GROWTH_THRESHOLD and page.startswith('/committees/') :
print(page, b0, b1)
for count in sorted(page_counts[page].items()) :
print(count)
| import googleanalytics as ga
import collections
import numpy
import datetime
SMOOTHER = 20
WINDOW = 8
GROWTH_THRESHOLD = 0.03
def trend(counts) :
X, Y = zip(*counts)
X = numpy.array([x.toordinal() for x in X])
X -= datetime.date.today().toordinal()
A = numpy.array([numpy.ones(len(X)), X])
Y = numpy.log(numpy.array(Y))
w = numpy.linalg.lstsq(A.T,Y)[0]
return w
profile = ga.authenticate(identity='sunspot',
account='Illinois Campaign for Political Reform',
webproperty='Illinois Sunshine',
profile='Illinois Sunshine')
totals = profile.core.query.metrics('pageviews').\
daily(days=-WINDOW)
totals = {date : count for date, count in totals.rows}
pages = profile.core.query.metrics('pageviews').\
dimensions('pagepath').\
daily(days=-WINDOW)
page_counts = collections.defaultdict(dict)
normalized_page_counts = collections.defaultdict(dict)
for date, page, count in pages.rows :
page_counts[page][date] = count
normalized_page_counts[page][date] = (count + SMOOTHER)/totals[date]
for counts in normalized_page_counts.values() :
for date in totals.keys() - counts.keys() :
counts[date] = SMOOTHER/totals[date]
for page, counts in normalized_page_counts.items() :
b0, b1 = trend(counts.items())
if b1 > GROWTH_THRESHOLD and page.startswith('/committees/') :
print(page, b0, b1)
for count in sorted(page_counts[page].items()) :
print(count)
| mit | Python |
656c0f44c27f64d14dde7cbbfdec31906dab4c51 | Add params to request docs. | habnabit/treq,hawkowl/treq,cyli/treq,FxIII/treq,hawkowl/treq,glyph/treq,alex/treq,alex/treq,ldanielburr/treq,mithrandi/treq,inspectlabs/treq | treq/api.py | treq/api.py | from treq.client import HTTPClient
def head(url, **kwargs):
"""
Make a ``HEAD`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).head(url, **kwargs)
def get(url, headers=None, **kwargs):
"""
Make a ``GET`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).get(url, headers=headers, **kwargs)
def post(url, data=None, **kwargs):
"""
Make a ``POST`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).post(url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""
Make a ``PUT`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).put(url, data=data, **kwargs)
def delete(url, **kwargs):
"""
Make a ``DELETE`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).delete(url, **kwargs)
def request(method, url, **kwargs):
"""
Make an HTTP request.
:param str method: HTTP method. Example: ``'GET'``, ``'HEAD'``. ``'PUT'``,
``'POST'``.
:param str url: http or https URL, which may include query arguments.
:param headers: Optional HTTP Headers to send with this request.
:type headers: Headers or None
    :param params: Optional parameters to be appended as the query string to
        the URL; any query string parameters already in the URL will be
        preserved.
:type params: dict w/ str or list of str values, list of 2-tuples, or None.
:param data: Optional request body.
:type data: str, file-like, IBodyProducer, or None
:param reactor: Optional twisted reactor.
    :param bool persistent: Use persistent HTTP connections. Default: ``True``
:param bool allow_redirects: Follow HTTP redirects. Default: ``True``
:rtype: Deferred that fires with an IResponse provider.
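
    Example (illustrative usage)::

        d = request('GET', 'http://example.com', params={'q': 'treq'})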
"""
return _client(**kwargs).request(method, url, **kwargs)
#
# Private API
#
def _client(*args, **kwargs):
return HTTPClient.with_config(**kwargs)
| from treq.client import HTTPClient
def head(url, **kwargs):
"""
Make a ``HEAD`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).head(url, **kwargs)
def get(url, headers=None, **kwargs):
"""
Make a ``GET`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).get(url, headers=headers, **kwargs)
def post(url, data=None, **kwargs):
"""
Make a ``POST`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).post(url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""
Make a ``PUT`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).put(url, data=data, **kwargs)
def delete(url, **kwargs):
"""
Make a ``DELETE`` request.
See :py:func:`treq.request`
"""
return _client(**kwargs).delete(url, **kwargs)
def request(method, url, **kwargs):
"""
Make an HTTP request.
:param str method: HTTP method. Example: ``'GET'``, ``'HEAD'``. ``'PUT'``,
``'POST'``.
:param str url: http or https URL, which may include query arguments.
:param headers: Optional HTTP Headers to send with this request.
:type headers: Headers or None
:param data: Optional request body.
:type data: str, file-like, IBodyProducer, or None
:param reactor: Optional twisted reactor.
    :param bool persistent: Use persistent HTTP connections. Default: ``True``
:param bool allow_redirects: Follow HTTP redirects. Default: ``True``
:rtype: Deferred that fires with an IResponse provider.
"""
return _client(**kwargs).request(method, url, **kwargs)
#
# Private API
#
def _client(*args, **kwargs):
return HTTPClient.with_config(**kwargs)
| mit | Python |
0ba512b0e8eb6b5055261afb2962d3bfc5e2fda5 | Add some playback stream headers | katajakasa/koel-ampache-bridge | src/playback.py | src/playback.py | # -*- coding: utf-8 -*-
import mimetypes
import os
from flask import Response, request
from werkzeug.datastructures import Headers
import audiotranscode
from utils import generate_random_key
from tables import Song
import config
def stream_audio():
song = Song.get_one(id=request.args.get('id'))
# A hack to get my local dev env working
path = song.path
if config.DEBUG:
cut = '/mnt/storage/audio/music/'
path = os.path.join(config.MUSIC_DIR, song.path[len(cut):])
# Find the file and guess type
mime = mimetypes.guess_type(path)[0]
ext = mimetypes.guess_extension(mime)
# Transcoding if required
transcode = False
if ext not in ['.mp3', '.ogg']:
transcode = True
mime = "audio/mpeg"
ext = '.mp3'
# Send some extra headers
headers = Headers()
headers.add('Content-Transfer-Encoding', 'binary')
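    # Note: Content-Length is the source file size; a transcoded stream's length may differ.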
headers.add('Content-Length', os.path.getsize(path))
def generate_audio():
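        # Yield the raw file in 1 KiB chunks, or pipe it through the
        # transcoder when the source is not already MP3/OGG.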
if not transcode:
with open(path, "rb") as handle:
data = handle.read(1024)
while data:
yield data
data = handle.read(1024)
else:
tc = audiotranscode.AudioTranscode()
for data in tc.transcode_stream(path, 'mp3'):
yield data
return Response(generate_audio(), mimetype=mime, headers=headers)
| # -*- coding: utf-8 -*-
import mimetypes
import os
from flask import Response, request
import audiotranscode
from tables import Song
import config
def stream_audio():
song = Song.get_one(id=request.args.get('id'))
# A hack to get my local dev env working
path = song.path
if config.DEBUG:
cut = '/mnt/storage/audio/music/'
path = os.path.join(config.MUSIC_DIR, song.path[len(cut):])
# Find the file and guess type
mime = mimetypes.guess_type(path)[0]
ext = mimetypes.guess_extension(mime)
# Transcoding if required
transcode = False
if ext not in ['.mp3', '.ogg']:
transcode = True
mime = "audio/mpeg"
def generate_audio():
if not transcode:
with open(path, "rb") as handle:
data = handle.read(1024)
while data:
yield data
data = handle.read(1024)
else:
tc = audiotranscode.AudioTranscode()
for data in tc.transcode_stream(path, 'mp3'):
yield data
return Response(generate_audio(), mimetype=mime)
| mit | Python |
e6487a2c623638b540b707c895a97eac1fc31979 | Update connection_test.py to work with Python3.7 | channable/icepeak,channable/icepeak,channable/icepeak | server/integration-tests/connection_test.py | server/integration-tests/connection_test.py | #!/usr/bin/env python3.7
"""
Test PUTing some data into Icepeak and getting it back over a websocket.
Requires a running Icepeak instance.
Requirements can be installed with: pip install requests websockets
"""
import asyncio
import json
import requests
import websockets
# 1. Put some data into icepeak over HTTP
new_data = {'status': 'freezing'}
requests.put('http://localhost:3000/so/cool',
json.dumps(new_data))
# 2. Get the data back over a websocket
async def hello(uri):
async with websockets.connect(uri) as websocket:
result = await websocket.recv()
parsed_result = json.loads(result)
assert new_data == parsed_result, 'Input data: {} is different from output data: {}'.format(
new_data, parsed_result)
print('Initial data was successfully sent to client!')
asyncio.get_event_loop().run_until_complete(
hello('ws://localhost:3000/so/cool'))
| #!/usr/bin/env python2.7
from __future__ import absolute_import, division, unicode_literals
import json
import requests
import websocket
# 1. Put some data into icepeak over HTTP
new_data = {'status': 'freezing'}
requests.put('http://localhost:3000/so/cool',
json.dumps(new_data))
# 2. Get the data back over a websocket
conn = websocket.create_connection("ws://localhost:3000/so/cool")
result = conn.recv()
parsed_result = json.loads(result)
assert new_data == parsed_result, 'Input data: {} is different from output data: {}'.format(
new_data, parsed_result)
print 'Initial data was successfully sent to client!'
| bsd-3-clause | Python |
69b9c641f144633b94aca47212af446971286454 | add tests | ISISComputingGroup/EPICS-inst_servers,ISISComputingGroup/EPICS-inst_servers | server_common/test_modules/test_autosave.py | server_common/test_modules/test_autosave.py | from __future__ import unicode_literals, absolute_import, print_function, division
import unittest
import shutil
import os
from server_common.autosave import AutosaveFile
TEMP_FOLDER = os.path.join("C:\\", "instrument", "var", "tmp", "autosave_tests")
class TestAutosave(unittest.TestCase):
def setUp(self):
self.autosave = AutosaveFile(service_name="unittests", file_name="test_file", folder=TEMP_FOLDER)
try:
os.makedirs(TEMP_FOLDER)
except:
pass
def test_GIVEN_no_existing_file_WHEN_get_parameter_from_autosave_THEN_default_returned(self):
default = object()
self.assertEqual(self.autosave.read_parameter("some_random_parameter", default), default)
def test_GIVEN_parameter_saved_WHEN_get_parameter_from_autosave_THEN_saved_value_returned(self):
value = "test_value"
self.autosave.write_parameter("parameter", value)
self.assertEqual(self.autosave.read_parameter("parameter", None), value)
    def test_GIVEN_different_parameter_saved_WHEN_get_parameter_from_autosave_THEN_default_returned(self):
value = "test_value"
self.autosave.write_parameter("other_parameter", value)
self.assertEqual(self.autosave.read_parameter("parameter", None), None)
def tearDown(self):
try:
shutil.rmtree(TEMP_FOLDER)
except:
pass
| import unittest
class TestAutosave(unittest.TestCase):
def setUp(self):
pass
| bsd-3-clause | Python |
6a4f4031b0aac1c8859424703088df903746a6c8 | change command doc string | efiop/dvc,efiop/dvc,dmpetrov/dataversioncontrol,dmpetrov/dataversioncontrol | dvc/command/get.py | dvc/command/get.py | import argparse
import logging
from .base import append_doc_link
from .base import CmdBaseNoRepo
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
class CmdGet(CmdBaseNoRepo):
def run(self):
from dvc.repo import Repo
try:
Repo.get(
self.args.url,
path=self.args.path,
out=self.args.out,
rev=self.args.rev,
)
return 0
except DvcException:
logger.exception(
"failed to get '{}' from '{}'".format(
self.args.path, self.args.url
)
)
return 1
def add_parser(subparsers, parent_parser):
GET_HELP = "Download/copy files or directories from Git repository."
get_parser = subparsers.add_parser(
"get",
parents=[parent_parser],
description=append_doc_link(GET_HELP, "get"),
help=GET_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
get_parser.add_argument(
"url", help="URL of Git repository to download from."
)
get_parser.add_argument(
"path", help="Path to a file or directory within the repository."
)
get_parser.add_argument(
"-o",
"--out",
nargs="?",
help="Destination path to copy/download files to.",
)
get_parser.add_argument(
"--rev", nargs="?", help="Repository git revision."
)
get_parser.set_defaults(func=CmdGet)
| import argparse
import logging
from .base import append_doc_link
from .base import CmdBaseNoRepo
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
class CmdGet(CmdBaseNoRepo):
def run(self):
from dvc.repo import Repo
try:
Repo.get(
self.args.url,
path=self.args.path,
out=self.args.out,
rev=self.args.rev,
)
return 0
except DvcException:
logger.exception(
"failed to get '{}' from '{}'".format(
self.args.path, self.args.url
)
)
return 1
def add_parser(subparsers, parent_parser):
GET_HELP = "Download/copy files or directories from git repository."
get_parser = subparsers.add_parser(
"get",
parents=[parent_parser],
description=append_doc_link(GET_HELP, "get"),
help=GET_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
get_parser.add_argument(
"url", help="URL of Git repository to download from."
)
get_parser.add_argument(
"path", help="Path to a file or directory within the repository."
)
get_parser.add_argument(
"-o",
"--out",
nargs="?",
help="Destination path to copy/download files to.",
)
get_parser.add_argument(
"--rev", nargs="?", help="Repository git revision."
)
get_parser.set_defaults(func=CmdGet)
| apache-2.0 | Python |
4a6846b969746b79f1acd0e0615232d97ed54b1f | replace import-time cluster dependencies (#1544) | vishnu2kmohan/dcos-commons,mesosphere/dcos-commons,mesosphere/dcos-commons,vishnu2kmohan/dcos-commons,vishnu2kmohan/dcos-commons,vishnu2kmohan/dcos-commons,vishnu2kmohan/dcos-commons,mesosphere/dcos-commons,mesosphere/dcos-commons,mesosphere/dcos-commons | frameworks/template/tests/test_sanity.py | frameworks/template/tests/test_sanity.py | import pytest
import sdk_install
import sdk_utils
from tests import config
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME))
# note: this package isn't released to universe, so there's nothing to test_upgrade() with
sdk_install.install(
config.PACKAGE_NAME,
sdk_utils.get_foldered_name(config.SERVICE_NAME),
config.DEFAULT_TASK_COUNT,
additional_options={"service": { "name": sdk_utils.get_foldered_name(config.SERVICE_NAME) } })
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME))
@pytest.mark.sanity
@pytest.mark.smoke
def test_install():
pass # package installed and appeared healthy!
| import pytest
import sdk_install
import sdk_utils
from tests import config
FOLDERED_SERVICE_NAME = sdk_utils.get_foldered_name(config.SERVICE_NAME)
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_install.uninstall(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME)
# note: this package isn't released to universe, so there's nothing to test_upgrade() with
sdk_install.install(
config.PACKAGE_NAME,
FOLDERED_SERVICE_NAME,
config.DEFAULT_TASK_COUNT,
additional_options={"service": { "name": FOLDERED_SERVICE_NAME } })
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.smoke
def test_install():
pass # package installed and appeared healthy!
| apache-2.0 | Python |
0eef0efbe716feb3dc02fb45a756496d5517966c | Update docs. | faneshion/MatchZoo,faneshion/MatchZoo | matchzoo/models/naive_model.py | matchzoo/models/naive_model.py | """Naive model with a simplest structure for testing purposes."""
import keras
from matchzoo import engine
class NaiveModel(engine.BaseModel):
"""
Naive model with a simplest structure for testing purposes.
Bare minimum functioning model. The best choice to get things rolling.
The worst choice to fit and evaluate performance.
"""
def build(self):
"""Build."""
x_in = self._make_inputs()
x = keras.layers.concatenate(x_in)
x_out = self._make_output_layer()(x)
self._backend = keras.models.Model(inputs=x_in, outputs=x_out)
| """Naive model with a simplest structure for testing purposes."""
import keras
from matchzoo import engine
class NaiveModel(engine.BaseModel):
"""Naive model with a simplest structure for testing purposes."""
def build(self):
"""Build."""
x_in = self._make_inputs()
x = keras.layers.concatenate(x_in)
x_out = self._make_output_layer()(x)
self._backend = keras.models.Model(inputs=x_in, outputs=x_out)
| apache-2.0 | Python |
0859bb58a4fa24f5e278e95da491a2b4409f0b2b | Tag 0.5.3 | koordinates/python-client,koordinates/python-client | koordinates/__init__.py | koordinates/__init__.py | # -*- coding: utf-8 -*-
"""
Koordinates Python API Client Library
:copyright: (c) Koordinates Limited.
:license: BSD, see LICENSE for more details.
"""
__version__ = "0.5.3"
from .exceptions import (
KoordinatesException,
ClientError,
ClientValidationError,
InvalidAPIVersion,
ServerError,
BadRequest,
AuthenticationError,
Forbidden,
NotFound,
NotAllowed,
Conflict,
RateLimitExceeded,
InternalServerError,
ServiceUnvailable,
)
from .client import Client
from .layers import Layer, Table
from .licenses import License
from .metadata import Metadata
from .publishing import Publish
from .sets import Set
from .sources import Source, UploadSource
from .users import Group, User
from .permissions import Permission
from .exports import Export, CropLayer, DownloadError
| # -*- coding: utf-8 -*-
"""
Koordinates Python API Client Library
:copyright: (c) Koordinates Limited.
:license: BSD, see LICENSE for more details.
"""
__version__ = "0.5.0"
from .exceptions import (
KoordinatesException,
ClientError,
ClientValidationError,
InvalidAPIVersion,
ServerError,
BadRequest,
AuthenticationError,
Forbidden,
NotFound,
NotAllowed,
Conflict,
RateLimitExceeded,
InternalServerError,
ServiceUnvailable,
)
from .client import Client
from .layers import Layer, Table
from .licenses import License
from .metadata import Metadata
from .publishing import Publish
from .sets import Set
from .sources import Source, UploadSource
from .users import Group, User
from .permissions import Permission
from .exports import Export, CropLayer, DownloadError
| bsd-3-clause | Python |
b6554b00fdb0387a27671eeb39589dc7e7109f6e | Add collecter function | LeoIsaac/piccolle-v2,LeoIsaac/piccolle-v2 | app/main.py | app/main.py | from flask import Flask, request, jsonify
from urllib.request import urlopen
from bs4 import BeautifulSoup
app = Flask(__name__)
app.config.update(
DEBUG=True
)
@app.route("/")
def index():
url = request.args.get('url', '')
res = collecter(url)
return jsonify(res)
if __name__ == "__main__":
app.run()
def collecter(url):
"""
画像のスクレイピングを行い、結果をjsonで返す
@param url スクレイピングしたいURL
@return スクレイピング結果のjson
"""
if(url == ""):
return
count = 0
pic = {}
html = urlopen(url)
soup = BeautifulSoup(html, "html.parser")
for a in soup.find_all("a"):
text = str(a.string)
if text.endswith("jpg") or text.endswith("png"):
count += 1
pic.update({count: text})
return pic

if __name__ == "__main__":
    app.run()
| from flask import Flask
app = Flask(__name__)
app.config.update(
DEBUG=True
)
@app.route("/")
def index():
return "Hello python"
if __name__ == "__main__":
app.run()
| apache-2.0 | Python |
7b58f59ec288dd055cf931dd47c4e8e59bb9ad1d | update atx-agent version | openatx/uiautomator2,openatx/uiautomator2,openatx/uiautomator2 | uiautomator2/version.py | uiautomator2/version.py | # coding: utf-8
#
__apk_version__ = '1.1.5'
# 1.1.5 waitForExists use UiObject2 method first then fallback to UiObject.waitForExists
# 1.1.4 add ADB_EDITOR_CODE broadcast support, fix bug (toast capture caused app crashes)
# 1.1.3 use thread to make watchers.watched faster, try to fix input method type multi
# 1.1.2 fix count error when have child && sync watched, to prevent watchers.remove error
# 1.1.1 support toast capture
# 1.1.0 update uiautomator-v18:2.1.2 -> uiautomator-v18:2.1.3 (This version fixed setWaitIdleTimeout not working bug)
# 1.0.14 catch NullException, add gps mock support
# 1.0.13 whatsinput suppoort, but not very well
# 1.0.12 add toast support
# 1.0.11 add auto install support
# 1.0.10 fix service not started bug
# 1.0.9 fix apk version code and version name
# ERR: 1.0.8 bad version number. show ip on notification
# ERR: 1.0.7 bad version number. new input method, some bug fix
__atx_agent_version__ = '0.4.6'
# 0.4.6 fix download dns resolve error (sometimes)
# 0.4.5 add http log, change atx-agent -d into atx-agent server -d
# 0.4.4 this version is gone
# 0.4.3 ignore sigint to prevent atx-agent quit
# 0.4.2 hot fix, close upgrade-self
# 0.4.1 fix app-download time.Timer panic error, use safe-time.Timer instead.
# 0.4.0 add go-daemon lib. use safe-time.Timer to prevent panic error. this will make it run longer
# 0.3.6 support upload zip and unzip, fix minicap rotation error when atx-agent is killed -9
# 0.3.5 hot fix for session
# 0.3.4 fix session() sometimes can not get mainActivity error
# 0.3.3 /shell support timeout
# 0.3.2 fix dns resolve error when network changes
# 0.3.0 use github.com/codeskyblue/heartbeat library instead of websocket, add /whatsinput
# 0.2.1 support occupy /minicap connection
# 0.2.0 add session support
# 0.1.8 fix screenshot always the same image. (BUG in 0.1.7), add /shell/stream add timeout for /shell
# 0.1.7 fix dns resolve error in /install
# 0.1.6 change download logic. auto fix orientation
# 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10
# 0.1.4 phone remote control
# 0.1.2 /download support
# 0.1.1 minicap builtin | # coding: utf-8
#
__apk_version__ = '1.1.5'
# 1.1.5 waitForExists use UiObject2 method first then fallback to UiObject.waitForExists
# 1.1.4 add ADB_EDITOR_CODE broadcast support, fix bug (toast capture caused app crashes)
# 1.1.3 use thread to make watchers.watched faster, try to fix input method type multi
# 1.1.2 fix count error when have child && sync watched, to prevent watchers.remove error
# 1.1.1 support toast capture
# 1.1.0 update uiautomator-v18:2.1.2 -> uiautomator-v18:2.1.3 (This version fixed setWaitIdleTimeout not working bug)
# 1.0.14 catch NullException, add gps mock support
# 1.0.13 whatsinput support, but not very well
# 1.0.12 add toast support
# 1.0.11 add auto install support
# 1.0.10 fix service not started bug
# 1.0.9 fix apk version code and version name
# ERR: 1.0.8 bad version number. show ip on notification
# ERR: 1.0.7 bad version number. new input method, some bug fix
__atx_agent_version__ = '0.4.5'
# 0.4.5 add http log, change atx-agent -d into atx-agent server -d
# 0.4.4 this version is gone
# 0.4.3 ignore sigint to prevent atx-agent quit
# 0.4.2 hot fix, close upgrade-self
# 0.4.1 fix app-download time.Timer panic error, use safe-time.Timer instead.
# 0.4.0 add go-daemon lib. use safe-time.Timer to prevent panic error. this will make it run longer
# 0.3.6 support upload zip and unzip, fix minicap rotation error when atx-agent is killed -9
# 0.3.5 hot fix for session
# 0.3.4 fix session() sometimes can not get mainActivity error
# 0.3.3 /shell support timeout
# 0.3.2 fix dns resolve error when network changes
# 0.3.0 use github.com/codeskyblue/heartbeat library instead of websocket, add /whatsinput
# 0.2.1 support occupy /minicap connection
# 0.2.0 add session support
# 0.1.8 fix screenshot always the same image. (BUG in 0.1.7), add /shell/stream add timeout for /shell
# 0.1.7 fix dns resolve error in /install
# 0.1.6 change download logic. auto fix orientation
# 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10
# 0.1.4 phone remote control
# 0.1.2 /download support
# 0.1.1 minicap builtin | mit | Python
0c9accce7b3df8889ecf57b6df89a36628cb908c | add timeout for running scheduler | randy3k/UnitTesting,randy3k/UnitTesting,randy3k/UnitTesting,randy3k/UnitTesting | sbin/run_scheduler.py | sbin/run_scheduler.py | import subprocess
import tempfile
import time, os
import re
import sys
# cd ~/.config/sublime-text-3/Packages/UnitTesting
# python sbin/run_scheduler.py PACKAGE
# script directory
__dir__ = os.path.dirname(os.path.abspath(__file__))
version = int(subprocess.check_output(["subl","--version"]).decode('utf8').strip()[-4])
# sublime package directory
if sys.platform == "darwin":
sublime_package = os.path.expanduser("~/Library/Application Support/Sublime Text %d/Packages" % version)
elif "linux" in sys.platform:
sublime_package = os.path.expanduser("~/.config/sublime-text-%d/Packages" % version)
sys.path.append(os.path.join(sublime_package, "UnitTesting"))
from jsonio import *
package = sys.argv[1] if len(sys.argv)>1 else "UnitTesting"
outdir = os.path.join(sublime_package, "User", "UnitTesting", "tests_output")
outfile = os.path.join(outdir, package)
# remove output
if os.path.exists(outfile): os.unlink(outfile)
# add schedule
jpath = os.path.join(sublime_package, "User", "UnitTesting", "schedule.json")
j = jsonio(jpath)
schedule = j.load()
if not any([s['package']==package for s in schedule]):
schedule.append({'package': package})
j.save(schedule)
tasks = subprocess.check_output(['ps', 'xw']).decode('utf8')
sublime_is_running = "Sublime" in tasks or "sublime_text" in tasks
if sublime_is_running:
subprocess.Popen(["subl", "-b", "--command", "unit_testing_run_scheduler"])
else:
subprocess.Popen(["subl"])
# wait until the file has something
startt = time.time()
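# Poll for up to 60 seconds for the test output file to appear and contain data.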
while (not os.path.exists(outfile) or os.stat(outfile).st_size == 0):
sys.stdout.write('.')
sys.stdout.flush()
if time.time()-startt > 60:
print("Timeout: Sublime Text is not responding")
sys.exit(1)
time.sleep(1)
print("\nstart to read output")
# todo: use notification instead of polling
with open(outfile, 'r') as f:
while True:
result = f.read()
m = re.search("^(OK|FAILED|ERROR)", result, re.MULTILINE)
# break when OK or Failed
if m: break
time.sleep(0.2)
f.seek(0)
result = f.read()
print(result)
success = m.group(0)=="OK"
if not success:
sys.exit(1)
| import subprocess
import tempfile
import time, os
import re
import sys
# cd ~/.config/sublime-text-3/Packages/UnitTesting
# python sbin/run_scheduler.py PACKAGE
# script directory
__dir__ = os.path.dirname(os.path.abspath(__file__))
version = int(subprocess.check_output(["subl","--version"]).decode('utf8').strip()[-4])
# sublime package directory
if sys.platform == "darwin":
sublime_package = os.path.expanduser("~/Library/Application Support/Sublime Text %d/Packages" % version)
elif "linux" in sys.platform:
sublime_package = os.path.expanduser("~/.config/sublime-text-%d/Packages" % version)
sys.path.append(os.path.join(sublime_package, "UnitTesting"))
from jsonio import *
package = sys.argv[1] if len(sys.argv)>1 else "UnitTesting"
outdir = os.path.join(sublime_package, "User", "UnitTesting", "tests_output")
outfile = os.path.join(outdir, package)
# remove output
if os.path.exists(outfile): os.unlink(outfile)
# add schedule
jpath = os.path.join(sublime_package, "User", "UnitTesting", "schedule.json")
j = jsonio(jpath)
schedule = j.load()
if not any([s['package']==package for s in schedule]):
schedule.append({'package': package})
j.save(schedule)
tasks = subprocess.check_output(['ps', 'xw']).decode('utf8')
sublime_is_running = "Sublime" in tasks or "sublime_text" in tasks
if sublime_is_running:
subprocess.Popen(["subl", "-b", "--command", "unit_testing_run_scheduler"])
else:
subprocess.Popen(["subl"])
# wait until the file has something
while (not os.path.exists(outfile) or os.stat(outfile).st_size == 0):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1)
print("\nstart to read output")
# todo: use notification instead of polling
with open(outfile, 'r') as f:
while True:
result = f.read()
m = re.search("^(OK|FAILED|ERROR)", result, re.MULTILINE)
# break when OK or Failed
if m: break
time.sleep(0.2)
f.seek(0)
result = f.read()
print(result)
success = m.group(0)=="OK"
if not success:
sys.exit(1)
| mit | Python |
42f4ed206a9c79799b9bb0b13b829c8cf9c979e4 | write to file | pedsm/deepHack,pedsm/deepHack,pedsm/deepHack | scraper/parse_dump.py | scraper/parse_dump.py | #!/usr/bin/python
# Simple script to parse the devpost dump and place results in a json
import os
import json
from multiprocessing import Pool
from bs4 import BeautifulSoup
OUTPUT_FNAME="devpostdump.json"
DUMP_DIR = "output/"
projects = [os.path.join(DUMP_DIR, f) for f in os.listdir(DUMP_DIR)]
# projects = projects[:100]
projects_json = []
def process_project(inp):
i, project = inp
print "%d %s" % (i, project)
proj_html = BeautifulSoup(open(project, 'r').read(), 'html.parser')
proj_data = {}
proj_data['name'] = proj_html.find(id='app-title').string
proj_data['id'] = project[len(DUMP_DIR):]
# Number of likes and comments
num_likes = proj_html.find('span', { 'class' : 'ss-heart' }).next_sibling.next_sibling
proj_data['num_likes'] = int(num_likes.string) if num_likes is not None else 0
num_comments = proj_html.find('span', { 'class' : 'ss-quote' }).next_sibling.next_sibling
proj_data['num_comments'] = int(num_comments.string) if num_comments is not None else 0
# Length of the description
proj_data['description_length'] = len(proj_html.find(id="app-details").get_text())
# Number of contributors
proj_data['num_contributors'] = len(proj_html.find_all('li', { 'class' : 'software-team-member' }))
# Tags
proj_data['tags'] = sorted([tag.string for tag in proj_html.find_all('span', { 'class' : 'cp-tag' })])
# Hackathon details
hackathon_deets = proj_html.find('div', { 'class' : 'software-list-content' })
if hackathon_deets:
proj_data['hackathon_name'] = hackathon_deets.find('a').string
proj_data['num_prizes'] = len(hackathon_deets.find_all('span', { 'class' : 'winner' }))
return proj_data
if __name__ == '__main__':
num_cores = multiprocessing.cpu_count()
p = Pool(num_cores)
j = p.map(process_project, enumerate(projects[:1000]))
print "Creating json file"
with open(OUTPUT_FNAME, "w+") as f:
f.write(json.dump(j))
| #!/usr/bin/python
# Simple script to parse the devpost dump and place results in a json
import os
import json
from multiprocessing import Pool
from bs4 import BeautifulSoup
OUTPUT_FNAME="devpostdump.json"
DUMP_DIR = "output/"
projects = [os.path.join(DUMP_DIR, f) for f in os.listdir(DUMP_DIR)]
# projects = projects[:100]
projects_json = []
def process_project(inp):
i, project = inp
print "%d %s" % (i, project)
proj_html = BeautifulSoup(open(project, 'r').read(), 'html.parser')
proj_data = {}
proj_data['name'] = proj_html.find(id='app-title').string
proj_data['id'] = project[len(DUMP_DIR):]
# Number of likes and comments
num_likes = proj_html.find('span', { 'class' : 'ss-heart' }).next_sibling.next_sibling
proj_data['num_likes'] = int(num_likes.string) if num_likes is not None else 0
num_comments = proj_html.find('span', { 'class' : 'ss-quote' }).next_sibling.next_sibling
proj_data['num_comments'] = int(num_comments.string) if num_comments is not None else 0
# Length of the description
proj_data['description_length'] = len(proj_html.find(id="app-details").get_text())
# Number of contributors
proj_data['num_contributors'] = len(proj_html.find_all('li', { 'class' : 'software-team-member' }))
# Tags
proj_data['tags'] = sorted([tag.string for tag in proj_html.find_all('span', { 'class' : 'cp-tag' })])
# Hackathon details
hackathon_deets = proj_html.find('div', { 'class' : 'software-list-content' })
if hackathon_deets:
proj_data['hackathon_name'] = hackathon_deets.find('a').string
proj_data['num_prizes'] = len(hackathon_deets.find_all('span', { 'class' : 'winner' }))
return proj_data
if __name__ == '__main__':
num_cores = multiprocessing.cpu_count()
p = Pool(num_cores)
json = p.map(process_project, enumerate(projects[:1000]))
| mit | Python |
c8a010e6e9a917c50843dd10303f8f9497b4687c | Bump version | cosenal/waterbutler,hmoco/waterbutler,Ghalko/waterbutler,felliott/waterbutler,TomBaxter/waterbutler,chrisseto/waterbutler,rafaeldelucena/waterbutler,Johnetordoff/waterbutler,CenterForOpenScience/waterbutler,RCOSDP/waterbutler,icereval/waterbutler,kwierman/waterbutler,rdhyee/waterbutler | waterbutler/__init__.py | waterbutler/__init__.py | __version__ = '0.2.3'
__import__("pkg_resources").declare_namespace(__name__)
| __version__ = '0.2.2'
__import__("pkg_resources").declare_namespace(__name__)
| apache-2.0 | Python |
b0e3886ee24689f1eb249e0ed3c66d887b317f60 | Delete table test | cioc/grpc-rocksdb,cioc/grpc-rocksdb,cioc/grpc-rocksdb | tst/test.py | tst/test.py | #!/usr/bin/python
import grpc
import keyvalue_pb2
import os
import sys
if __name__ == '__main__':
conn_str = os.environ['GRPCROCKSDB_PORT'].split("/")[2]
print "Connecting on: " + conn_str
channel = grpc.insecure_channel(conn_str)
stub = keyvalue_pb2.KeyValueStub(channel)
create_table_res = stub.CreateTable(keyvalue_pb2.CreateTableReq(tablename='test-table-1'))
put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1',item=keyvalue_pb2.Item(key='myKey', value='12345')))
get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1',key='myKey'))
assert get_res.item.value == "12345"
try:
put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1',item=keyvalue_pb2.Item(key='myKey', value='99999'),condition="hello"))
print "Condition should not be met!"
sys.exit(1)
except Exception:
pass
get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1',key='myKey'))
assert get_res.item.value == "12345"
put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1',item=keyvalue_pb2.Item(key='myKey', value='99999'),condition="12345"))
get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1',key='myKey'))
assert get_res.item.value == "99999"
delete_table_res = stub.DeleteTable(keyvalue_pb2.DeleteTableReq(tablename='test-table-1'))
| #!/usr/bin/python
import grpc
import keyvalue_pb2
import os
import sys
if __name__ == '__main__':
conn_str = os.environ['GRPCROCKSDB_PORT'].split("/")[2]
print "Connecting on: " + conn_str
channel = grpc.insecure_channel(conn_str)
stub = keyvalue_pb2.KeyValueStub(channel)
create_table_res = stub.CreateTable(keyvalue_pb2.CreateTableReq(tablename='test-table-1'))
put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1',item=keyvalue_pb2.Item(key='myKey', value='12345')))
get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1',key='myKey'))
assert get_res.item.value == "12345"
try:
put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1',item=keyvalue_pb2.Item(key='myKey', value='99999'),condition="hello"))
print "Condition should not be met!"
sys.exit(1)
except Exception:
pass
get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1',key='myKey'))
assert get_res.item.value == "12345"
put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1',item=keyvalue_pb2.Item(key='myKey', value='99999'),condition="12345"))
get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1',key='myKey'))
assert get_res.item.value == "99999"
| mit | Python |
96340529a8d5702ce8c880aa66966b2971b96449 | change method | MadsJensen/malthe_alpha_project,MadsJensen/malthe_alpha_project | calc_cov.py | calc_cov.py | import mne
import sys
from mne import compute_covariance
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from my_settings import *
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=180e-6 #
)
subject = sys.argv[1]
epochs = mne.read_epochs(epochs_folder + "%s_trial_start-epo.fif" % subject)
epochs.drop_bad_epochs(reject)
fig = epochs.plot_drop_log(subject=subject, show=False)
fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)
# Make noise cov
cov = compute_covariance(epochs, tmin=None, tmax=-0.2,
method="factor_analysis")
mne.write_cov(mne_folder + "%s-cov.fif" % subject, cov)
| import mne
import sys
from mne import compute_covariance
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from my_settings import *
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=180e-6 #
)
subject = sys.argv[1]
epochs = mne.read_epochs(epochs_folder + "%s_trial_start-epo.fif" % subject)
epochs.drop_bad_epochs(reject)
fig = epochs.plot_drop_log(subject=subject, show=False)
fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)
# Make noise cov
cov = compute_covariance(epochs, tmin=None, tmax=-0.2, method="shrunk")
mne.write_cov(mne_folder + "%s-cov.fif" % subject, cov)
| mit | Python |
4be984747a41e5ab966b12afe9074a0e611faee2 | Add license text to resampling.py | talhaHavadar/RobotLocalization | resampling.py | resampling.py | """
MIT License
Copyright (c) 2017 Talha Can Havadar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@author Talha Can Havadar (talhaHavadar)
"""
import random
from collections import Counter
class ResamplingWheel(object):
"""
A Class implementation for resampling wheel
Creates an imaginary wheel that consist of weighted portions.
According to these weights, you can pick an index value.
Index with more weights has more chance to be picked up.
"""
def __init__(self, initiate_with=None):
self.wheel = []
self.max_weight = None
self.is_resampled = False
self.beta = 0.0
self.last_index = 0
if initiate_with is not None and isinstance(initiate_with, list):
self.wheel = initiate_with
self.length = len(self.wheel)
if self.length > 0:
self.max_weight = max(self.wheel)
self.last_index = int(random.random() * self.length)
def get_pick_index(self):
"""
Returns an index value according to given data.
Given data's length and weights matter
"""
if not self.is_resampled:
self.__resample__()
while self.beta > self.wheel[self.last_index]:
self.beta -= self.wheel[self.last_index]
self.last_index = (self.last_index + 1) % self.length
self.is_resampled = False
return self.last_index
def __resample__(self):
self.beta += random.random() * 2.0 * self.max_weight
self.is_resampled = True
def __len__(self):
return len(self.wheel)
if __name__ == "__main__":
DATA = [10, 11, 12, 13, 14]
SAMPLING = ResamplingWheel([5, 2, 1, 1, 1])
SAMPLED = []
print("Length of the sampling wheel:", len(SAMPLING))
for i in range(100):
index = SAMPLING.get_pick_index()
print(DATA[index])
SAMPLED.append(DATA[index])
print(Counter(SAMPLED))
| """
@author Talha Can Havadar (talhaHavadar)
"""
import random
from collections import Counter
class ResamplingWheel(object):
"""
A Class implementation for resampling wheel
Creates an imaginary wheel that consist of weighted portions.
According to these weights, you can pick an index value.
Index with more weights has more chance to be picked up.
"""
def __init__(self, initiate_with=None):
self.wheel = []
self.max_weight = None
self.is_resampled = False
self.beta = 0.0
self.last_index = 0
if initiate_with is not None and isinstance(initiate_with, list):
self.wheel = initiate_with
self.length = len(self.wheel)
if self.length > 0:
self.max_weight = max(self.wheel)
self.last_index = int(random.random() * self.length)
def get_pick_index(self):
"""
Returns an index value according to given data.
Given data's length and weights matter
"""
if not self.is_resampled:
self.__resample__()
while self.beta > self.wheel[self.last_index]:
self.beta -= self.wheel[self.last_index]
self.last_index = (self.last_index + 1) % self.length
self.is_resampled = False
return self.last_index
def __resample__(self):
self.beta += random.random() * 2.0 * self.max_weight
self.is_resampled = True
def __len__(self):
return len(self.wheel)
if __name__ == "__main__":
DATA = [10, 11, 12, 13, 14]
SAMPLING = ResamplingWheel([5, 2, 1, 1, 1])
SAMPLED = []
print("Length of the sampling wheel:", len(SAMPLING))
for i in range(100):
index = SAMPLING.get_pick_index()
print(DATA[index])
SAMPLED.append(DATA[index])
print(Counter(SAMPLED))
| mit | Python |
5fa9e88e9402a4ca12f2f54298d397bc7b54728b | Revert "deactivated test for non-existent 'references'" | codethesaurus/codethesaur.us,codethesaurus/codethesaur.us | web/tests/test_views.py | web/tests/test_views.py | from django.test import TestCase, Client
from django.urls import reverse
from web.views import index, about, compare, reference
class TestViews(TestCase):
def test_index_view_GET(self):
url = reverse('index')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'index.html')
self.assertTemplateUsed(response, 'base.html')
def test_about_view_GET(self):
url = reverse('about')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'about.html')
self.assertTemplateUsed(response, 'base.html')
def test_compare_view_GET(self):
url = reverse('compare') + '?concept=data_types&lang1=python&lang2=java'
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'compare.html')
self.assertTemplateUsed(response, 'base.html')
def test_reference_view_GET(self):
url = reverse('reference') + '?concept=data_types&lang=python'
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'reference.html')
self.assertTemplateUsed(response, 'base.html')
| from django.test import TestCase, Client
from django.urls import reverse
from web.views import index, about, compare, reference
class TestViews(TestCase):
def test_index_view_GET(self):
url = reverse('index')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'index.html')
self.assertTemplateUsed(response, 'base.html')
def test_about_view_GET(self):
url = reverse('about')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'about.html')
self.assertTemplateUsed(response, 'base.html')
def test_compare_view_GET(self):
url = reverse('compare') + '?concept=data_types&lang1=python&lang2=java'
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'compare.html')
self.assertTemplateUsed(response, 'base.html')
def test_reference_view_GET(self):
pass # Uncomment these tests when 'reference' section is made
# url = reverse('reference') + '?concept=data_types&lang=python'
# response = self.client.get(url)
# self.assertEquals(response.status_code, 200)
# self.assertTemplateUsed(response, 'reference.html')
# self.assertTemplateUsed(response, 'base.html')
| agpl-3.0 | Python |
a5fddaefdedef18b0b6b7d3b2ec65f64eaaaad65 | fix date time bug | edlongman/thescoop,edlongman/thescoop,edlongman/thescoop | clean_db.py | clean_db.py | import MySQLdb, config, urllib, cgi, datetime
from datetime import datetime, timedelta
sql = MySQLdb.connect(host="localhost",
user=config.username,
passwd=config.passwd,
db=config.test_db)
sql.query("SELECT `id` FROM `feedurls`")
db_feed_query=sql.store_result()
rss_urls=db_feed_query.fetch_row(0)
table_name = "stories"
date_from = datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
date_to = datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
for rss_url_data in rss_urls:
feed_id=rss_url_data[0]
i = date_from
    while i <= date_to:
        print i.strftime("%d/%m/%Y")
        # advance one day at a time so the loop terminates
        i += timedelta(days=1)
| import MySQLdb, config, urllib, cgi, datetime
from datetime import datetime
sql = MySQLdb.connect(host="localhost",
user=config.username,
passwd=config.passwd,
db=config.test_db)
sql.query("SELECT `id` FROM `feedurls`")
db_feed_query=sql.store_result()
rss_urls=db_feed_query.fetch_row(0)
table_name = "stories"
date_from = datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
date_to = datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
for rss_url_data in rss_urls:
feed_id=rss_url_data[0]
i = start_date
while i <= end_date:
print end_dates
| apache-2.0 | Python |
d64460c8bbbe045dcdf9f737562a31d84044acce | Change package name to 'cirm' to avoid confusion. | informatics-isi-edu/microscopy,informatics-isi-edu/microscopy,informatics-isi-edu/microscopy,informatics-isi-edu/microscopy | rest/setup.py | rest/setup.py | #
# Copyright 2012 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.core import setup
setup(name="cirm-rest",
description="cirm web application",
version="0.1",
package_dir={"": "src"},
packages=["cirm"],
requires=["web.py", "psycopg2"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| #
# Copyright 2012 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.core import setup
setup(name="cirm-rest",
description="cirm web application",
version="0.1",
package_dir={"": "src"},
packages=["cirmrest"],
requires=["web.py", "psycopg2"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| apache-2.0 | Python |
7484c8d4ab699ee16bc867cdff1e7ec699dbb142 | Add profiling support to Melange. By assigning profile_main_as_logs or profile_main_as_html to main variable you can turn on profiling. profile_main_as_logs will log profile data to App Engine console logs, profile_main_as_html will show profile data as html at the bottom of the page. If you want to profile app on deployed app just set the profiling function and deploy it. | SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange | app/main.py | app/main.py | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
# alphabetical order by last name, please
'"Augie Fackler" <durin42@gmail.com>',
]
import logging
import os
import sys
from google.appengine.ext.webapp import util
# Remove the standard version of Django.
for k in [k for k in sys.modules if k.startswith('django')]:
del sys.modules[k]
# Force sys.path to have our own directory first, in case we want to import
# from it. This lets us replace the built-in Django
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, os.path.abspath('django.zip'))
ultimate_sys_path = None
# Force Django to reload its settings.
from django.conf import settings
settings._target = None
# Must set this env var before importing any part of Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
import django.core.signals
import django.db
# Log errors.
def log_exception(*args, **kwds):
logging.exception('Exception in request:')
# Log all exceptions detected by Django.
django.core.signals.got_request_exception.connect(log_exception)
# Unregister the rollback event handler.
django.core.signals.got_request_exception.disconnect(
django.db._rollback_on_exception)
def profile_main_as_html():
"""Main program for profiling. Profiling data added as HTML to the page.
"""
import cProfile
import pstats
import StringIO
prof = cProfile.Profile()
prof = prof.runctx('real_main()', globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
# stats.strip_dirs() # Don't; too many modules are named __init__.py.
# 'time', 'cumulative' or 'calls'
stats.sort_stats('time')
# Optional arg: how many to print
stats.print_stats()
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
print '\n<hr>'
print '<h1>Profile data</h1>'
print '<pre>'
print stream.getvalue()[:1000000]
print '</pre>'
def profile_main_as_logs():
"""Main program for profiling. Profiling data logged.
"""
import cProfile
import pstats
import StringIO
prof = cProfile.Profile()
prof = prof.runctx("real_main()", globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
stats.sort_stats('time') # Or cumulative
stats.print_stats(80) # 80 = how many to print
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
logging.info("Profile data:\n%s", stream.getvalue())
def real_main():
"""Main program without profiling.
"""
global ultimate_sys_path
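  # Cache the pristine sys.path on the first call; restore it on every later call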
if ultimate_sys_path is None:
ultimate_sys_path = list(sys.path)
else:
sys.path[:] = ultimate_sys_path
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
# Run the WSGI CGI handler with that application.
util.run_wsgi_app(application)
main = real_main
if __name__ == '__main__':
main()
| #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
# alphabetical order by last name, please
'"Augie Fackler" <durin42@gmail.com>',
]
import logging
import os
import sys
from google.appengine.ext.webapp import util
# Remove the standard version of Django.
for k in [k for k in sys.modules if k.startswith('django')]:
del sys.modules[k]
# Force sys.path to have our own directory first, in case we want to import
# from it. This lets us replace the built-in Django
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, os.path.abspath('django.zip'))
ultimate_sys_path = None
# Force Django to reload its settings.
from django.conf import settings
settings._target = None
# Must set this env var before importing any part of Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
import django.core.signals
import django.db
# Log errors.
def log_exception(*args, **kwds):
logging.exception('Exception in request:')
# Log all exceptions detected by Django.
django.core.signals.got_request_exception.connect(log_exception)
# Unregister the rollback event handler.
django.core.signals.got_request_exception.disconnect(
django.db._rollback_on_exception)
def main():
global ultimate_sys_path
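  # Cache the pristine sys.path on the first call; restore it on every later call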
if ultimate_sys_path is None:
ultimate_sys_path = list(sys.path)
else:
sys.path[:] = ultimate_sys_path
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
# Run the WSGI CGI handler with that application.
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
fc05512b3ad40f6571ee3d942e4829a19e2a465e | Add core.models.Sensor | HeisenbergPeople/weather-station-site,HeisenbergPeople/weather-station-site,HeisenbergPeople/weather-station-site | sensor/core/models.py | sensor/core/models.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.db import models
class GenericSensor(models.Model):
"""Represents a sensor abstracting away the specifics of what it measures.
A sensor measures one kind of thing. A physical device might have
multiple logical sensors.
"""
name = models.CharField(max_length=256)
model = models.CharField(max_length=128)
class Meta:
unique_together = [('name', 'model')]
class Sensor(models.Model):
"""Base class for specific sensor types."""
generic_sensor = models.OneToOneField(GenericSensor)
class Meta:
abstract = True | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.db import models
class GenericSensor(models.Model):
"""Represents a sensor abstracting away the specifics of what it measures.
A sensor measures one kind of thing. A physical device might have
multiple logical sensors.
"""
name = models.CharField(max_length=256)
model = models.CharField(max_length=128)
class Meta:
unique_together = [('name', 'model')] | mpl-2.0 | Python |
d9d9b993edc8baebf69b446d40f0a05260a041d5 | Remove prints | martinlunde/RealBack,martinlunde/RealBack,martinlunde/RealBack | emailauth/tests.py | emailauth/tests.py | from django.test import Client, TestCase
from emailauth import forms
c = Client()
class FormTests(TestCase):
def test_creation_form(self):
form_data = {'email': 'test@test.com', 'password1': 'test1234', 'password2': 'test1234'}
form = forms.UserCreationForm(form_data)
# Testing if form is valid, and that the fields are working.
self.assertTrue(form.is_valid())
def test_form_save(self):
form_data = {'email': 'test@test.com', 'password1': 'test1234', 'password2': 'test1234'}
form = forms.UserCreationForm(form_data)
# Testing if form is valid, and that the fields are working.
self.assertTrue(form.is_valid())
user = form.save()
# Testing if save function is returning properly
self.assertEqual(str(user), 'test@test.com')
def test_not_identically_passwords(self):
form_data = {'email': 'test@test.com', 'password1': '1234test', 'password2': 'test1234'}
form = forms.UserCreationForm(form_data)
# Testing if form is invalid when passwords are not matching.
self.assertFalse(form.is_valid())
def test_register_by_post(self):
        # Testing registration through POST requests
get_response = c.get('/register/')
post_response_wrong = c.post('/register/', {
'username': 'testUser',
'password1': 'test1234',
'password2': 'test1234',
})
post_response = c.post('/register/', {
'email': 'test@test.com',
'password1': 'testPass1234',
'password2': 'testPass1234',
})
self.assertEqual(get_response.status_code, 200)
self.assertNotEqual(post_response_wrong.status_code, 302)
self.assertEqual(post_response.status_code, 302)
| from django.test import Client, TestCase
from emailauth import forms
c = Client()
class FormTests(TestCase):
def test_creation_form(self):
form_data = {'email': 'test@test.com', 'password1': 'test1234', 'password2': 'test1234'}
form = forms.UserCreationForm(form_data)
# Testing if form is valid, and that the fields are working.
self.assertTrue(form.is_valid())
def test_form_save(self):
form_data = {'email': 'test@test.com', 'password1': 'test1234', 'password2': 'test1234'}
form = forms.UserCreationForm(form_data)
# Testing if form is valid, and that the fields are working.
self.assertTrue(form.is_valid())
user = form.save()
# Testing if save function is returning properly
self.assertEqual(str(user), 'test@test.com')
def test_not_identically_passwords(self):
form_data = {'email': 'test@test.com', 'password1': '1234test', 'password2': 'test1234'}
form = forms.UserCreationForm(form_data)
# Testing if form is invalid when passwords are not matching.
self.assertFalse(form.is_valid())
def test_register_by_post(self):
        # Testing registration through POST requests
get_response = c.get('/register/')
print(get_response.status_code)
post_response_wrong = c.post('/register/', {
'username': 'testuser@test.com',
'password1': 'test1234',
'password2': 'test1234',
})
print(post_response_wrong.status_code)
post_response = c.post('/register/', {
'email': 'test@test.com',
'password1': 'testPass1234',
'password2': 'testPass1234',
})
print(post_response.status_code)
self.assertEqual(get_response.status_code, 200)
self.assertNotEqual(post_response_wrong.status_code, 302)
self.assertEqual(post_response.status_code, 302)
| mit | Python |
265e9added53d1eee1291b9e0b5a10bc7dfe19c8 | Make sure we don't have section A before doing the extra round of manipulation | uw-it-aca/myuw,fanglinfang/myuw,fanglinfang/myuw,uw-it-aca/myuw,uw-it-aca/myuw,uw-it-aca/myuw,fanglinfang/myuw | myuw_mobile/test/dao/canvas.py | myuw_mobile/test/dao/canvas.py | from django.test import TestCase
from django.test.client import RequestFactory
from myuw_mobile.dao.canvas import get_indexed_data_for_regid
from myuw_mobile.dao.canvas import get_indexed_by_decrosslisted
from myuw_mobile.dao.schedule import _get_schedule
from myuw_mobile.dao.term import get_current_quarter
class TestCanvas(TestCase):
def test_crosslinks(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
data = get_indexed_data_for_regid("12345678901234567890123456789012")
physics = data['2013,spring,PHYS,121/A']
self.assertEquals(physics.course_url, 'https://canvas.uw.edu/courses/149650')
has_section_a = '2013,spring,TRAIN,100/A' in data
self.assertFalse(has_section_a)
train = data['2013,spring,TRAIN,100/B']
self.assertEquals(train.course_url, 'https://canvas.uw.edu/courses/249650')
def test_crosslinks_lookup(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
data = get_indexed_data_for_regid("12345678901234567890123456789012")
now_request = RequestFactory().get("/")
now_request.session = {}
term = get_current_quarter(now_request)
schedule = _get_schedule("12345678901234567890123456789012", term)
canvas_data_by_course_id = get_indexed_by_decrosslisted(data, schedule.sections)
physics = data['2013,spring,PHYS,121/A']
self.assertEquals(physics.course_url, 'https://canvas.uw.edu/courses/149650')
train = data['2013,spring,TRAIN,100/A']
self.assertEquals(train.course_url, 'https://canvas.uw.edu/courses/249650')
| from django.test import TestCase
from django.test.client import RequestFactory
from myuw_mobile.dao.canvas import get_indexed_data_for_regid
from myuw_mobile.dao.canvas import get_indexed_by_decrosslisted
from myuw_mobile.dao.schedule import _get_schedule
from myuw_mobile.dao.term import get_current_quarter
class TestCanvas(TestCase):
def test_crosslinks(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
data = get_indexed_data_for_regid("12345678901234567890123456789012")
physics = data['2013,spring,PHYS,121/A']
self.assertEquals(physics.course_url, 'https://canvas.uw.edu/courses/149650')
train = data['2013,spring,TRAIN,100/B']
self.assertEquals(train.course_url, 'https://canvas.uw.edu/courses/249650')
def test_crosslinks_lookup(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
data = get_indexed_data_for_regid("12345678901234567890123456789012")
now_request = RequestFactory().get("/")
now_request.session = {}
term = get_current_quarter(now_request)
schedule = _get_schedule("12345678901234567890123456789012", term)
canvas_data_by_course_id = get_indexed_by_decrosslisted(data, schedule.sections)
physics = data['2013,spring,PHYS,121/A']
self.assertEquals(physics.course_url, 'https://canvas.uw.edu/courses/149650')
train = data['2013,spring,TRAIN,100/A']
self.assertEquals(train.course_url, 'https://canvas.uw.edu/courses/249650')
| apache-2.0 | Python |
ae948a2dfdd62af2ba98a0ee506ddd48504ee64b | bump version to 0.6-dev | msabramo/validictory,ahassany/validictory,mgrandi/validictory,MeilleursAgents/validictory,stxnext/validictory,soulrebel/validictory,nicolaiarocci/validictory,enotodden/validictory,brotchie/validictory,travelbird/validictory,trigger-corp/validictory,alonho/validictory,dokai/validictory,fvieira/validictory,rhettg/validictory,ceymard/validictory,pombredanne/validictory,talos/validictory,kyleconroy/validictory,filod/validictory,dmr/validictory,andretw/validmore,andrecp/validictory,sunlightlabs/validictory,jalaziz/validictory,Lujeni/validictory | validictory/__init__.py | validictory/__init__.py | #!/usr/bin/env python
from validictory.validator import SchemaValidator
__all__ = [ 'validate', 'SchemaValidator' ]
__version__ = '0.6.0-dev'
def validate(data, schema, validator_cls=SchemaValidator):
'''
Validates a parsed json document against the provided schema. If an
error is found a ValueError is raised.
``data`` is a python dictionary object of parsed json data.
``schema`` is a python dictionary object representing the schema.
If ``validator_cls`` is provided that class will be used to validate
the given ``schema`` against the given ``data``. The given class should
be a subclass of the SchemaValidator class.
'''
v = validator_cls()
return v.validate(data,schema)
if __name__ == '__main__':
import sys
import json
if len(sys.argv) == 2:
if sys.argv[1] == "--help":
raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
schemafile = open(sys.argv[1], 'rb')
infile = sys.stdin
elif len(sys.argv) == 3:
schemafile = open(sys.argv[1], 'rb')
infile = open(sys.argv[2], 'rb')
else:
raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
try:
obj = json.load(infile)
schema = json.load(schemafile)
validate(obj, schema)
except ValueError, e:
raise SystemExit(e)
| #!/usr/bin/env python
from validictory.validator import SchemaValidator
__all__ = [ 'validate', 'SchemaValidator' ]
__version__ = '0.5.0'
def validate(data, schema, validator_cls=SchemaValidator):
'''
Validates a parsed json document against the provided schema. If an
error is found a ValueError is raised.
``data`` is a python dictionary object of parsed json data.
``schema`` is a python dictionary object representing the schema.
If ``validator_cls`` is provided that class will be used to validate
the given ``schema`` against the given ``data``. The given class should
be a subclass of the SchemaValidator class.
'''
v = validator_cls()
return v.validate(data,schema)
if __name__ == '__main__':
import sys
import json
if len(sys.argv) == 2:
if sys.argv[1] == "--help":
raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
schemafile = open(sys.argv[1], 'rb')
infile = sys.stdin
elif len(sys.argv) == 3:
schemafile = open(sys.argv[1], 'rb')
infile = open(sys.argv[2], 'rb')
else:
raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
try:
obj = json.load(infile)
schema = json.load(schemafile)
validate(obj, schema)
except ValueError, e:
raise SystemExit(e)
| mit | Python |
db033a9560ee97b5281adbf05f3f452943d592d7 | Add test_get_on_call and test_weekly | wking/django-on-call | django_on_call/tests.py | django_on_call/tests.py | import datetime
from django.test import TestCase
from .models import OnCall
class SimpleTest(TestCase):
def test_get_on_call(self):
"""Test the basic OnCall.get_on_call functionality
"""
on_call = OnCall(slug='test', rule='on_call = "Alice"')
self.assertEqual(on_call.get_on_call(), 'Alice')
def test_weekly(self):
"""Test a week-on round robin
"""
on_call = OnCall(slug='test', rule='\n'.join([
'handlers = ["Alice", "Bob", "Charlie"]',
'week = int(now.strftime("%W"))',
'on_call = handlers[week % len(handlers)]',
]))
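        # %W is the week-of-year number, so the on-call handler rotates weekly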
for now, expected in [
(datetime.datetime(2013, 1, 1), 'Alice'),
(datetime.datetime(2013, 1, 8), 'Bob'),
(datetime.datetime(2013, 1, 15), 'Charlie'),
(datetime.datetime(2013, 1, 22), 'Alice'),
]:
self.assertEqual(on_call.get_on_call(now=now), expected)
| """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
| bsd-2-clause | Python |
781e20bc3f465bdaac50f0f2a637b037d892c054 | Remove premature optimisation | Rypac/sublime-format | src/registry.py | src/registry.py | from .formatters import *
class FormatRegistry():
def __init__(self):
self.__formatters = [
ClangFormat(), ElmFormat(), GoFormat(), JavaScriptFormat(),
PythonFormat(), RustFormat(), TerraformFormat()
]
@property
def all(self):
return self.__formatters
@property
def enabled(self):
return [x for x in self.all if x.format_on_save]
def find(self, predicate, default=None):
return next((x for x in self.all if predicate(x)), default)
def by_view(self, view):
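        # scope_name returns space-separated scopes; the first names the syntax, e.g. 'source.python'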
source = view.scope_name(0).split(' ')[0]
return self.find(lambda x: x.source == source)
def by_name(self, name):
return self.find(lambda x: x.name == name)
| from .formatters import *
class FormatRegistry():
def __init__(self):
self.__registered_formatters = [
ClangFormat(), ElmFormat(), GoFormat(), JavaScriptFormat(),
PythonFormat(), RustFormat(), TerraformFormat()
]
self.__source_formatter_lookup_table = {}
for formatter in self.__registered_formatters:
self.__source_formatter_lookup_table[formatter.source] = formatter
@property
def all(self):
return self.__registered_formatters
@property
def enabled(self):
return [x for x in self.all if x.format_on_save]
def find(self, predicate, default=None):
return next((x for x in self.all if predicate(x)), default)
def by_view(self, view):
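        # scope_name returns space-separated scopes; the first names the syntax, e.g. 'source.python'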
source = view.scope_name(0).split(' ')[0]
return self.__source_formatter_lookup_table.get(source)
def by_name(self, name):
return self.find(lambda x: x.name == name)
| mit | Python |
945e7d1ef165054891a0ac574d52f6a1c3b7a162 | Add long help | goodwinxp/ATFGenerator,goodwinxp/ATFGenerator,goodwinxp/ATFGenerator | code_gen.py | code_gen.py | import sys
import getopt
from config import CONFIG
from ida_code_gen import IdaCodeGen
from ida_parser import IdaInfoParser
def print_help():
print 'Options:'
print ' -d, --database Path to database from arguments. Default = ' + CONFIG['database']
print ' -o, --out_dir Path to output directory for code generation. Default = ' + CONFIG['out_dir']
print ' -v, --verbose Verbose mode programm. Default = ' + str(CONFIG['verbose'])
print 'Example:'
print ' python code_gen.py -v --database C:/ida_info.sqlite3 --out_dir C:/code_gen/'
pass
def main(argv):
try:
        opts, args = getopt.getopt(argv, 'hvd:o:', ['help', 'verbose', 'database=', 'out_dir='])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
        if opt in ('-h', '--help'):
print_help()
sys.exit()
if opt in ('-v', '--verbose'):
CONFIG['verbose'] = True
continue
if opt in ('-d', '--database'):
CONFIG['database'] = arg
continue
if opt in ('-o', '--out_dir'):
CONFIG['out_dir'] = arg
continue
if CONFIG['verbose']:
print 'database: ' + CONFIG['database']
print 'out_dir: ' + CONFIG['out_dir']
print 'verbose: ' + str(CONFIG['verbose'])
parser = IdaInfoParser(CONFIG['database'])
parser.start()
code_gen = IdaCodeGen(CONFIG['database'], CONFIG['out_dir'])
code_gen.start()
if __name__ == '__main__':
main(sys.argv[1:])
| import sys
import getopt
from config import CONFIG
from ida_code_gen import IdaCodeGen
from ida_parser import IdaInfoParser
def print_help():
print 'Options:'
print ' -d, --database Path to database from arguments. Default = ' + CONFIG['database']
print ' -o, --out_dir Path to output directory for code generation. Default = ' + CONFIG['out_dir']
print ' -v, --verbose Verbose mode programm. Default = ' + str(CONFIG['verbose'])
print 'Example:'
print ' python code_gen.py -v --database C:/ida_info.sqlite3 --out_dir C:/code_gen/'
pass
def main(argv):
try:
opts, args = getopt.getopt(argv, 'hvdo', ['verbose', 'database=', 'out_dir='])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
if opt in ('-v', '--verbose'):
CONFIG['verbose'] = True
continue
if opt in ('-d', '--database'):
CONFIG['database'] = arg
continue
if opt in ('-o', '--out_dir'):
CONFIG['out_dir'] = arg
continue
if CONFIG['verbose']:
print 'database: ' + CONFIG['database']
print 'out_dir: ' + CONFIG['out_dir']
print 'verbose: ' + str(CONFIG['verbose'])
parser = IdaInfoParser(CONFIG['database'])
parser.start()
code_gen = IdaCodeGen(CONFIG['database'], CONFIG['out_dir'])
code_gen.start()
if __name__ == '__main__':
main(sys.argv[1:])
| mit | Python |
2ad94140360f893ad46b1b972e753f2a78b5f779 | print function | konomae/lastpass-python,dhercher/lastpass-python | example/example.py | example/example.py | # coding: utf-8
import json
import os
import lastpass
with open(os.path.join(os.path.dirname(__file__), 'credentials.json')) as f:
credentials = json.load(f)
username = str(credentials['username'])
password = str(credentials['password'])
try:
# First try without a multifactor password
vault = lastpass.Vault.open_remote(username, password)
except lastpass.LastPassIncorrectGoogleAuthenticatorCodeError as e:
# Get the code
multifactor_password = input('Enter Google Authenticator code:')
# And now retry with the code
vault = lastpass.Vault.open_remote(username, password, multifactor_password)
except lastpass.LastPassIncorrectYubikeyPasswordError as e:
# Get the code
multifactor_password = input('Enter Yubikey password:')
# And now retry with the code
vault = lastpass.Vault.open_remote(username, password, multifactor_password)
for index, i in enumerate(vault.accounts):
print("{} {} {} {} {} {} {}".format(index + 1, i.id, i.name, i.username, i.password, i.url, i.group))
| # coding: utf-8
import json
import os
import lastpass
with open(os.path.join(os.path.dirname(__file__), 'credentials.json')) as f:
credentials = json.load(f)
username = str(credentials['username'])
password = str(credentials['password'])
try:
# First try without a multifactor password
vault = lastpass.Vault.open_remote(username, password)
except lastpass.LastPassIncorrectGoogleAuthenticatorCodeError as e:
# Get the code
multifactor_password = input('Enter Google Authenticator code:')
# And now retry with the code
vault = lastpass.Vault.open_remote(username, password, multifactor_password)
except lastpass.LastPassIncorrectYubikeyPasswordError as e:
# Get the code
multifactor_password = input('Enter Yubikey password:')
# And now retry with the code
vault = lastpass.Vault.open_remote(username, password, multifactor_password)
for index, i in enumerate(vault.accounts):
print index+1, i.id, i.name, i.username, i.password, i.url, i.group
| mit | Python |
cefa0a94582e40f92c48d6c91cf393c9b0310713 | fix geojson in sources dir | OpenBounds/Processing | validate.py | validate.py |
import json
import re
import click
import jsonschema
import utils
@click.command()
@click.argument('schema', type=click.File('r'), required=True)
@click.argument('jsonfiles', type=click.Path(exists=True), required=True)
def validate(schema, jsonfiles):
"""Validate a JSON files against a JSON schema.
\b
SCHEMA: JSON schema to validate against. Required.
JSONFILE: JSON files to validate. Required.
"""
schema = json.loads(schema.read())
for path in utils.get_files(jsonfiles):
if path.startswith('sources'):
regex = r'sources/[A-Z]{2}/[A-Z]{2}/[a-z-]+.json'
elif path.startswith('generated'):
regex = r'generated/[A-Z]{2}/[A-Z]{2}/[a-z-]+.geojson'
else:
regex = r''
if not re.compile(regex).match(path):
raise AssertionError('Path does not match spec for ' + path)
with open(path) as f:
jsonfile = json.loads(f.read())
jsonschema.validate(jsonfile, schema)
if __name__ == '__main__':
validate()
|
import json
import re
import click
import jsonschema
import utils
@click.command()
@click.argument('schema', type=click.File('r'), required=True)
@click.argument('jsonfiles', type=click.Path(exists=True), required=True)
def validate(schema, jsonfiles):
"""Validate a JSON files against a JSON schema.
\b
SCHEMA: JSON schema to validate against. Required.
JSONFILE: JSON files to validate. Required.
"""
schema = json.loads(schema.read())
for path in utils.get_files(jsonfiles):
regex = r'(sources|generated)/[A-Z]{2}/[A-Z]{2}/[a-z-]+.(geo)?json'
if not re.compile(regex).match(path):
raise AssertionError('Source path does not match spec for ' + path)
with open(path) as f:
jsonfile = json.loads(f.read())
jsonschema.validate(jsonfile, schema)
if __name__ == '__main__':
validate()
| mit | Python |
7e16a9feb88023a03363aee5be552a2f15b825fc | 修复 waiting 状态下颜色错误的问题 | wangmingjob/OnlineJudge,Timeship/OnlineJudge-QDU,uestcxl/OnlineJudge,wangmingjob/OnlineJudge,wwj718/OnlineJudge,wangmingjob/OnlineJudge,hxsf/OnlineJudge,Timeship/OnlineJudge-1,hxsf/OnlineJudge,hxsf/OnlineJudge,Timeship/OnlineJudge-1,Timeship/OnlineJudge-1,wangmingjob/OnlineJudge,uestcxl/OnlineJudge,hxsf/OnlineJudge,Timeship/OnlineJudge-QDU,Timeship/OnlineJudge-QDU,uestcxl/OnlineJudge,wwj718/OnlineJudge,Timeship/OnlineJudge-QDU,Timeship/OnlineJudge-1,wwj718/OnlineJudge | utils/templatetags/submission.py | utils/templatetags/submission.py | # coding=utf-8
def translate_result(value):
results = {
0: "Accepted",
1: "Runtime Error",
2: "Time Limit Exceeded",
3: "Memory Limit Exceeded",
4: "Compile Error",
5: "Format Error",
6: "Wrong Answer",
7: "System Error",
8: "Waiting"
}
return results[value]
def translate_id(submission_item):
return submission_item["_id"]
def translate_language(value):
return {1: "C", 2: "C++", 3: "Java"}[value]
def translate_result_class(value):
if value == 0:
return "success"
elif value == 8:
return "info"
return "danger"
from django import template
register = template.Library()
register.filter("translate_result", translate_result)
register.filter("translate_id", translate_id)
register.filter("translate_language", translate_language)
register.filter("translate_result_class", translate_result_class) | # coding=utf-8
def translate_result(value):
results = {
0: "Accepted",
1: "Runtime Error",
2: "Time Limit Exceeded",
3: "Memory Limit Exceeded",
4: "Compile Error",
5: "Format Error",
6: "Wrong Answer",
7: "System Error",
8: "Waiting"
}
return results[value]
def translate_id(submission_item):
return submission_item["_id"]
def translate_language(value):
return {1: "C", 2: "C++", 3: "Java"}[value]
def translate_result_class(value):
if value == 0:
return "success"
elif value == "8":
return "info"
return "danger"
from django import template
register = template.Library()
register.filter("translate_result", translate_result)
register.filter("translate_id", translate_id)
register.filter("translate_language", translate_language)
register.filter("translate_result_class", translate_result_class) | mit | Python |
d17a88ac9ef8e3806c7ac60d31df62a1041939cb | Add sum_of_spreads | skearnes/muv | muv/spatial.py | muv/spatial.py | """
Spatial statistics.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
def spread(d, t):
"""
Calculate the spread between two sets of compounds.
Given a matrix containing distances between two sets of compounds, A
and B, calculate the fraction of compounds in set A that are closer
than t to any compound in set B.
Parameters
----------
d : ndarray
Distance matrix with compounds from set A on first axis.
t : float
Distance threshold.
"""
s = np.mean(np.any(d < t, axis=1))
return s
def sum_of_spreads(d, coeff, min_t=0, max_t=3, step=None):
"""
Calculate the sum of spreads across a range of distance thresholds.
Parameters
----------
d : ndarray
Distance matrix with compounds from set A on first axis.
coeff : float
Coefficient used to rescale distance thresholds.
min_t : float, optional (default 0)
Minimum distance threshold (before rescaling).
max_t : float, optional (default 3)
Maximum distance threshold (before rescaling).
step : float, optional
Step size for determining values to sample between min_t and max_t.
If not provided, defaults to max_t / 500.
"""
if step is None:
step = max_t / 500.
n_steps = int((max_t - min_t) / step)
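    # Evenly spaced thresholds on [min_t, max_t], rescaled by coeff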
thresholds = coeff * np.linspace(min_t, max_t, n_steps)
ss = np.sum([spread(d, t) for t in thresholds])
return ss
| """
Spatial statistics.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
def spread(d, t):
"""
Calculate the spread between two sets of compounds.
Given a matrix containing distances between two sets of compounds, A
and B, calculate the fraction of compounds in set A that are closer
than t to any compound in set B.
Parameters
----------
d : ndarray
Distance matrix with compounds from set A on first axis.
t : float
Distance threshold.
"""
p = np.mean(np.any(d < t, axis=1))
return p
| bsd-3-clause | Python |
e05736cd36bc595070dda78e91bcb1b4bcfd983c | Remove deprecated usage of `reflect` constructor param | globality-corp/microcosm-postgres,globality-corp/microcosm-postgres | microcosm_postgres/operations.py | microcosm_postgres/operations.py | """
Common database operations.
"""
from sqlalchemy import MetaData
from sqlalchemy.exc import ProgrammingError
from microcosm_postgres.migrate import main
from microcosm_postgres.models import Model
def stamp_head(graph):
"""
Stamp the database with the current head revision.
"""
main(graph, "stamp", "head")
def get_current_head(graph):
"""
Get the current database head revision, if any.
"""
session = new_session(graph)
try:
result = session.execute("SELECT version_num FROM alembic_version")
except ProgrammingError:
return None
else:
return result.scalar()
finally:
session.close()
def create_all(graph):
"""
Create all database tables.
"""
head = get_current_head(graph)
if head is None:
Model.metadata.create_all(graph.postgres)
stamp_head(graph)
def drop_all(graph):
"""
Drop all database tables.
"""
Model.metadata.drop_all(graph.postgres)
drop_alembic_table(graph)
def drop_alembic_table(graph):
"""
Drop the alembic version table.
"""
try:
graph.postgres.execute("DROP TABLE alembic_version;")
except ProgrammingError:
return False
else:
return True
# Cached database metadata instance
_metadata = None
def recreate_all(graph):
"""
Drop and add back all database tables, or reset all data associated with a database.
Intended mainly for testing, where a test database may either need to be re-initialized
    or cleared out between tests.
"""
global _metadata
if _metadata is None:
# First-run, the test database/metadata needs to be initialized
drop_all(graph)
create_all(graph)
_metadata = MetaData(bind=graph.postgres)
_metadata.reflect()
return
# Otherwise, truncate all existing tables
connection = graph.postgres.connect()
transaction = connection.begin()
for table in reversed(_metadata.sorted_tables):
connection.execute(table.delete())
transaction.commit()
def new_session(graph, expire_on_commit=False):
"""
Create a new session.
"""
return graph.sessionmaker(expire_on_commit=expire_on_commit)
| """
Common database operations.
"""
from sqlalchemy import MetaData
from sqlalchemy.exc import ProgrammingError
from microcosm_postgres.migrate import main
from microcosm_postgres.models import Model
def stamp_head(graph):
"""
Stamp the database with the current head revision.
"""
main(graph, "stamp", "head")
def get_current_head(graph):
"""
Get the current database head revision, if any.
"""
session = new_session(graph)
try:
result = session.execute("SELECT version_num FROM alembic_version")
except ProgrammingError:
return None
else:
return result.scalar()
finally:
session.close()
def create_all(graph):
"""
Create all database tables.
"""
head = get_current_head(graph)
if head is None:
Model.metadata.create_all(graph.postgres)
stamp_head(graph)
def drop_all(graph):
"""
Drop all database tables.
"""
Model.metadata.drop_all(graph.postgres)
drop_alembic_table(graph)
def drop_alembic_table(graph):
"""
Drop the alembic version table.
"""
try:
graph.postgres.execute("DROP TABLE alembic_version;")
except ProgrammingError:
return False
else:
return True
# Cached database metadata instance
_metadata = None
def recreate_all(graph):
"""
Drop and add back all database tables, or reset all data associated with a database.
Intended mainly for testing, where a test database may either need to be re-initialized
    or cleared out between tests.
"""
global _metadata
if _metadata is None:
# First-run, the test database/metadata needs to be initialized
drop_all(graph)
create_all(graph)
_metadata = MetaData(bind=graph.postgres, reflect=True)
return
# Otherwise, truncate all existing tables
connection = graph.postgres.connect()
transaction = connection.begin()
for table in reversed(_metadata.sorted_tables):
connection.execute(table.delete())
transaction.commit()
def new_session(graph, expire_on_commit=False):
"""
Create a new session.
"""
return graph.sessionmaker(expire_on_commit=expire_on_commit)
| apache-2.0 | Python |
c7f8fd75dd5b41a059b65e9cea54d875d1f57655 | Change self to PortStatCollector. | ramjothikumar/Diamond,jaingaurav/Diamond,timchenxiaoyu/Diamond,TAKEALOT/Diamond,MichaelDoyle/Diamond,acquia/Diamond,rtoma/Diamond,Clever/Diamond,russss/Diamond,zoidbergwill/Diamond,socialwareinc/Diamond,actmd/Diamond,anandbhoraskar/Diamond,Ssawa/Diamond,zoidbergwill/Diamond,stuartbfox/Diamond,Netuitive/netuitive-diamond,mzupan/Diamond,gg7/diamond,eMerzh/Diamond-1,rtoma/Diamond,mfriedenhagen/Diamond,python-diamond/Diamond,works-mobile/Diamond,dcsquared13/Diamond,h00dy/Diamond,dcsquared13/Diamond,Precis/Diamond,szibis/Diamond,actmd/Diamond,tusharmakkar08/Diamond,jriguera/Diamond,gg7/diamond,works-mobile/Diamond,gg7/diamond,jaingaurav/Diamond,ramjothikumar/Diamond,signalfx/Diamond,Basis/Diamond,cannium/Diamond,Ssawa/Diamond,joel-airspring/Diamond,works-mobile/Diamond,timchenxiaoyu/Diamond,russss/Diamond,Nihn/Diamond-1,tusharmakkar08/Diamond,python-diamond/Diamond,TAKEALOT/Diamond,skbkontur/Diamond,Netuitive/netuitive-diamond,MichaelDoyle/Diamond,russss/Diamond,bmhatfield/Diamond,tuenti/Diamond,skbkontur/Diamond,skbkontur/Diamond,Precis/Diamond,Nihn/Diamond-1,python-diamond/Diamond,actmd/Diamond,szibis/Diamond,Basis/Diamond,timchenxiaoyu/Diamond,h00dy/Diamond,Ensighten/Diamond,cannium/Diamond,TAKEALOT/Diamond,tusharmakkar08/Diamond,eMerzh/Diamond-1,hvnsweeting/Diamond,actmd/Diamond,jumping/Diamond,codepython/Diamond,Clever/Diamond,jaingaurav/Diamond,mzupan/Diamond,Ensighten/Diamond,cannium/Diamond,bmhatfield/Diamond,Ormod/Diamond,Ensighten/Diamond,Netuitive/netuitive-diamond,eMerzh/Diamond-1,Nihn/Diamond-1,Ormod/Diamond,rtoma/Diamond,EzyInsights/Diamond,h00dy/Diamond,acquia/Diamond,stuartbfox/Diamond,tuenti/Diamond,Netuitive/Diamond,dcsquared13/Diamond,Slach/Diamond,signalfx/Diamond,Nihn/Diamond-1,MichaelDoyle/Diamond,Basis/Diamond,jaingaurav/Diamond,jumping/Diamond,jriguera/Diamond,timchenxiaoyu/Diamond,joel-airspring/Diamond,Clever/Diamond,Basis/Diamond,codepython/Diamond,tuenti/Diamond,Ssawa/Diamond,mfriedenhagen/Diamond,anandbhoraskar/Diamond,tuenti/Diamond,cannium/Diamond,joel-airspring/Diamond,russss/Diamond,anandbhoraskar/Diamond,Ormod/Diamond,stuartbfox/Diamond,signalfx/Diamond,Netuitive/Diamond,Slach/Diamond,zoidbergwill/Diamond,jumping/Diamond,stuartbfox/Diamond,mfriedenhagen/Diamond,zoidbergwill/Diamond,Precis/Diamond,MichaelDoyle/Diamond,joel-airspring/Diamond,EzyInsights/Diamond,tusharmakkar08/Diamond,mfriedenhagen/Diamond,szibis/Diamond,hamelg/Diamond,acquia/Diamond,codepython/Diamond,gg7/diamond,ramjothikumar/Diamond,skbkontur/Diamond,anandbhoraskar/Diamond,signalfx/Diamond,jriguera/Diamond,hvnsweeting/Diamond,Ormod/Diamond,acquia/Diamond,jriguera/Diamond,Ensighten/Diamond,jumping/Diamond,bmhatfield/Diamond,Netuitive/netuitive-diamond,EzyInsights/Diamond,TAKEALOT/Diamond,dcsquared13/Diamond,szibis/Diamond,hvnsweeting/Diamond,socialwareinc/Diamond,h00dy/Diamond,rtoma/Diamond,Clever/Diamond,bmhatfield/Diamond,socialwareinc/Diamond,ramjothikumar/Diamond,Ssawa/Diamond,Slach/Diamond,Netuitive/Diamond,mzupan/Diamond,hamelg/Diamond,socialwareinc/Diamond,Netuitive/Diamond,hamelg/Diamond,mzupan/Diamond,EzyInsights/Diamond,hamelg/Diamond,hvnsweeting/Diamond,Slach/Diamond,works-mobile/Diamond,codepython/Diamond,Precis/Diamond,eMerzh/Diamond-1 | src/collectors/portstat/portstat.py | src/collectors/portstat/portstat.py | """
The PortStatCollector collects metrics about ports listed in config file.
##### Dependencies
* psutil
"""
from collections import Counter
import psutil
import diamond.collector
class PortStatCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(PortStatCollector, self).__init__(*args, **kwargs)
self.ports = {}
for port_name, cfg in self.config['port'].items():
port_cfg = {}
for key in ('number',):
port_cfg[key] = cfg.get(key, [])
self.ports[port_name] = port_cfg
def get_default_config_help(self):
config_help = super(PortStatCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
config = super(PortStatCollector, self).get_default_config()
config.update({
'path': 'port',
'port': {},
})
return config
@staticmethod
def get_port_stats(port):
"""
Iterate over connections and count states for specified port
:param port: port for which stats are collected
:return: Counter with port states
"""
cnts = Counter()
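        # laddr is an (address, port) tuple; tally the states of connections on this port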
for c in psutil.net_connections():
c_port = c.laddr[1]
if c_port != port:
continue
status = c.status.lower()
cnts[status] += 1
return cnts
def collect(self):
"""
Overrides the Collector.collect method
"""
for port_name, port_cfg in self.ports.iteritems():
port = int(port_cfg['number'])
stats = PortStatCollector.get_port_stats(port)
for stat_name, stat_value in stats.iteritems():
metric_name = '%s.%s' % (port_name, stat_name)
self.publish(metric_name, stat_value)
| """
The PortStatCollector collects metrics about ports listed in config file.
##### Dependencies
* psutil
"""
from collections import Counter
import psutil
import diamond.collector
class PortStatCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(PortStatCollector, self).__init__(*args, **kwargs)
self.ports = {}
for port_name, cfg in self.config['port'].items():
port_cfg = {}
for key in ('number',):
port_cfg[key] = cfg.get(key, [])
self.ports[port_name] = port_cfg
def get_default_config_help(self):
config_help = super(PortStatCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
config = super(PortStatCollector, self).get_default_config()
config.update({
'path': 'port',
'port': {},
})
return config
@staticmethod
def get_port_stats(port):
"""
Iterate over connections and count states for specified port
:param port: port for which stats are collected
:return: Counter with port states
"""
cnts = Counter()
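        # laddr is an (address, port) tuple; tally the states of connections on this port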
for c in psutil.net_connections():
c_port = c.laddr[1]
if c_port != port:
continue
status = c.status.lower()
cnts[status] += 1
return cnts
def collect(self):
"""
Overrides the Collector.collect method
"""
for port_name, port_cfg in self.ports.iteritems():
port = int(port_cfg['number'])
stats = self.get_port_stats(port)
for stat_name, stat_value in stats.iteritems():
metric_name = '%s.%s' % (port_name, stat_name)
self.publish(metric_name, stat_value)
| mit | Python |
0744dba6a52c42dbe6f9ba360e5311a1f90c3550 | Fix python 3 compatibility issue in DNSimple driver. | Kami/libcloud,Kami/libcloud,curoverse/libcloud,carletes/libcloud,t-tran/libcloud,jimbobhickville/libcloud,mgogoulos/libcloud,ZuluPro/libcloud,sahildua2305/libcloud,ByteInternet/libcloud,pquentin/libcloud,mistio/libcloud,samuelchong/libcloud,vongazman/libcloud,pquentin/libcloud,mistio/libcloud,t-tran/libcloud,wrigri/libcloud,techhat/libcloud,illfelder/libcloud,mathspace/libcloud,andrewsomething/libcloud,vongazman/libcloud,carletes/libcloud,Scalr/libcloud,iPlantCollaborativeOpenSource/libcloud,erjohnso/libcloud,niteoweb/libcloud,illfelder/libcloud,vongazman/libcloud,supertom/libcloud,watermelo/libcloud,Scalr/libcloud,DimensionDataCBUSydney/libcloud,mgogoulos/libcloud,mbrukman/libcloud,atsaki/libcloud,illfelder/libcloud,iPlantCollaborativeOpenSource/libcloud,iPlantCollaborativeOpenSource/libcloud,DimensionDataCBUSydney/libcloud,Scalr/libcloud,t-tran/libcloud,aleGpereira/libcloud,atsaki/libcloud,StackPointCloud/libcloud,supertom/libcloud,andrewsomething/libcloud,cryptickp/libcloud,DimensionDataCBUSydney/libcloud,watermelo/libcloud,wuyuewen/libcloud,Kami/libcloud,niteoweb/libcloud,mathspace/libcloud,techhat/libcloud,wido/libcloud,samuelchong/libcloud,StackPointCloud/libcloud,mgogoulos/libcloud,wido/libcloud,mistio/libcloud,SecurityCompass/libcloud,apache/libcloud,lochiiconnectivity/libcloud,techhat/libcloud,mbrukman/libcloud,apache/libcloud,StackPointCloud/libcloud,lochiiconnectivity/libcloud,cryptickp/libcloud,wuyuewen/libcloud,samuelchong/libcloud,carletes/libcloud,ByteInternet/libcloud,aleGpereira/libcloud,mbrukman/libcloud,SecurityCompass/libcloud,erjohnso/libcloud,curoverse/libcloud,jimbobhickville/libcloud,NexusIS/libcloud,ZuluPro/libcloud,andrewsomething/libcloud,sahildua2305/libcloud,niteoweb/libcloud,wuyuewen/libcloud,ZuluPro/libcloud,atsaki/libcloud,erjohnso/libcloud,curoverse/libcloud,supertom/libcloud,ByteInternet/libcloud,lochiiconnectivity/libcloud,wido/libcloud,aleGpereira/libcloud,apache/libcloud,cryptickp/libcloud,wrigri/libcloud,sahildua2305/libcloud,wrigri/libcloud,NexusIS/libcloud,NexusIS/libcloud,jimbobhickville/libcloud,SecurityCompass/libcloud,pquentin/libcloud,mathspace/libcloud,watermelo/libcloud | libcloud/common/dnsimple.py | libcloud/common/dnsimple.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import JsonResponse
class DNSimpleDNSResponse(JsonResponse):
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
:rtype: ``bool``
:return: ``True`` or ``False``
"""
# response.success() only checks for 200 and 201 codes. Should we
# add 204?
return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT]
class DNSimpleDNSConnection(ConnectionUserAndKey):
host = 'api.dnsimple.com'
responseCls = DNSimpleDNSResponse
def add_default_headers(self, headers):
"""
Add headers that are necessary for every request
This method adds ``token`` to the request.
"""
        # TODO: check what info was passed as a parameter and, based on
        # that, set the header
headers['X-DNSimple-Token'] = '%s:%s' % (self.user_id, self.key)
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
return headers
| # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import JsonResponse
class DNSimpleDNSResponse(JsonResponse):
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
:rtype: ``bool``
:return: ``True`` or ``False``
"""
# response.success() only checks for 200 and 201 codes. Should we
# add 204?
return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT]
class DNSimpleDNSConnection(ConnectionUserAndKey):
host = 'api.dnsimple.com'
responseCls = DNSimpleDNSResponse
def add_default_headers(self, headers):
"""
Add headers that are necessary for every request
This method adds ``token`` to the request.
"""
        # TODO: check what info was passed as a parameter and, based on
        # that, set the header
headers['X-DNSimple-Token'] = '%s:%s' % (self.user_id, self.key)
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
return headers
| apache-2.0 | Python |
725b246a0bbb437a5a0efeb16b58d3942f3b14cc | Update the example client. | flowroute/txjason | examples/client.py | examples/client.py | from twisted.internet import defer, endpoints, task
from txjason.netstring import JSONRPCClientFactory
from txjason.client import JSONRPCClientError
client = JSONRPCClientFactory('127.0.0.1', 7080)
@defer.inlineCallbacks
def main(reactor, description):
endpoint = endpoints.clientFromString(reactor, description)
client = JSONRPCClientFactory(endpoint)
try:
r = yield client.callRemote('bar.foo')
except JSONRPCClientError as e:
print e
r = yield client.callRemote('bar.add', 1, 2)
print "add result: %s" % str(r)
r = yield client.callRemote('bar.whoami')
print "whaomi result: %s" % str(r)
task.react(main, ['tcp:127.0.0.1:7080'])
| from twisted.internet import reactor, defer
from txjason.netstring import JSONRPCClientFactory
from txjason.client import JSONRPCClientError
client = JSONRPCClientFactory('127.0.0.1', 7080)
@defer.inlineCallbacks
def stuff():
try:
r = yield client.callRemote('bar.foo')
except JSONRPCClientError as e:
print e
r = yield client.callRemote('bar.add', 1, 2)
print "add result: %s" % str(r)
r = yield client.callRemote('bar.whoami')
print "whaomi result: %s" % str(r)
reactor.callWhenRunning(stuff)
reactor.run()
| mit | Python |
5dddadb98340fec6afda80fd1a8ee1eda907b60a | print exports to terminal | willmcgugan/rich | examples/export.py | examples/export.py | """
Demonstrates export console output
"""
from rich.console import Console
from rich.table import Table
console = Console(record=True)
def print_table():
table = Table(title="Star Wars Movies")
table.add_column("Released", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
console.print(table)
# Prints table
print_table()
# Get console output as text
file1 = "table_export_plaintext.txt"
text = console.export_text()
with open(file1, "w") as file:
file.write(text)
print(f"Exported console output as plain text to {file1}")
# Calling print_table again because console output buffer
# is flushed once export function is called
print_table()
# Get console output as html
# use clear=False so output is not flushed after export
file2 = "table_export_html.html"
html = console.export_html(clear=False)
with open(file2, "w") as file:
file.write(html)
print(f"Exported console output as html to {file2}")
# Export text output to table_export.txt
file3 = "table_export_plaintext2.txt"
console.save_text(file3, clear=False)
print(f"Exported console output as plain text to {file3}")
# Export html output to table_export.html
file4 = "table_export_html2.html"
console.save_html(file4)
print(f"Exported console output as html to {file4}")
| """
Demonstrates export console output
"""
from rich.console import Console
from rich.table import Table
console = Console(record=True)
def print_table():
table = Table(title="Star Wars Movies")
table.add_column("Released", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
console.print(table, justify="center")
# Prints table
print_table()
# Get console output as text
text = console.export_text()
with open("plaintext_export.txt", "w") as file:
file.write(text)
# Calling print_table again because console output buffer
# is flushed once export function is called
print_table()
# Get console output as html
# use clear=False so output is not flushed after export
html = console.export_html(clear=False)
with open("html_export.html", "w") as file:
file.write(html)
# Export text output to table_export.txt
console.save_text("rich_export.txt", clear=False)
# Export html output to table_export.html
console.save_html("rich_export.html")
| mit | Python |
1741c7258ebdcef412442cebab33409290496df0 | Add network example | ktkirk/HSSI,ktkirk/HSSI,ktkirk/HSSI | IoT/iot_utils.py | IoT/iot_utils.py | from __future__ import print_function
import sys, signal, atexit
import json
__author__ = 'KT Kirk'
__all__ = ['keys', 'atexit', 'signal']
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit, including functions from myUVSensor
def exitHandler():
print("Exiting")
try:
sys.exit(0)
except KeyError:
pass
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# Load data.sparkfun.com keys file
with open("keys_n1YRX98dq9C6X0LrZdvD.json") as json_file:
keys = json.load(json_file)
| from __future__ import print_function
import sys, signal, atexit
import json
__author__ = 'KT Kirk'
__all__ = ['keys', 'atexit', 'signal']
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit, including functions from myUVSensor
def exitHandler():
print("Exiting")
try:
sys.exit(0)
except KeyError:
pass
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# Load data.sparkfun.com keys file
with open("keys_n1YRX98dq9C6X0LrZdvD.json") as json_file:
keys = json.load(json_file) | bsd-2-clause | Python |
b07243a6fb11dbbd487ba37620f7c8f4fc89449a | bump version to v1.10.5 | simomarsili/ndd | ndd/package.py | ndd/package.py | # -*- coding: utf-8 -*-
"""Template package file"""
__title__ = 'ndd'
__version__ = '1.10.5'
__author__ = 'Simone Marsili'
__summary__ = ''
__url__ = 'https://github.com/simomarsili/ndd'
__email__ = 'simo.marsili@gmail.com'
__license__ = 'BSD 3-Clause'
__copyright__ = 'Copyright (c) 2020, Simone Marsili'
__classifiers__ = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
| # -*- coding: utf-8 -*-
"""Template package file"""
__title__ = 'ndd'
__version__ = '1.10.4'
__author__ = 'Simone Marsili'
__summary__ = ''
__url__ = 'https://github.com/simomarsili/ndd'
__email__ = 'simo.marsili@gmail.com'
__license__ = 'BSD 3-Clause'
__copyright__ = 'Copyright (c) 2020, Simone Marsili'
__classifiers__ = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
| bsd-3-clause | Python |
5848a9c64744eacf8d90a86335e948ed17ef8346 | Correct path to workflows | ASaiM/framework,ASaiM/framework | src/prepare_asaim/import_workflows.py | src/prepare_asaim/import_workflows.py | #!/usr/bin/env python
import os
from bioblend import galaxy
admin_email = os.environ.get('GALAXY_DEFAULT_ADMIN_USER', 'admin@galaxy.org')
admin_pass = os.environ.get('GALAXY_DEFAULT_ADMIN_PASSWORD', 'admin')
url = "http://localhost:8080"
gi = galaxy.GalaxyInstance(url=url, email=admin_email, password=admin_pass)
wf = galaxy.workflows.WorkflowClient(gi)
wf.import_workflow_from_local_path('asaim_main_workflow.ga')
wf.import_workflow_from_local_path('asaim_taxonomic_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('asaim_functional_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('asaim_go_slim_terms_comparative_analysis.ga')
wf.import_workflow_from_local_path('asaim_taxonomically_related_functional_result_comparative_analysis.ga') | #!/usr/bin/env python
import os
from bioblend import galaxy
admin_email = os.environ.get('GALAXY_DEFAULT_ADMIN_USER', 'admin@galaxy.org')
admin_pass = os.environ.get('GALAXY_DEFAULT_ADMIN_PASSWORD', 'admin')
url = "http://localhost:8080"
gi = galaxy.GalaxyInstance(url=url, email=admin_email, password=admin_pass)
wf = galaxy.workflows.WorkflowClient(gi)
wf.import_workflow_from_local_path('/home/galaxy/asaim_main_workflow.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_taxonomic_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_functional_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_go_slim_terms_comparative_analysis.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_taxonomically_related_functional_result_comparative_analysis.ga') | apache-2.0 | Python |
0d31cbfd3042a1e7255ed833715112504fe608ae | Revert types | daeyun/dshinpy,daeyun/dshinpy | dshin/nn/types.py | dshin/nn/types.py | """
TensorFlow type annotation aliases.
"""
import typing
import tensorflow as tf
Value = typing.Union[tf.Variable, tf.Tensor]
Values = typing.Sequence[Value]
Named = typing.Union[tf.Variable, tf.Tensor, tf.Operation]
NamedSeq = typing.Sequence[Named]
Tensors = typing.Sequence[tf.Tensor]
Variables = typing.Sequence[tf.Variable]
Operations = typing.Sequence[tf.Operation]
| """
TensorFlow type annotation aliases.
"""
import typing
import tensorflow as tf
Value = (tf.Variable, tf.Tensor)
Values = typing.Sequence[Value]
Named = (tf.Variable, tf.Tensor, tf.Operation)
NamedSeq = typing.Sequence[Named]
Tensors = typing.Sequence[tf.Tensor]
Variables = typing.Sequence[tf.Variable]
Operations = typing.Sequence[tf.Operation]
| mpl-2.0 | Python |
8a3ae1b809d886f647f13574cc9b416b17c27b7c | Remove VERSION variable from api.py | ivankliuk/duckduckpy | duckduckpy/api.py | duckduckpy/api.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __init__ import __version__
from collections import namedtuple
from duckduckpy.utils import camel_to_snake_case
SERVER_HOST = 'api.duckduckgo.com'
USER_AGENT = 'duckduckpy {0}'.format(__version__)
ICON_KEYS = set(['URL', 'Width', 'Height'])
RESULT_KEYS = set(['FirstURL', 'Icon', 'Result', 'Text'])
CONTENT_KEYS = set(['data_type', 'label', 'sort_order', 'value', 'wiki_order'])
META_KEYS = set(['data_type', 'label', 'value'])
INFOBOX_KEYS = set(['content', 'meta'])
RESPONSE_KEYS = set([
'Redirect', 'Definition', 'ImageWidth', 'Infobox', 'RelatedTopics',
'ImageHeight', 'Heading', 'Answer', 'AbstractText', 'Type', 'ImageIsLogo',
'DefinitionSource', 'AbstractURL', 'Abstract', 'DefinitionURL', 'Results',
'Entity', 'AnswerType', 'AbstractSource', 'Image'])
camel_to_snake_case_set = lambda seq: set(map(camel_to_snake_case, seq))
Icon = namedtuple('Icon', camel_to_snake_case_set(ICON_KEYS))
Result = namedtuple('Result', camel_to_snake_case_set(RESULT_KEYS))
Content = namedtuple('Content', camel_to_snake_case_set(CONTENT_KEYS))
Meta = namedtuple('Meta', camel_to_snake_case_set(META_KEYS))
Infobox = namedtuple('Infobox', camel_to_snake_case_set(INFOBOX_KEYS))
Response = namedtuple('Response', camel_to_snake_case_set(RESPONSE_KEYS))
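# Illustrative field naming: a key such as 'ImageWidth' in RESPONSE_KEYS
# becomes the namedtuple field 'image_width' once camel_to_snake_case runs.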
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
from utils import camel_to_snake_case
SERVER_HOST = 'api.duckduckgo.com'
VERSION = '0.1-alpha'
USER_AGENT = 'duckduckpy {0}'.format(VERSION)
ICON_KEYS = set(['URL', 'Width', 'Height'])
RESULT_KEYS = set(['FirstURL', 'Icon', 'Result', 'Text'])
CONTENT_KEYS = set(['data_type', 'label', 'sort_order', 'value', 'wiki_order'])
META_KEYS = set(['data_type', 'label', 'value'])
INFOBOX_KEYS = set(['content', 'meta'])
RESPONSE_KEYS = set([
'Redirect', 'Definition', 'ImageWidth', 'Infobox', 'RelatedTopics',
'ImageHeight', 'Heading', 'Answer', 'AbstractText', 'Type', 'ImageIsLogo',
'DefinitionSource', 'AbstractURL', 'Abstract', 'DefinitionURL', 'Results',
'Entity', 'AnswerType', 'AbstractSource', 'Image'])
camel_to_snake_case_set = lambda seq: set(map(camel_to_snake_case, seq))
Icon = namedtuple('Icon', camel_to_snake_case_set(ICON_KEYS))
Result = namedtuple('Result', camel_to_snake_case_set(RESULT_KEYS))
Content = namedtuple('Content', camel_to_snake_case_set(CONTENT_KEYS))
Meta = namedtuple('Meta', camel_to_snake_case_set(META_KEYS))
Infobox = namedtuple('Infobox', camel_to_snake_case_set(INFOBOX_KEYS))
Response = namedtuple('Response', camel_to_snake_case_set(RESPONSE_KEYS))
| mit | Python |
70f588282e1777945e113e73dbca83f77355f0f9 | Test git permission | IEEERobotics/bot,IEEERobotics/bot,deepakiam/bot,deepakiam/bot,IEEERobotics/bot,deepakiam/bot | driver/omni_driver.py | driver/omni_driver.py | import driver
import lib.lib as lib
from hardware.dmcc_motor import DMCCMotorSet
class OmniDriver(driver.Driver):
#Vijay was here
#Chad was here | import driver
import lib.lib as lib
from hardware.dmcc_motor import DMCCMotorSet
class OmniDriver(driver.Driver):
#Vijay was here
| bsd-2-clause | Python |
a3f12245163a9165f45f4ee97b6e4e67cdd29783 | Update decipher.py | khemritolya/decipher.py | decipher.py | decipher.py | #
# decipher.py (c) Luis Hoderlein
#
# BUILT: Apr 21, 2016
#
# This program can brute force Caesarian ciphers
# It gives you all possible outputs, meaning you still have to chose the output you want
#
# imports
import string
# adds padding to make output inline
def pad(num):
if num < 10:
return "0"+str(num)
else:
return str(num)
# declare vars + ask for input
raw_txt = raw_input("Enter ciphertext: ")
raw_int = []
txt = ""
spaces = []
# make all lower case (necessary)
raw_txt = raw_txt.lower()
# log spaces + remove them
for i in range(0, len(raw_txt)):
if raw_txt[i] != " ":
txt = txt + raw_txt[i]
else:
spaces.append(i);
# turn chars into ints
for i in range(0, len(txt)):
raw_int.append(string.lowercase.index(txt[i]))
# loop through every possible solution (26 of them), using i has cipher number
# and print all possible solution + add the spaces again
# to prevent some weird bug, possible int has to be reassigned every time
for i in range(0, 26):
possible_int = []
for j in range(0, len(raw_int)):
possible_int.append(raw_int[j])
possible_txt = ""
for j in range(0, len(possible_int)):
possible_int[j] = possible_int[j]+i
if possible_int[j] >= 26:
possible_int[j] = possible_int[j] - 26
possible_txt = possible_txt + string.lowercase[possible_int[j]]
del possible_int
for j in range(0, len(spaces)):
possible_txt = possible_txt[:spaces[j]] + " " +possible_txt[spaces[j]:]
print "Solution "+pad(i)+" is: "+possible_txt
| #
# decipher.py (c) Luis Hoderlein
#
# BUILT: Apr 21, 2016
#
# This program can brute force Caesarian ciphers
# It gives you all possible outputs, meaning you still have to chose the output you want
#
import string
def pad(num):
if num < 10:
return "0"+str(num)
else:
return str(num)
raw_txt = raw_input("Enter ciphertext: ")
raw_int = []
txt = ""
spaces = []
raw_txt = raw_txt.lower()
for i in range(0, len(raw_txt)):
if raw_txt[i] != " ":
txt = txt + raw_txt[i]
else:
spaces.append(i);
for i in range(0, len(txt)):
raw_int.append(string.lowercase.index(txt[i]))
for i in range(0, 26):
possible_int = []
for j in range(0, len(raw_int)):
possible_int.append(raw_int[j])
possible_txt = ""
for j in range(0, len(possible_int)):
possible_int[j] = possible_int[j]+i
if possible_int[j] >= 26:
possible_int[j] = possible_int[j] - 26
possible_txt = possible_txt + string.lowercase[possible_int[j]]
del possible_int
for j in range(0, len(spaces)):
possible_txt = possible_txt[:spaces[j]] + " " +possible_txt[spaces[j]:]
print "Solution "+pad(i)+" is "+possible_txt
| apache-2.0 | Python |
f421b2997494ca546c6479e4246456e56b816e60 | Add Robert EVT ID too | pebble/libpebble2 | libpebble2/util/hardware.py | libpebble2/util/hardware.py | __author__ = 'katharine'
class PebbleHardware(object):
UNKNOWN = 0
TINTIN_EV1 = 1
TINTIN_EV2 = 2
TINTIN_EV2_3 = 3
TINTIN_EV2_4 = 4
TINTIN_V1_5 = 5
BIANCA = 6
SNOWY_EVT2 = 7
SNOWY_DVT = 8
SPALDING_EVT = 9
BOBBY_SMILES = 10
SPALDING = 11
SILK_EVT = 12
ROBERT_EVT = 13
SILK = 14
TINTIN_BB = 0xFF
TINTIN_BB2 = 0xFE
SNOWY_BB = 0xFD
SNOWY_BB2 = 0xFC
SPALDING_BB2 = 0xFB
SILK_BB = 0xFA
ROBERT_BB = 0xF9
SILK_BB2 = 0xF8
PLATFORMS = {
UNKNOWN: 'unknown',
TINTIN_EV1: 'aplite',
TINTIN_EV2: 'aplite',
TINTIN_EV2_3: 'aplite',
TINTIN_EV2_4: 'aplite',
TINTIN_V1_5: 'aplite',
BIANCA: 'aplite',
SNOWY_EVT2: 'basalt',
SNOWY_DVT: 'basalt',
BOBBY_SMILES: 'basalt',
SPALDING_EVT: 'chalk',
SPALDING: 'chalk',
SILK_EVT: 'diorite',
SILK: 'diorite',
TINTIN_BB: 'aplite',
TINTIN_BB2: 'aplite',
SNOWY_BB: 'basalt',
SNOWY_BB2: 'basalt',
SPALDING_BB2: 'chalk',
SILK_BB: 'diorite',
ROBERT_BB: 'emery',
SILK_BB2: 'diorite',
}
@classmethod
def hardware_platform(cls, hardware):
return cls.PLATFORMS.get(hardware, 'unknown')
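# Illustrative lookup: hardware_platform(PebbleHardware.SILK) returns
# 'diorite'; any revision missing from PLATFORMS falls back to 'unknown'.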
| __author__ = 'katharine'
class PebbleHardware(object):
UNKNOWN = 0
TINTIN_EV1 = 1
TINTIN_EV2 = 2
TINTIN_EV2_3 = 3
TINTIN_EV2_4 = 4
TINTIN_V1_5 = 5
BIANCA = 6
SNOWY_EVT2 = 7
SNOWY_DVT = 8
SPALDING_EVT = 9
BOBBY_SMILES = 10
SPALDING = 11
SILK_EVT = 12
SILK = 14
TINTIN_BB = 0xFF
TINTIN_BB2 = 0xFE
SNOWY_BB = 0xFD
SNOWY_BB2 = 0xFC
SPALDING_BB2 = 0xFB
SILK_BB = 0xFA
ROBERT_BB = 0xF9
SILK_BB2 = 0xF8
PLATFORMS = {
UNKNOWN: 'unknown',
TINTIN_EV1: 'aplite',
TINTIN_EV2: 'aplite',
TINTIN_EV2_3: 'aplite',
TINTIN_EV2_4: 'aplite',
TINTIN_V1_5: 'aplite',
BIANCA: 'aplite',
SNOWY_EVT2: 'basalt',
SNOWY_DVT: 'basalt',
BOBBY_SMILES: 'basalt',
SPALDING_EVT: 'chalk',
SPALDING: 'chalk',
SILK_EVT: 'diorite',
SILK: 'diorite',
TINTIN_BB: 'aplite',
TINTIN_BB2: 'aplite',
SNOWY_BB: 'basalt',
SNOWY_BB2: 'basalt',
SPALDING_BB2: 'chalk',
SILK_BB: 'diorite',
ROBERT_BB: 'emery',
SILK_BB2: 'diorite',
}
@classmethod
def hardware_platform(cls, hardware):
return cls.PLATFORMS.get(hardware, 'unknown')
| mit | Python |
d9af336506fcca40cbc5ebf337268cfd16459c4f | Use iter_log in example. | jelmer/subvertpy,jelmer/subvertpy | examples/ra_log.py | examples/ra_log.py | #!/usr/bin/python
# Demonstrates how to iterate over the log of a Subversion repository.
from subvertpy.ra import RemoteAccess
conn = RemoteAccess("svn://svn.samba.org/subvertpy/trunk")
for (changed_paths, rev, revprops, has_children) in conn.iter_log(paths=None,
start=0, end=conn.get_latest_revnum(), discover_changed_paths=True):
print "=" * 79
print "%d:" % rev
print "Revision properties:"
for entry in revprops.items():
print " %s: %s" % entry
print ""
print "Changed paths"
for path, (action, from_path, from_rev) in changed_paths.iteritems():
print " %s (%s)" % (path, action)
| #!/usr/bin/python
# Demonstrates how to iterate over the log of a Subversion repository.
from subvertpy.ra import RemoteAccess
conn = RemoteAccess("svn://svn.gnome.org/svn/gnome-specimen/trunk")
def cb(changed_paths, rev, revprops, has_children=None):
print "=" * 79
print "%d:" % rev
print "Revision properties:"
for entry in revprops.items():
print " %s: %s" % entry
print ""
print "Changed paths"
for path, (action, from_path, from_rev) in changed_paths.iteritems():
print " %s (%s)" % (path, action)
conn.get_log(callback=cb, paths=None, start=0, end=conn.get_latest_revnum(),
discover_changed_paths=True)
| lgpl-2.1 | Python |
1d0d28ebdda25a7dc579857063d47c5042e6c02b | Enable south for the docs site. | alawnchen/djangoproject.com,xavierdutreilh/djangoproject.com,gnarf/djangoproject.com,khkaminska/djangoproject.com,rmoorman/djangoproject.com,relekang/djangoproject.com,hassanabidpk/djangoproject.com,django/djangoproject.com,alawnchen/djangoproject.com,xavierdutreilh/djangoproject.com,rmoorman/djangoproject.com,relekang/djangoproject.com,alawnchen/djangoproject.com,relekang/djangoproject.com,nanuxbe/django,alawnchen/djangoproject.com,django/djangoproject.com,hassanabidpk/djangoproject.com,gnarf/djangoproject.com,django/djangoproject.com,django/djangoproject.com,rmoorman/djangoproject.com,khkaminska/djangoproject.com,vxvinh1511/djangoproject.com,gnarf/djangoproject.com,hassanabidpk/djangoproject.com,django/djangoproject.com,khkaminska/djangoproject.com,xavierdutreilh/djangoproject.com,nanuxbe/django,vxvinh1511/djangoproject.com,relekang/djangoproject.com,vxvinh1511/djangoproject.com,nanuxbe/django,gnarf/djangoproject.com,django/djangoproject.com,xavierdutreilh/djangoproject.com,rmoorman/djangoproject.com,nanuxbe/django,hassanabidpk/djangoproject.com,vxvinh1511/djangoproject.com,khkaminska/djangoproject.com | django_docs/settings.py | django_docs/settings.py | # Settings for docs.djangoproject.com
from django_www.common_settings import *
### Django settings
CACHE_MIDDLEWARE_KEY_PREFIX = 'djangodocs'
INSTALLED_APPS = [
'django.contrib.sitemaps',
'django.contrib.sites',
'django.contrib.staticfiles',
'djangosecure',
'haystack',
'south',
'docs',
]
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
'djangosecure.middleware.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'docs.context_processors.recent_release',
'django.core.context_processors.request',
]
ROOT_URLCONF = 'django_docs.urls'
SITE_ID = 2
### Docs settings
if PRODUCTION:
DOCS_BUILD_ROOT = BASE.parent.child('docbuilds')
else:
DOCS_BUILD_ROOT = '/tmp/djangodocs'
### Haystack settings
HAYSTACK_SITECONF = 'docs.search_sites'
if PRODUCTION:
HAYSTACK_SEARCH_ENGINE = 'xapian'
HAYSTACK_XAPIAN_PATH = BASE.parent.child('djangodocs.index')
else:
HAYSTACK_SEARCH_ENGINE = 'whoosh'
HAYSTACK_WHOOSH_PATH = '/tmp/djangodocs.index'
### South settings
SOUTH_TESTS_MIGRATE = False
### Enable optional components
if DEBUG:
try:
import debug_toolbar
except ImportError:
pass
else:
INSTALLED_APPS.append('debug_toolbar')
INTERNAL_IPS = ['127.0.0.1']
MIDDLEWARE_CLASSES.insert(
MIDDLEWARE_CLASSES.index('django.middleware.common.CommonMiddleware') + 1,
'debug_toolbar.middleware.DebugToolbarMiddleware')
# Log errors to Sentry instead of email, if available.
if 'sentry_dsn' in SECRETS:
INSTALLED_APPS.append('raven.contrib.django')
SENTRY_DSN = SECRETS['sentry_dsn']
LOGGING["loggers"]["django.request"]["handlers"].remove("mail_admins")
| # Settings for docs.djangoproject.com
from django_www.common_settings import *
### Django settings
CACHE_MIDDLEWARE_KEY_PREFIX = 'djangodocs'
INSTALLED_APPS = [
'django.contrib.sitemaps',
'django.contrib.sites',
'django.contrib.staticfiles',
'djangosecure',
'haystack',
'docs',
]
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
'djangosecure.middleware.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware'
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'docs.context_processors.recent_release',
'django.core.context_processors.request',
]
ROOT_URLCONF = 'django_docs.urls'
SITE_ID = 2
### Docs settings
if PRODUCTION:
DOCS_BUILD_ROOT = BASE.parent.child('docbuilds')
else:
DOCS_BUILD_ROOT = '/tmp/djangodocs'
### Haystack settings
HAYSTACK_SITECONF = 'docs.search_sites'
if PRODUCTION:
HAYSTACK_SEARCH_ENGINE = 'xapian'
HAYSTACK_XAPIAN_PATH = BASE.parent.child('djangodocs.index')
else:
HAYSTACK_SEARCH_ENGINE = 'whoosh'
HAYSTACK_WHOOSH_PATH = '/tmp/djangodocs.index'
### Enable optional components
if DEBUG:
try:
import debug_toolbar
except ImportError:
pass
else:
INSTALLED_APPS.append('debug_toolbar')
INTERNAL_IPS = ['127.0.0.1']
MIDDLEWARE_CLASSES.insert(
MIDDLEWARE_CLASSES.index('django.middleware.common.CommonMiddleware') + 1,
'debug_toolbar.middleware.DebugToolbarMiddleware')
# Log errors to Sentry instead of email, if available.
if 'sentry_dsn' in SECRETS:
INSTALLED_APPS.append('raven.contrib.django')
SENTRY_DSN = SECRETS['sentry_dsn']
LOGGING["loggers"]["django.request"]["handlers"].remove("mail_admins")
| bsd-3-clause | Python |
3434c404d8ab3d42bed4756338f1b8dba3a10255 | split debug_plot into debug and plot | zutshi/S3CAMR,zutshi/S3CAMR,zutshi/S3CAMR | src/settings.py | src/settings.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
debug = False
debug_plot = False
plot = False
# CE hack is ON
CE = True
def plt_show():
from matplotlib import pyplot as plt
if debug_plot or (debug and plot):
plt.show()
else:
plt.close()
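# Illustrative gating: with debug=True but plot=False the figure is closed;
# setting debug_plot=True (or debug and plot together) shows it instead.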
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
debug = False
debug_plot = False
plot = False
# CE hack is ON
CE = True
def plt_show():
from matplotlib import pyplot as plt
if debug_plot:
plt.show()
else:
plt.close()
| bsd-2-clause | Python |
00c14e981807668b09a5d6a2e71fe8872291acad | Add admin support for attachments | leifurhauks/django-mailbox,Shekharrajak/django-mailbox,coddingtonbear/django-mailbox,ad-m/django-mailbox | django_mailbox/admin.py | django_mailbox/admin.py | from django.conf import settings
from django.contrib import admin
from django_mailbox.models import MessageAttachment, Message, Mailbox
def get_new_mail(mailbox_admin, request, queryset):
for mailbox in queryset.all():
mailbox.get_new_mail()
get_new_mail.short_description = 'Get new mail'
class MailboxAdmin(admin.ModelAdmin):
list_display = (
'name',
'uri',
'from_email',
'active',
)
actions = [get_new_mail]
class MessageAttachmentAdmin(admin.ModelAdmin):
pass
class MessageAdmin(admin.ModelAdmin):
list_display = (
'subject',
'processed',
'mailbox',
'outgoing',
)
ordering = ['-processed']
list_filter = (
'mailbox',
'outgoing',
)
raw_id_fields = (
'in_reply_to',
)
if getattr(settings, 'DJANGO_MAILBOX_ADMIN_ENABLED', True):
admin.site.register(Message, MessageAdmin)
    admin.site.register(MessageAttachment, MessageAttachmentAdmin)
admin.site.register(Mailbox, MailboxAdmin)
| from django.conf import settings
from django.contrib import admin
from django_mailbox.models import Message, Mailbox
def get_new_mail(mailbox_admin, request, queryset):
for mailbox in queryset.all():
mailbox.get_new_mail()
get_new_mail.short_description = 'Get new mail'
class MailboxAdmin(admin.ModelAdmin):
list_display = (
'name',
'uri',
'from_email',
'active',
)
actions = [get_new_mail]
class MessageAdmin(admin.ModelAdmin):
list_display = (
'subject',
'processed',
'mailbox',
'outgoing',
)
ordering = ['-processed']
list_filter = (
'mailbox',
'outgoing',
)
raw_id_fields = (
'in_reply_to',
)
if getattr(settings, 'DJANGO_MAILBOX_ADMIN_ENABLED', True):
admin.site.register(Message, MessageAdmin)
admin.site.register(Mailbox, MailboxAdmin)
| mit | Python |
48c880a35c899929da33f20e9cd4ee7e4fd8bc7e | Set a custom name template including the replica set | hudl/Tyr | servers/mongo/data.py | servers/mongo/data.py | from .. import Server
import logging
class MongoDataNode(Server):
log = logging.getLogger('Servers.MongoDataNode')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s: %(message)s',
datefmt = '%H:%M:%S')
ch.setFormatter(formatter)
log.addHandler(ch)
def __init__(self, dry = None, verbose = None, size = None, cluster = None,
environment = None, ami = None, region = None, role = None,
keypair = None, availability_zone = None,
security_groups = None, block_devices = None,
replica_set = None, replica_set_index = None):
super(MongoDataNode, self).__init__(dry, verbose, size, cluster,
environment, ami, region, role,
keypair, availability_zone,
security_groups, block_devices)
self.replica_set = replica_set
self.replica_set_index = replica_set_index
def configure(self):
super(MongoDataNode, self).configure()
if self.replica_set is None:
self.log.warn('No replica set provided')
self.replica_set = 1
self.log.info('Using replica set {set}'.format(set=self.replica_set))
if self.replica_set_index is None:
            self.log.warn('No replica set index provided')
self.replica_set_index = 1
self.log.info('Using replica set index {index}'.format(
index=self.replica_set_index))
@property
def name(self):
try:
return self.unique_name
except Exception:
pass
template = '{envcl}-rs{set}-{zone}-{index}'
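        # Illustrative result (values assumed): envcl='stage-mongo',
        # replica_set=2, zone 'us-east-1c', index 1 -> 'stage-mongo-rs2-c-1'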
name = template.format(envcl=self.envcl, set=self.replica_set,
zone=self.availability_zone[-1:],
index=self.replica_set_index)
self.unique_name = name
self.log.info('Using node name {name}'.format(name=name))
return name
| from .. import Server
import logging
class MongoDataNode(Server):
log = logging.getLogger('Servers.MongoDataNode')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s: %(message)s',
datefmt = '%H:%M:%S')
ch.setFormatter(formatter)
log.addHandler(ch)
def __init__(self, dry = None, verbose = None, size = None, cluster = None,
environment = None, ami = None, region = None, role = None,
keypair = None, availability_zone = None,
security_groups = None, block_devices = None,
replica_set = None, replica_set_index = None):
super(MongoDataNode, self).__init__(dry, verbose, size, cluster,
environment, ami, region, role,
keypair, availability_zone,
security_groups, block_devices)
self.replica_set = replica_set
self.replica_set_index = replica_set_index
def configure(self):
super(MongoDataNode, self).configure()
if self.replica_set is None:
self.log.warn('No replica set provided')
self.replica_set = 1
self.log.info('Using replica set {set}'.format(set=self.replica_set))
if self.replica_set_index is None:
            self.log.warn('No replica set index provided')
self.replica_set_index = 1
self.log.info('Using replica set index {index}'.format(
index=self.replica_set_index))
| unlicense | Python |
71289d3a22476001421454ff736ea03742e43158 | Add basic parser | praekelt/vumi-twilio-api | vumi_twilio_api/twilml_parser.py | vumi_twilio_api/twilml_parser.py | import xml.etree.ElementTree as ET
class Verb(object):
"""Represents a single verb in TwilML. """
def __init__(self, verb, attributes={}, nouns={}):
self.verb = verb
self.attributes = attributes
self.nouns = nouns
class TwilMLParseError(Exception):
"""Raised when trying to parse invalid TwilML"""
class TwilMLParser(object):
"""Parser for TwilML"""
def parse_xml(self, xml):
"""Parses TwilML and returns a list of :class:`Verb` objects"""
verbs = []
root = ET.fromstring(xml)
if root.tag != "Response":
raise TwilMLParseError(
"Invalid root %r. Should be 'Request'." % root.tag)
for child in root:
parser = getattr(
self, '_parse_%s' % child.tag, self._parse_default)
verbs.append(parser(child))
return verbs
def _parse_default(self, element):
raise TwilMLParseError("Unable to parse verb %r" % element.tag)
| class Verb(object):
"""Represents a single verb in TwilML. """
def __init__(self, verb, attributes={}, nouns={}):
self.verb = verb
self.attributes = attributes
self.nouns = nouns
| bsd-3-clause | Python |
a49095bf078603e046288629aa8497f031ed6bd3 | Add transpose_join, joins 2 infinite lists by transposing the next elements | muddyfish/PYKE,muddyfish/PYKE | node/divide.py | node/divide.py | #!/usr/bin/env python
from nodes import Node
from type.type_infinite_list import DummyList
class Divide(Node):
char = "/"
args = 2
results = 1
@Node.test_func([4, 2], [2])
@Node.test_func([2, 4], [0.5])
def func(self, a: Node.number, b: Node.number):
"""a/b. floating point division.
For integer division, see `f`"""
return a/b
@Node.test_func(["test", "t"], [2])
@Node.test_func([(3, 1, 2, 1, 3), 3], [2])
def count(self, a: Node.indexable, b):
"""a.count(b)"""
return a.count(b)
@Node.test_func([[4, 4, 2, 2, 9, 9], [1, 2, 3]], [[[4], [4, 2], [2, 9, 9]]])
def split_length(self, inp: Node.indexable, lengths: Node.sequence):
"""Split inp into sections length lengths"""
rtn = [[]]
cur_length = 0
for i in inp:
if cur_length != len(lengths) and len(rtn[-1]) == lengths[cur_length]:
cur_length += 1
rtn.append([])
rtn[-1].append(i)
return [rtn]
def time_int_div(self, a: Node.clock, b: Node.number):
return a.divide_int(b)
def time_int_div_2(self, a: Node.number, b: Node.clock):
return b.divide_int(a)
def time_div(self, a: Node.clock, b: Node.clock):
return b.divide_time(a)
def transpose_inf_list(self, a: Node.infinite, b: Node.infinite):
def transpose():
while 1:
yield next(a)
yield next(b)
return DummyList(transpose())
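# Illustrative interleaving: joining the naturals 1, 2, 3, ... with the
# evens 2, 4, 6, ... yields the stream 1, 2, 2, 4, 3, 6, ...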
| #!/usr/bin/env python
from nodes import Node
class Divide(Node):
"""
Takes two items from the stack and divides them
"""
char = "/"
args = 2
results = 1
@Node.test_func([4,2], [2])
@Node.test_func([2,4], [0.5])
def func(self, a: Node.number, b: Node.number):
"""a/b. floating point division.
For integer division, see `f`"""
return a/b
@Node.test_func(["test", "t"], [2])
@Node.test_func([(3,1,2,1,3), 3], [2])
def count(self, a: Node.indexable, b):
"""a.count(b)"""
return a.count(b)
@Node.test_func([[4, 4, 2, 2, 9, 9], [1, 2, 3]], [[[4], [4, 2], [2, 9, 9]]])
def split_length(self, inp: Node.indexable, lengths: Node.sequence):
"""Split inp into sections length lengths"""
rtn = [[]]
cur_length = 0
for i in inp:
if cur_length != len(lengths) and len(rtn[-1]) == lengths[cur_length]:
cur_length += 1
rtn.append([])
rtn[-1].append(i)
return [rtn]
def time_int_div(self, a: Node.clock, b: Node.number):
return a.divide_int(b)
def time_int_div_2(self, a: Node.number, b: Node.clock):
return b.divide_int(a)
def time_div(self, a: Node.clock, b: Node.clock):
return b.divide_time(a) | mit | Python |
87d792fda8763f49d83ce274015f3a436a0c89cc | send message after stuff is started | gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty | dusty/commands/run.py | dusty/commands/run.py |
from ..compiler import (compose as compose_compiler, nginx as nginx_compiler,
port_spec as port_spec_compiler, spec_assembler)
from ..systems import compose, hosts, nginx, virtualbox
def start_local_env():
""" This command will use the compilers to get compose specs
will pass those specs to the systems that need them. Those
systems will in turn launch the services needed to make the
local environment go"""
assembled_spec = spec_assembler.get_assembled_specs()
port_spec = port_spec_compiler.get_port_spec_document(assembled_spec)
nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec)
compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec)
hosts.update_hosts_file_from_port_spec(port_spec)
virtualbox.update_virtualbox_port_forwarding_from_port_spec(port_spec)
nginx.update_nginx_from_config(nginx_config)
compose.update_running_containers_from_spec(compose_config)
yield "Your local environment is now started"
|
from ..compiler import (compose as compose_compiler, nginx as nginx_compiler,
port_spec as port_spec_compiler, spec_assembler)
from ..systems import compose, hosts, nginx, virtualbox
def start_local_env():
""" This command will use the compilers to get compose specs
will pass those specs to the systems that need them. Those
systems will in turn launch the services needed to make the
local environment go"""
assembled_spec = spec_assembler.get_assembled_specs()
port_spec = port_spec_compiler.get_port_spec_document(assembled_spec)
nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec)
compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec)
hosts.update_hosts_file_from_port_spec(port_spec)
virtualbox.update_virtualbox_port_forwarding_from_port_spec(port_spec)
nginx.update_nginx_from_config(nginx_config)
compose.update_running_containers_from_spec(compose_config)
| mit | Python |
7f2ac925b2343e57ad7f4a6d79ee24e14c8f4d78 | Add a Bazel rule assignment_notebook(). | google/prog-edu-assistant,google/prog-edu-assistant,google/prog-edu-assistant,google/prog-edu-assistant | exercises/defs.bzl | exercises/defs.bzl | # TODO(salikh): Implement the automatic tar rules too
def assignment_notebook_macro(
name,
srcs,
language = None,
visibility = ["//visibility:private"]):
"""
Defines a rule for student notebook and autograder
generation from a master notebook.
Arguments:
name:
srcs: the file name of the input notebook should end in '-master.ipynb'.
"""
language_opt = ""
if language:
language_opt = " --language=" + language
native.genrule(
name = name + "_student",
srcs = srcs,
outs = [name + '-student.ipynb'],
cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --preamble=$(location //exercises:preamble.py) --command=student""" + language_opt,
tools = [
"//go/cmd/assign",
"//exercises:preamble.py",
],
)
autograder_output = name + '-autograder'
native.genrule(
name = name + "_autograder",
srcs = srcs,
outs = [autograder_output],
cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --command=autograder""" + language_opt,
tools = [
"//go/cmd/assign",
],
)
def _assignment_notebook_impl(ctx):
print("src = ", ctx.attr.src)
print("src.path = ", ctx.file.src.path)
outs = []
languages = ctx.attr.languages
inputs = [ctx.file.src]
preamble_opt = ""
if ctx.file.preamble:
preamble_opt = " --preamble='" + ctx.file.preamble.path + "'"
inputs.append(ctx.file.preamble)
if len(languages) == 0:
# Force the language-agnostic notebook generation by default.
languages = [""]
for lang in languages:
outfile = ctx.label.name + ("-" + lang if lang else "") + "-student.ipynb"
out = ctx.actions.declare_file(outfile)
outs.append(out)
language_opt = ""
if lang:
language_opt = " -language='" + lang + "'"
print(" command = " + ctx.executable._assign.path + " --command=student --input='" + ctx.file.src.path + "'" + " --output='" + out.path + "'" + language_opt + preamble_opt)
ctx.actions.run_shell(
inputs = inputs,
outputs = [out],
tools = [ctx.executable._assign],
progress_message = "Running %s" % ctx.executable._assign.path,
command = ctx.executable._assign.path + " --command=student --input='" + ctx.file.src.path + "'" + " --output='" + out.path + "'" + language_opt + preamble_opt,
)
return [DefaultInfo(files = depset(outs))]
# Defines a rule for student notebook and autograder
# generation from a master notebook.
#
# Arguments:
# name:
assignment_notebook = rule(
implementation = _assignment_notebook_impl,
attrs = {
# Specifies the list of languages to generate student notebooks.
# If omitted, defaults to empty list, which means that a
# single language-agnostic notebook will be generated.
# It is also possible to generate language-agnostic notebook
# (skipping filtering by language) by adding an empty string
# value to languages.
"languages": attr.string_list(default=[], mandatory=False),
# The file name of the input notebook.
"src": attr.label(
mandatory=True,
allow_single_file=True),
# If present, specifies the label of the preamble file.
"preamble": attr.label(
default=None,
mandatory=False,
allow_single_file=True),
"_assign": attr.label(
default = Label("//go/cmd/assign"),
allow_single_file = True,
executable = True,
cfg = "host",
),
},
)
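# Example usage in a BUILD file (illustrative; target and file names are
# assumptions):
#
#   assignment_notebook(
#       name = "helloworld",
#       src = "helloworld-master.ipynb",
#       languages = ["en", "ja"],
#       preamble = "//exercises:preamble.py",
#   )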
| # TODO(salikh): Implement the automatic tar rules too
def assignment_notebook_macro(
name,
srcs,
language = None,
visibility = ["//visibility:private"]):
"""
Defines a rule for student notebook and autograder
generation from a master notebook.
Arguments:
name:
srcs: the file name of the input notebook should end in '-master.ipynb'.
"""
language_opt = ""
if language:
language_opt = " --language=" + language
native.genrule(
name = name + "_student",
srcs = srcs,
outs = [name + '-student.ipynb'],
cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --preamble=$(location //exercises:preamble.py) --command=student""" + language_opt,
tools = [
"//go/cmd/assign",
"//exercises:preamble.py",
],
)
autograder_output = name + '-autograder'
native.genrule(
name = name + "_autograder",
srcs = srcs,
outs = [autograder_output],
cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --command=autograder""" + language_opt,
tools = [
"//go/cmd/assign",
],
)
| apache-2.0 | Python |
f274f927d600989db1d485212d116166695e6edd | Use keyword arguments for readability | eugene-eeo/scell | scell/core.py | scell/core.py | """
scell.core
~~~~~~~~~~
Provides abstractions over lower level APIs and
file objects and their interests.
"""
from select import select as _select
from collections import namedtuple
def select(rl, wl, timeout=None):
"""
Returns the file objects ready for reading/writing
from the read-list (*rl*) and write-list (*wl*),
subject to *timeout* in seconds.
:param rl: Objects interested in readability.
:param wl: Objects interested in writability.
:param timeout: Maximum blocking time in seconds,
*None* for no timeout.
"""
if not (rl or wl):
return [], []
readers, writers, _ = _select(rl, wl, (), timeout)
return readers, writers
class Monitored(namedtuple('_Monitored', 'fp,wants_read,wants_write,callback')):
"""
Represents the interests of a file handle *fp*,
and whether it *wants_read* and or *wants_write*,
as well as an attached *callback*.
"""
__slots__ = ()
class Event(namedtuple('_Event', 'monitored,readable,writable,fp,callback,ready')):
"""
Represents the readability or writability
of a *monitored* file object.
"""
__slots__ = ()
def __new__(cls, monitored, readable, writable):
ready = (
readable >= monitored.wants_read and
writable >= monitored.wants_write
)
return super(Event, cls).__new__(
cls,
monitored,
readable,
writable,
fp=monitored.fp,
callback=monitored.callback,
ready=ready,
)
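# Minimal illustrative usage (sock is an assumed readable file object):
#   m = Monitored(sock, wants_read=True, wants_write=False, callback=None)
#   readers, writers = select([m.fp], [], timeout=1.0)
#   assert Event(m, readable=True, writable=False).ready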
| """
scell.core
~~~~~~~~~~
Provides abstractions over lower level APIs and
file objects and their interests.
"""
from select import select as _select
from collections import namedtuple
def select(rl, wl, timeout=None):
"""
Returns the file objects ready for reading/writing
from the read-list (*rl*) and write-list (*wl*),
subject to *timeout* in seconds.
:param rl: Objects interested in readability.
:param wl: Objects interested in writability.
:param timeout: Maximum blocking time in seconds,
*None* for no timeout.
"""
if not (rl or wl):
return [], []
readers, writers, _ = _select(rl, wl, (), timeout)
return readers, writers
class Monitored(namedtuple('_Monitored', 'fp,wants_read,wants_write,callback')):
"""
Represents the interests of a file handle *fp*,
and whether it *wants_read* and or *wants_write*,
as well as an attached *callback*.
"""
__slots__ = ()
class Event(namedtuple('_Event', 'monitored,readable,writable,fp,callback,ready')):
"""
Represents the readability or writability
of a *monitored* file object.
"""
__slots__ = ()
def __new__(cls, monitored, readable, writable):
ready = (
readable >= monitored.wants_read and
writable >= monitored.wants_write
)
return super(Event, cls).__new__(
cls,
monitored,
readable,
writable,
monitored.fp,
monitored.callback,
ready,
)
| mit | Python |
e7cce08f32516bc8b15df7eee0c285eebe795cab | Make it easier to filter on multiple field values | alphagov/govuk-content-explorer,alphagov/govuk-content-explorer | explorer/search.py | explorer/search.py | from . import config
from .document import Document
import requests
from time import time
def perform_search(**params):
response = requests.get(
config.GOVUK_SEARCH_API,
params=params,
auth=config.AUTH,
)
return response.json()
def fetch_documents(scope):
documents = perform_search(**fetch_document_args(scope))
facets = {}
for field in Document.FACET_FIELDS:
start = time()
facet_results = perform_search(**fetch_facet_args(scope, field))
facets[field] = facet_results["facets"][field]
print "Fetched %s facet in %fs" % (field, time() - start)
return present_documents(documents, facets)
def fetch_lots_of_documents(scope, max_documents):
fetched = 0
search_args = fetch_document_args(scope)
while fetched < max_documents:
search_args["start"] = fetched
documents = perform_search(**search_args).get("results", [])
if len(documents) == 0:
break
for document in documents:
yield Document(document)
fetched += 1
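# Pagination note: each pass re-issues the search with start=fetched, so with
# the 1000-document page size above this walks start=0, 1000, 2000, ...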
def fetch_document_args(scope):
args = scope.search_args()
args["count"] = 1000
args["fields"] = ",".join(Document.DISPLAY_FIELDS)
return args
def fetch_facet_args(scope, facet_field):
args = scope.search_args()
args["count"] = 0
args["facet_" + facet_field] = "1000,scope:exclude_field_filter"
return args
def present_documents(documents, facets):
return {
"count": documents["total"],
"documents": [Document(document)
for document in documents["results"]
],
"facets": facets,
}
| from . import config
from .document import Document
import requests
from time import time
def perform_search(**params):
response = requests.get(
config.GOVUK_SEARCH_API,
params=params,
auth=config.AUTH,
)
return response.json()
def fetch_documents(scope):
documents = perform_search(**fetch_document_args(scope))
facets = {}
for field in Document.FACET_FIELDS:
start = time()
facet_results = perform_search(**fetch_facet_args(scope, field))
facets[field] = facet_results["facets"][field]
print "Fetched %s facet in %fs" % (field, time() - start)
return present_documents(documents, facets)
def fetch_lots_of_documents(scope, max_documents):
fetched = 0
search_args = fetch_document_args(scope)
while fetched < max_documents:
search_args["start"] = fetched
documents = perform_search(**search_args).get("results", [])
if len(documents) == 0:
break
for document in documents:
yield Document(document)
fetched += 1
def fetch_document_args(scope):
args = scope.search_args()
args["count"] = 1000
args["fields"] = ",".join(Document.DISPLAY_FIELDS)
return args
def fetch_facet_args(scope, facet_field):
args = scope.search_args()
args["count"] = 0
args["facet_" + facet_field] = "1000,scope:all_filters"
return args
def present_documents(documents, facets):
return {
"count": documents["total"],
"documents": [Document(document)
for document in documents["results"]
],
"facets": facets,
}
| mit | Python |
10d0b7c452c8d9d5893cfe612e0beaa738f61628 | Add to template builtins only if add_to_buitlins is available (Django <= 1.8) | nigma/django-easy-pjax,nigma/django-easy-pjax,nigma/django-easy-pjax | easy_pjax/__init__.py | easy_pjax/__init__.py | #-*- coding: utf-8 -*-
"""
Register filter so it is available for use in the `extends` template tag
(The `extends` tag must come first in a template, so regular `load` is not
an option).
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__version__ = "1.2.0"
has_add_to_builtins = True
try:
from django.template import add_to_builtins
except ImportError:
try:
# import path changed in 1.8
from django.template.base import add_to_builtins
except ImportError:
has_add_to_builtins = False
if has_add_to_builtins:
add_to_builtins("easy_pjax.templatetags.pjax_tags")
| #-*- coding: utf-8 -*-
"""
Register filter so it is available for use in the `extends` template tag
(The `extends` tag must come first in a template, so regular `load` is not
an option).
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__version__ = "1.2.0"
try:
from django.template import add_to_builtins
except ImportError:
# import path changed in 1.8
from django.template.base import add_to_builtins
add_to_builtins("easy_pjax.templatetags.pjax_tags")
| bsd-3-clause | Python |
e145ef6ca54c9615f038601da17daf16550196d6 | Use environment variables to locate Windows GStreamer includes | dturing/node-gstreamer-superficial,dturing/node-gstreamer-superficial | binding.gyp | binding.gyp | {
"targets": [
{
"target_name": "gstreamer-superficial",
"sources": [ "gstreamer.cpp", "GLibHelpers.cpp", "GObjectWrap.cpp", "Pipeline.cpp" ],
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"cflags": [
"-Wno-cast-function-type"
],
"conditions" : [
["OS=='linux'", {
"include_dirs": [
'<!@(pkg-config gstreamer-1.0 --cflags-only-I | sed s/-I//g)',
'<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)',
          '<!@(pkg-config gstreamer-video-1.0 --cflags-only-I | sed s/-I//g)'
],
"libraries": [
'<!@(pkg-config gstreamer-1.0 --libs)',
'<!@(pkg-config gstreamer-app-1.0 --libs)',
'<!@(pkg-config gstreamer-video-1.0 --libs)'
]
}],
["OS=='mac'", {
"include_dirs": [
'<!@(pkg-config gstreamer-1.0 --cflags-only-I | sed s/-I//g)',
'<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)',
          '<!@(pkg-config gstreamer-video-1.0 --cflags-only-I | sed s/-I//g)'
],
"libraries": [
'<!@(pkg-config gstreamer-1.0 --libs)',
'<!@(pkg-config gstreamer-app-1.0 --libs)',
'<!@(pkg-config gstreamer-video-1.0 --libs)'
]
}],
["OS=='win'", {
"include_dirs": [
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)include\gstreamer-1.0",
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)lib\glib-2.0\include",
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)include\glib-2.0",
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)include\libxml2"
],
"libraries": [
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)lib\gstreamer-1.0.lib",
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)lib\gstapp-1.0.lib",
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)lib\gstvideo-1.0.lib",
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)lib\gobject-2.0.lib",
"<!(echo %GSTREAMER_1_0_ROOT_X86_64%)lib\glib-2.0.lib"
]
}]
]
}
]
}
| {
"targets": [
{
"target_name": "gstreamer-superficial",
"sources": [ "gstreamer.cpp", "GLibHelpers.cpp", "GObjectWrap.cpp", "Pipeline.cpp" ],
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"cflags": [
"-Wno-cast-function-type"
],
"conditions" : [
["OS=='linux'", {
"include_dirs": [
'<!@(pkg-config gstreamer-1.0 --cflags-only-I | sed s/-I//g)',
'<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)',
          '<!@(pkg-config gstreamer-video-1.0 --cflags-only-I | sed s/-I//g)'
],
"libraries": [
'<!@(pkg-config gstreamer-1.0 --libs)',
'<!@(pkg-config gstreamer-app-1.0 --libs)',
'<!@(pkg-config gstreamer-video-1.0 --libs)'
]
}],
["OS=='mac'", {
"include_dirs": [
'<!@(pkg-config gstreamer-1.0 --cflags-only-I | sed s/-I//g)',
'<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)',
          '<!@(pkg-config gstreamer-video-1.0 --cflags-only-I | sed s/-I//g)'
],
"libraries": [
'<!@(pkg-config gstreamer-1.0 --libs)',
'<!@(pkg-config gstreamer-app-1.0 --libs)',
'<!@(pkg-config gstreamer-video-1.0 --libs)'
]
}],
["OS=='win'", {
"include_dirs": [
"X:/gstreamer-sdk/1.0/x86_64/include/gstreamer-1.0",
"X:/gstreamer-sdk/1.0/x86_64/include/glib-2.0",
"X:/gstreamer-sdk/1.0/x86_64/include/libxml2"
],
"libraries": [
"X:/gstreamer-sdk/1.0/x86_64/lib/gstreamer-1.0.lib",
"X:/gstreamer-sdk/1.0/x86_64/lib/gstapp-1.0.lib",
"X:/gstreamer-sdk/1.0/x86_64/lib/gstvideo-1.0.lib",
"X:/gstreamer-sdk/1.0/x86_64/lib/gobject-2.0.lib",
"X:/gstreamer-sdk/1.0/x86_64/lib/glib-2.0.lib"
]
}]
]
}
]
}
| mit | Python |
043a0ad774964d2608ee1c8bd8ba1abc5b2ed0b4 | Tweak binding.gyp so it doesn't error out on Windows | enlight/node-unix-pty,enlight/node-unix-pty,enlight/node-unix-pty | binding.gyp | binding.gyp | {
'targets': [{
'target_name': 'pty',
'conditions': [
['OS!="win"', {
'include_dirs' : [
'<!(node -e "require(\'nan\')")'
],
'sources': [
'src/unix/pty.cc'
],
'libraries': [
'-lutil',
'-L/usr/lib',
'-L/usr/local/lib'
],
'conditions': [
# http://www.gnu.org/software/gnulib/manual/html_node/forkpty.html
# One some systems (at least including Cygwin, Interix,
# OSF/1 4 and 5, and Mac OS X) linking with -lutil is not required.
['OS=="mac" or OS=="solaris"', {
'libraries!': [
'-lutil'
]
}]
]
}]
]
}],
}
| {
'conditions': [
['OS!="win"', {
'targets': [{
'target_name': 'pty',
'include_dirs' : [
'<!(node -e "require(\'nan\')")'
],
'sources': [
'src/unix/pty.cc'
],
'libraries': [
'-lutil',
'-L/usr/lib',
'-L/usr/local/lib'
],
'conditions': [
# http://www.gnu.org/software/gnulib/manual/html_node/forkpty.html
# One some systems (at least including Cygwin, Interix,
# OSF/1 4 and 5, and Mac OS X) linking with -lutil is not required.
['OS=="mac" or OS=="solaris"', {
'libraries!': [
'-lutil'
]
}]
]
}]
}]
]
}
| mit | Python |
5a6f748981554cb4d4aa0b5500a9b86bd09eb1b5 | Add Linux static bindings | lgeiger/zmq-prebuilt,lgeiger/zmq-prebuilt,interpretor/zeromq.js,lgeiger/zmq-prebuilt,interpretor/zeromq.js,lgeiger/zmq-prebuilt,interpretor/zeromq.js,interpretor/zeromq.js,interpretor/zeromq.js,lgeiger/zmq-prebuilt | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'zmq',
'sources': [ 'binding.cc' ],
'include_dirs' : [
"<!(node -e \"require('nan')\")"
],
'conditions': [
['OS=="win"', {
'win_delay_load_hook': 'true',
'include_dirs': ['windows/include'],
'link_settings': {
'libraries': [
'Delayimp.lib',
],
'conditions': [
['target_arch=="ia32"', {
'libraries': [
'<(PRODUCT_DIR)/../../windows/lib/x86/libzmq.lib',
]
},{
'libraries': [
'<(PRODUCT_DIR)/../../windows/lib/x64/libzmq.lib',
]
}]
],
},
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': ['libzmq.dll']
}
},
}, {
'libraries': [ '<(PRODUCT_DIR)/../../zmq/lib/libzmq.a' ],
'include_dirs': [ '<(PRODUCT_DIR)/../../zmq/include' ],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
}],
['OS=="mac" or OS=="solaris"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'MACOSX_DEPLOYMENT_TARGET': '10.6',
},
'libraries': [ '<(PRODUCT_DIR)/../../zmq/lib/libzmq.a' ],
}],
['OS=="openbsd" or OS=="freebsd"', {
}],
['OS=="linux"', {
'libraries': [ '<(PRODUCT_DIR)/../../zmq/lib/libzmq.a' ],
}],
]
}
]
}
| {
'targets': [
{
'target_name': 'zmq',
'sources': [ 'binding.cc' ],
'include_dirs' : [
"<!(node -e \"require('nan')\")"
],
'conditions': [
['OS=="win"', {
'win_delay_load_hook': 'true',
'include_dirs': ['windows/include'],
'link_settings': {
'libraries': [
'Delayimp.lib',
],
'conditions': [
['target_arch=="ia32"', {
'libraries': [
'<(PRODUCT_DIR)/../../windows/lib/x86/libzmq.lib',
]
},{
'libraries': [
'<(PRODUCT_DIR)/../../windows/lib/x64/libzmq.lib',
]
}]
],
},
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': ['libzmq.dll']
}
},
}, {
'libraries': [ '<(PRODUCT_DIR)/../../zmq/lib/libzmq.a' ],
'include_dirs': [ '<(PRODUCT_DIR)/../../zmq/include' ],
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
}],
['OS=="mac" or OS=="solaris"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'MACOSX_DEPLOYMENT_TARGET': '10.6',
},
'libraries': [ '<(PRODUCT_DIR)/../../zmq/lib/libzmq.a' ],
}],
['OS=="openbsd" or OS=="freebsd"', {
}],
['OS=="linux"', {
}],
]
}
]
}
| mit | Python |
777bb37f9ac4457dca79a07953356ce46b941a30 | change '-std=c++11' to '-std=c++0x' for linux | rick68/eigenjs,rick68/eigenjs,rick68/eigenjs | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'eigen',
'sources': [
'src/EigenJS.cpp'
],
'include_dirs': [
'deps',
"<!(node -e \"require('nan')\")"
],
'conditions': [
['OS=="win"', {
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,
'AdditionalOptions': [ '/GR', '/EHsc', '/wd4018', '/wd4506' ]
}
}
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'cflags': [ '-std=c++0x' ],
'cflags_cc!': [ '-fno-rtti', '-fno-exceptions']
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES',
'OTHER_CPLUSPLUSFLAGS': [ '-std=c++11', '-stdlib=libc++' ],
'OTHER_LDFLAGS': [ '-stdlib=libc++' ],
'MACOSX_DEPLOYMENT_TARGET': '10.7'
}
}]
]
}
]
}
| {
'targets': [
{
'target_name': 'eigen',
'sources': [
'src/EigenJS.cpp'
],
'include_dirs': [
'deps',
"<!(node -e \"require('nan')\")"
],
'conditions': [
['OS=="win"', {
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,
'AdditionalOptions': [ '/GR', '/EHsc', '/wd4018', '/wd4506' ]
}
}
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'cflags': [ '-std=c++11' ],
'cflags_cc!': [ '-fno-rtti', '-fno-exceptions']
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES',
'OTHER_CPLUSPLUSFLAGS': [ '-std=c++11', '-stdlib=libc++' ],
'OTHER_LDFLAGS': [ '-stdlib=libc++' ],
'MACOSX_DEPLOYMENT_TARGET': '10.7'
}
}]
]
}
]
}
| mpl-2.0 | Python |
786e7d83672ad5ff2718c9a440dbd180f8e7b24a | make addon buildable as static library (#119) | christkv/kerberos,christkv/kerberos,christkv/kerberos,christkv/kerberos | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'kerberos',
'type': 'loadable_module',
'include_dirs': [ '<!(node -e "require(\'nan\')")' ],
'sources': [
'src/kerberos.cc'
],
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.12',
'OTHER_CFLAGS': [
"-std=c++11",
"-stdlib=libc++"
],
},
'conditions': [
['OS=="mac" or OS=="linux"', {
'sources': [
'src/unix/base64.cc',
'src/unix/kerberos_gss.cc',
'src/unix/kerberos_unix.cc'
],
'link_settings': {
'libraries': [
'-lkrb5',
'-lgssapi_krb5'
]
},
'conditions': [
['_type=="static_library"', {
'link_settings': {
'libraries': [
'-lcom_err'
]
}
}]
]
}],
['OS=="win"', {
'sources': [
'src/win32/kerberos_sspi.cc',
'src/win32/kerberos_win32.cc'
],
'link_settings': {
'libraries': [
'-lcrypt32',
'-lsecur32',
'-lShlwapi'
]
}
}]
]
}
]
} | {
'targets': [
{
'target_name': 'kerberos',
'include_dirs': [ '<!(node -e "require(\'nan\')")' ],
'sources': [
'src/kerberos.cc'
],
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.12',
'OTHER_CFLAGS': [
"-std=c++11",
"-stdlib=libc++"
],
},
'conditions': [
['OS=="mac" or OS=="linux"', {
'sources': [
'src/unix/base64.cc',
'src/unix/kerberos_gss.cc',
'src/unix/kerberos_unix.cc'
],
'link_settings': {
'libraries': [
'-lkrb5',
'-lgssapi_krb5'
]
}
}],
['OS=="win"', {
'sources': [
'src/win32/kerberos_sspi.cc',
'src/win32/kerberos_win32.cc'
],
'link_settings': {
'libraries': [
'crypt32.lib',
'secur32.lib',
'Shlwapi.lib'
]
}
}]
]
}
]
} | apache-2.0 | Python |
b6208c1f9b6f0afca1dff40a66d2c915594b1946 | Add exception hook to help diagnose server test errors in python3 gui mode | caseyclements/blaze,dwillmer/blaze,jcrist/blaze,mrocklin/blaze,cowlicks/blaze,ContinuumIO/blaze,jcrist/blaze,xlhtc007/blaze,FrancescAlted/blaze,aterrel/blaze,nkhuyu/blaze,AbhiAgarwal/blaze,markflorisson/blaze-core,nkhuyu/blaze,FrancescAlted/blaze,ChinaQuants/blaze,markflorisson/blaze-core,alexmojaki/blaze,alexmojaki/blaze,jdmcbr/blaze,mrocklin/blaze,AbhiAgarwal/blaze,maxalbert/blaze,dwillmer/blaze,markflorisson/blaze-core,AbhiAgarwal/blaze,scls19fr/blaze,LiaoPan/blaze,scls19fr/blaze,mwiebe/blaze,aterrel/blaze,xlhtc007/blaze,aterrel/blaze,ContinuumIO/blaze,AbhiAgarwal/blaze,ChinaQuants/blaze,jdmcbr/blaze,cpcloud/blaze,LiaoPan/blaze,maxalbert/blaze,cpcloud/blaze,cowlicks/blaze,mwiebe/blaze,mwiebe/blaze,markflorisson/blaze-core,FrancescAlted/blaze,caseyclements/blaze,mwiebe/blaze,FrancescAlted/blaze | blaze/io/server/tests/start_simple_server.py | blaze/io/server/tests/start_simple_server.py | """
Starts a Blaze server for tests.
$ start_test_server.py /path/to/catalog_config.yaml <portnumber>
"""
import sys, os
if os.name == 'nt':
old_excepthook = sys.excepthook
# Exclude this from our autogenerated API docs.
undoc = lambda func: func
@undoc
def gui_excepthook(exctype, value, tb):
try:
import ctypes, traceback
MB_ICONERROR = 0x00000010
title = u'Error starting test Blaze server'
msg = u''.join(traceback.format_exception(exctype, value, tb))
ctypes.windll.user32.MessageBoxW(0, msg, title, MB_ICONERROR)
finally:
# Also call the old exception hook to let it do
# its thing too.
old_excepthook(exctype, value, tb)
sys.excepthook = gui_excepthook
import blaze
from blaze.io.server.app import app
blaze.catalog.load_config(sys.argv[1])
app.run(port=int(sys.argv[2]), use_reloader=False)
| """
Starts a Blaze server for tests.
$ start_test_server.py /path/to/catalog_config.yaml <portnumber>
"""
import sys, os
import blaze
from blaze.io.server.app import app
blaze.catalog.load_config(sys.argv[1])
app.run(port=int(sys.argv[2]), use_reloader=False)
| bsd-3-clause | Python |
826698c9894ce94c625718eb041ce817eb6ab5ef | Update config.dist.py | projectshift/shift-boiler,projectshift/shift-boiler,projectshift/shift-boiler | boiler/boiler_template/config/config.dist.py | boiler/boiler_template/config/config.dist.py | from project.backend import config
class DefaultConfig(config.DefaultConfig):
""" Local development config """
# set this for offline mode
SERVER_NAME = None
SECRET_KEY = None
class DevConfig(config.DevConfig, DefaultConfig):
""" Local development config """
pass
class TestingConfig(config.TestingConfig, DefaultConfig):
""" Local testing config """
pass
| from project.backend import config
class DefaultConfig(config.DefaultConfig):
""" Local development config """
# set this for offline mode
SERVER_NAME = None
SECRET_KEY = None
class DevConfig(config.DevConfig, DefaultConfig):
""" Local development config """
pass
class TestingConfig(config.TestingConfig, DefaultConfig):
""" Local testing config """
| mit | Python |
4c4b1e6a4bde5edb9e11942245a21437e73fe6df | fix link creation | pirate/bookmark-archiver,pirate/bookmark-archiver,pirate/bookmark-archiver | archivebox/index/sql.py | archivebox/index/sql.py | __package__ = 'archivebox.index'
from io import StringIO
from typing import List, Tuple, Iterator
from .schema import Link
from ..util import enforce_types
from ..config import setup_django, OUTPUT_DIR
### Main Links Index
@enforce_types
def parse_sql_main_index(out_dir: str=OUTPUT_DIR) -> Iterator[Link]:
setup_django(out_dir, check_db=True)
from core.models import Snapshot
return (
Link.from_json(page.as_json(*Snapshot.keys))
for page in Snapshot.objects.all()
)
@enforce_types
def write_sql_main_index(links: List[Link], out_dir: str=OUTPUT_DIR) -> None:
setup_django(out_dir, check_db=True)
from core.models import Snapshot
from django.db import transaction
with transaction.atomic():
for link in links:
info = {k: v for k, v in link._asdict().items() if k in Snapshot.keys}
Snapshot.objects.update_or_create(url=link.url, defaults=info)
@enforce_types
def write_sql_link_details(link: Link, out_dir: str=OUTPUT_DIR) -> None:
setup_django(out_dir, check_db=True)
from core.models import Snapshot
from django.db import transaction
with transaction.atomic():
snap = Snapshot.objects.get(url=link.url, timestamp=link.timestamp)
snap.title = link.title
snap.tags = link.tags
snap.save()
@enforce_types
def list_migrations(out_dir: str=OUTPUT_DIR) -> List[Tuple[bool, str]]:
setup_django(out_dir, check_db=False)
from django.core.management import call_command
out = StringIO()
call_command("showmigrations", list=True, stdout=out)
out.seek(0)
migrations = []
for line in out.readlines():
if line.strip() and ']' in line:
status_str, name_str = line.strip().split(']', 1)
is_applied = 'X' in status_str
migration_name = name_str.strip()
migrations.append((is_applied, migration_name))
return migrations
@enforce_types
def apply_migrations(out_dir: str=OUTPUT_DIR) -> List[str]:
setup_django(out_dir, check_db=False)
from django.core.management import call_command
null, out = StringIO(), StringIO()
call_command("makemigrations", interactive=False, stdout=null)
call_command("migrate", interactive=False, stdout=out)
out.seek(0)
return [line.strip() for line in out.readlines() if line.strip()]
@enforce_types
def get_admins(out_dir: str=OUTPUT_DIR) -> List[str]:
setup_django(out_dir, check_db=False)
from django.contrib.auth.models import User
return User.objects.filter(is_superuser=True)
| __package__ = 'archivebox.index'
from io import StringIO
from typing import List, Tuple, Iterator
from .schema import Link
from ..util import enforce_types
from ..config import setup_django, OUTPUT_DIR
### Main Links Index
@enforce_types
def parse_sql_main_index(out_dir: str=OUTPUT_DIR) -> Iterator[Link]:
setup_django(out_dir, check_db=True)
from core.models import Snapshot
return (
Link.from_json(page.as_json(*Snapshot.keys))
for page in Snapshot.objects.all()
)
@enforce_types
def write_sql_main_index(links: List[Link], out_dir: str=OUTPUT_DIR) -> None:
setup_django(out_dir, check_db=True)
from core.models import Snapshot
from django.db import transaction
with transaction.atomic():
for link in links:
info = {k: v for k, v in link._asdict().items() if k in Snapshot.keys}
Snapshot.objects.update_or_create(url=url, defaults=info)
@enforce_types
def write_sql_link_details(link: Link, out_dir: str=OUTPUT_DIR) -> None:
setup_django(out_dir, check_db=True)
from core.models import Snapshot
from django.db import transaction
with transaction.atomic():
snap = Snapshot.objects.get(url=link.url, timestamp=link.timestamp)
snap.title = link.title
snap.tags = link.tags
snap.save()
@enforce_types
def list_migrations(out_dir: str=OUTPUT_DIR) -> List[Tuple[bool, str]]:
setup_django(out_dir, check_db=False)
from django.core.management import call_command
out = StringIO()
call_command("showmigrations", list=True, stdout=out)
out.seek(0)
migrations = []
for line in out.readlines():
if line.strip() and ']' in line:
status_str, name_str = line.strip().split(']', 1)
is_applied = 'X' in status_str
migration_name = name_str.strip()
migrations.append((is_applied, migration_name))
return migrations
@enforce_types
def apply_migrations(out_dir: str=OUTPUT_DIR) -> List[str]:
setup_django(out_dir, check_db=False)
from django.core.management import call_command
null, out = StringIO(), StringIO()
call_command("makemigrations", interactive=False, stdout=null)
call_command("migrate", interactive=False, stdout=out)
out.seek(0)
return [line.strip() for line in out.readlines() if line.strip()]
@enforce_types
def get_admins(out_dir: str=OUTPUT_DIR) -> List[str]:
setup_django(out_dir, check_db=False)
from django.contrib.auth.models import User
return User.objects.filter(is_superuser=True)
| mit | Python |
efdf4a4898cc3b5217ac5e45e75a74e19eee95d4 | bump version | google/evojax | evojax/version.py | evojax/version.py | __version__ = "0.1.0-14"
| __version__ = "0.1.0-13"
| apache-2.0 | Python |
155c953f7bf8590b4a11547369bee29baa5ea5f6 | Fix typo. | jsharkey13/isaac-selenium-testing,jsharkey13/isaac-selenium-testing | isaactest/tests/numeric_q_all_correct.py | isaactest/tests/numeric_q_all_correct.py | import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import answer_numeric_q
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException, NoSuchElementException
__all__ = ["numeric_q_all_correct"]
#####
# Test : Numeric Questions Correct Answers
#####
@TestWithDependency("NUMERIC_Q_ALL_CORRECT", ["NUMERIC_Q_UNITS_SELECT"])
def numeric_q_all_correct(driver, ISAAC_WEB, WAIT_DUR):
"""Test if numeric questions can be answered correctly.
- 'driver' should be a Selenium WebDriver.
- 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
- 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
"""
assert_tab(driver, ISAAC_WEB)
time.sleep(WAIT_DUR)
try:
num_question = driver.find_element_by_xpath("//div[@ng-switch-when='isaacNumericQuestion']")
except NoSuchElementException:
log(ERROR, "Can't find the numeric question; can't continue!")
return False
log(INFO, "Attempt to enter correct answer.")
if not answer_numeric_q(num_question, "2.01", "\units{ m\,s^{-1} }", wait_dur=WAIT_DUR):
log(ERROR, "Couldn't answer Numeric Question; can't continue!")
return False
time.sleep(WAIT_DUR)
try:
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//h1[text()='Correct!']")
log(INFO, "A 'Correct!' message was displayed as expected.")
wait_for_xpath_element(driver, "(//div[@ng-switch-when='isaacNumericQuestion']//p[text()='This is a correct choice.'])[2]")
log(INFO, "The editor entered explanation text was correctly shown.")
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//strong[text()='Well done!']")
log(INFO, "The 'Well done!' message was correctly shown.")
time.sleep(WAIT_DUR)
log(PASS, "Numeric Question 'correct value, correct unit' behavior as expected.")
return True
except TimeoutException:
image_div(driver, "ERROR_numeric_q_all_correct")
log(ERROR, "The messages shown for a correct answer were not all displayed; see 'ERROR_numeric_q_all_correct.png'!")
return False
| import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import answer_numeric_q
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException, NoSuchElementException
__all__ = ["numeric_q_all_correct"]
#####
# Test : Numeric Questions Correct Answers
#####
@TestWithDependency("NUMERIC_Q_ALL_CORRECT", ["NUMERIC_Q_UNITS_SELECT"])
def numeric_q_all_correct(driver, ISAAC_WEB, WAIT_DUR):
"""Test is numeric questions can be answered correctly.
- 'driver' should be a Selenium WebDriver.
- 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
- 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
"""
assert_tab(driver, ISAAC_WEB)
time.sleep(WAIT_DUR)
try:
num_question = driver.find_element_by_xpath("//div[@ng-switch-when='isaacNumericQuestion']")
except NoSuchElementException:
log(ERROR, "Can't find the numeric question; can't continue!")
return False
log(INFO, "Attempt to enter correct answer.")
if not answer_numeric_q(num_question, "2.01", "\units{ m\,s^{-1} }", wait_dur=WAIT_DUR):
log(ERROR, "Couldn't answer Numeric Question; can't continue!")
return False
time.sleep(WAIT_DUR)
try:
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//h1[text()='Correct!']")
log(INFO, "A 'Correct!' message was displayed as expected.")
wait_for_xpath_element(driver, "(//div[@ng-switch-when='isaacNumericQuestion']//p[text()='This is a correct choice.'])[2]")
log(INFO, "The editor entered explanation text was correctly shown.")
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//strong[text()='Well done!']")
log(INFO, "The 'Well done!' message was correctly shown.")
time.sleep(WAIT_DUR)
log(PASS, "Numeric Question 'correct value, correct unit' behavior as expected.")
return True
except TimeoutException:
image_div(driver, "ERROR_numeric_q_all_correct")
log(ERROR, "The messages shown for a correct answer were not all displayed; see 'ERROR_numeric_q_all_correct.png'!")
return False
| mit | Python |
aabc4bc60f0c8b6db21453dd6fad387773b18e55 | Fix a print | gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine | openquake/commands/__main__.py | openquake/commands/__main__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import importlib
from openquake.baselib import sap
from openquake.commonlib import __version__
from openquake import commands
PY_VER = sys.version_info[:3]
# check for Python version
if PY_VER < (3, 5):
sys.exit('Python 3.5+ is required, you are using %s', sys.executable)
elif PY_VER < (3, 6):
print('DeprecationWarning: Python %s.%s.%s is deprecated. '
'Please upgrade to Python 3.6+' % PY_VER)
# force cluster users to use `oq engine` so that we have centralized logs
if os.environ['OQ_DISTRIBUTE'] == 'celery' and 'run' in sys.argv:
print('You are on a cluster and you are using oq run?? '
'Use oq engine --run instead!')
def oq():
modnames = ['openquake.commands.%s' % mod[:-3]
for mod in os.listdir(commands.__path__[0])
if mod.endswith('.py') and not mod.startswith('_')]
for modname in modnames:
importlib.import_module(modname)
parser = sap.compose(sap.Script.registry.values(),
prog='oq', version=__version__)
parser.callfunc()
if __name__ == '__main__':
oq()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import importlib
from openquake.baselib import sap
from openquake.commonlib import __version__
from openquake import commands
PY_VER = sys.version_info[:3]
# check for Python version
if PY_VER < (3, 5):
sys.exit('Python 3.5+ is required, you are using %s', sys.executable)
elif PY_VER < (3, 6):
print('Warning: Python %s.%s.%s is deprecated. '
'Please upgrade to Python 3.6+' % PY_VER)
# force cluster users to use `oq engine` so that we have centralized logs
if os.environ['OQ_DISTRIBUTE'] == 'celery' and 'run' in sys.argv:
print('You are on a cluster and you are using oq run?? '
'Use oq engine --run instead!')
def oq():
modnames = ['openquake.commands.%s' % mod[:-3]
for mod in os.listdir(commands.__path__[0])
if mod.endswith('.py') and not mod.startswith('_')]
for modname in modnames:
importlib.import_module(modname)
parser = sap.compose(sap.Script.registry.values(),
prog='oq', version=__version__)
parser.callfunc()
if __name__ == '__main__':
oq()
| agpl-3.0 | Python |
2a13f4d21085228a1ef615eec8a3e42110c315d3 | Make test pass | undertherain/benchmarker,undertherain/benchmarker,undertherain/benchmarker,undertherain/benchmarker | benchmarker/modules/problems/cnn2d_toy/pytorch.py | benchmarker/modules/problems/cnn2d_toy/pytorch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from benchmarker.modules.problems.helpers_torch import Net4Inference, Net4Train
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=2)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2)
# TODO: make sure we check cnt_classes
self.dense1 = nn.Linear(1577088, 2)
def __call__(self, x):
h = x
h = self.conv1(h)
h = F.relu(h)
h = self.conv2(h)
h = F.relu(h)
h = torch.flatten(h, 1)
h = self.dense1(h)
return h
# TODO: this can be reused as well
def get_kernel(params, unparsed_args=None):
net = Net()
if params["mode"] == "inference":
net = Net4Inference(net)
else:
net = Net4Train(net)
return net
| import torch
import torch.nn as nn
import torch.nn.functional as F
from benchmarker.modules.problems.helpers_torch import Net4Inference, Net4Train
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=2)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2)
# TODO: make sure we check cnt_classes
self.dense1 = nn.Linear(1577088, 2)
def __call__(self, x):
h = x
h = self.conv1(h)
h = F.relu(h)
h = self.conv2(h)
h = F.relu(h)
h = torch.flatten(h, 1)
h = self.dense1(h)
return h
# TODO: this can be reused as well
def get_kernel(net, params, unparsed_args=None):
if params["mode"] == "inference":
net = Net4Inference(net)
else:
net = Net4Train(net)
return net
| mpl-2.0 | Python |
f6e93144a2471ef22883f4db935a499463a76824 | fix syntax errors | Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python | will/0003/into_redis.py | will/0003/into_redis.py | # Problem 0003: Save the 200 activation codes (or coupons) generated in Problem 0001 into the Redis non-relational database.
import random, string, time, math, uuid, redis
chars = string.ascii_letters + string.digits
def gen1():
key = ''.join(random.sample(chars, 10))
#key2 = ''.join(random.choice(chars) for i in range(10))
return key
def gen2():
key = math.modf(time.time())[0]
return key
def gen3():
return uuid.uuid4()
if __name__ == '__main__':
r = redis.Redis(host='localhost', port=6379, db=0)
# r.set('name', 'will')
# print(r.get('name'))
for i in range(200):
r.sadd('code', gen1())
r.save()
 | # Problem 0003: Save the 200 activation codes (or coupons) generated in Problem 0001 into the Redis non-relational database.
import random, string, time, math, uuid, redis
chars = string.ascii_letters + string.digits
def gen1():
key = ''.join(random.sample(chars, 10))
#key2 = ''.join(random.choice(chars) for i in range(10))
return key
def gen2():
key = math.modf(time.time())[0]
return key
def gen3():
return uuid.uuid4()
if '__name__' == '__main__':
r = redis.Redis(host='localhost', port=6379, db=0)
# r.set('name', 'will')
# print(r.get('name'))
for i in range(200):
r.sadd('code', gen1())
r.save()
| mit | Python |
3b564cdd4adbf3185d2f18ec6eedbf4b87057cf5 | Add virus fixture to conftest | igboyes/virtool,igboyes/virtool,virtool/virtool,virtool/virtool | conftest.py | conftest.py | from virtool.tests.fixtures.db import *
from virtool.tests.fixtures.documents import *
from virtool.tests.fixtures.client import *
from virtool.tests.fixtures.core import *
from virtool.tests.fixtures.hmm import *
from virtool.tests.fixtures.users import *
from virtool.tests.fixtures.viruses import *
def pytest_addoption(parser):
parser.addoption("--quick", action="store_true", help="Skip slower tests")
| from virtool.tests.fixtures.db import *
from virtool.tests.fixtures.documents import *
from virtool.tests.fixtures.client import *
from virtool.tests.fixtures.core import *
from virtool.tests.fixtures.hmm import *
from virtool.tests.fixtures.users import *
def pytest_addoption(parser):
parser.addoption("--quick", action="store_true", help="Skip slower tests")
| mit | Python |
900de7c14607fbe2936fa682d03747916337f075 | Fix the reactor_pytest fixture. | eLRuLL/scrapy,eLRuLL/scrapy,pablohoffman/scrapy,scrapy/scrapy,pawelmhm/scrapy,elacuesta/scrapy,scrapy/scrapy,pablohoffman/scrapy,pablohoffman/scrapy,dangra/scrapy,starrify/scrapy,eLRuLL/scrapy,pawelmhm/scrapy,dangra/scrapy,starrify/scrapy,starrify/scrapy,pawelmhm/scrapy,scrapy/scrapy,elacuesta/scrapy,elacuesta/scrapy,dangra/scrapy | conftest.py | conftest.py | from pathlib import Path
import pytest
def _py_files(folder):
return (str(p) for p in Path(folder).rglob('*.py'))
collect_ignore = [
# not a test, but looks like a test
"scrapy/utils/testsite.py",
# contains scripts to be run by tests/test_crawler.py::CrawlerProcessSubprocess
*_py_files("tests/CrawlerProcess")
]
for line in open('tests/ignores.txt'):
file_path = line.strip()
if file_path and file_path[0] != '#':
collect_ignore.append(file_path)
@pytest.fixture()
def chdir(tmpdir):
"""Change to pytest-provided temporary directory"""
tmpdir.chdir()
def pytest_collection_modifyitems(session, config, items):
# Avoid executing tests when executing `--flake8` flag (pytest-flake8)
try:
from pytest_flake8 import Flake8Item
if config.getoption('--flake8'):
items[:] = [item for item in items if isinstance(item, Flake8Item)]
except ImportError:
pass
@pytest.fixture(scope='class')
def reactor_pytest(request):
if not request.cls:
# doctests
return
request.cls.reactor_pytest = request.config.getoption("--reactor")
return request.cls.reactor_pytest
@pytest.fixture(autouse=True)
def only_asyncio(request, reactor_pytest):
if request.node.get_closest_marker('only_asyncio') and reactor_pytest != 'asyncio':
pytest.skip('This test is only run with --reactor-asyncio')
| from pathlib import Path
import pytest
def _py_files(folder):
return (str(p) for p in Path(folder).rglob('*.py'))
collect_ignore = [
# not a test, but looks like a test
"scrapy/utils/testsite.py",
# contains scripts to be run by tests/test_crawler.py::CrawlerProcessSubprocess
*_py_files("tests/CrawlerProcess")
]
for line in open('tests/ignores.txt'):
file_path = line.strip()
if file_path and file_path[0] != '#':
collect_ignore.append(file_path)
@pytest.fixture()
def chdir(tmpdir):
"""Change to pytest-provided temporary directory"""
tmpdir.chdir()
def pytest_collection_modifyitems(session, config, items):
# Avoid executing tests when executing `--flake8` flag (pytest-flake8)
try:
from pytest_flake8 import Flake8Item
if config.getoption('--flake8'):
items[:] = [item for item in items if isinstance(item, Flake8Item)]
except ImportError:
pass
@pytest.fixture()
def reactor_pytest(request):
request.cls.reactor_pytest = request.config.getoption("--reactor")
return request.cls.reactor_pytest
@pytest.fixture(autouse=True)
def only_asyncio(request, reactor_pytest):
if request.node.get_closest_marker('only_asyncio') and reactor_pytest != 'asyncio':
pytest.skip('This test is only run with --reactor-asyncio')
| bsd-3-clause | Python |