Dataset columns:
    input        string, lengths 0 – 2.17k
    instruction  string, lengths 18 – 2.94k
    output       string, lengths 47 – 3.36k
from os import mkdir
from os.path import join

from shutil import rmtree, copytree

from invoke import Collection, ctask as task
from invocations.docs import docs, www
from invocations.packaging import publish


# Until we move to spec-based testing
@task
def test(ctx, coverage=False, flags=""):
    if "--verbose" not in flags.split():
        flags += " --verbose"
    runner = "python"
    if coverage:
        runner = "coverage run --source=paramiko"
    ctx.run("{0} test.py {1}".format(runner, flags), pty=True)


@task
def coverage(ctx):
    ctx.run("coverage run --source=paramiko test.py --verbose")


# Until we stop bundling docs w/ releases. Need to discover use cases first.
@task
def release(ctx):
    # Build docs first. Use terribad workaround pending invoke #146
    ctx.run("inv docs")
    # Move the built docs into where Epydocs used to live
    target = 'docs'
    rmtree(target, ignore_errors=True)
    # TODO: make it easier to yank out this config val from the docs coll
    copytree('sites/docs/_build', target)
    # Publish
    publish(ctx)
    # Remind
    print("\n\nDon't forget to update RTD's versions page for new minor releases!")


ns = Collection(test, coverage, release, docs, www)
Add 'sites' task for rigorous doc error discovery
from os import mkdir
from os.path import join

from shutil import rmtree, copytree

from invoke import Collection, ctask as task
from invocations.docs import docs, www, sites
from invocations.packaging import publish


# Until we move to spec-based testing
@task
def test(ctx, coverage=False, flags=""):
    if "--verbose" not in flags.split():
        flags += " --verbose"
    runner = "python"
    if coverage:
        runner = "coverage run --source=paramiko"
    ctx.run("{0} test.py {1}".format(runner, flags), pty=True)


@task
def coverage(ctx):
    ctx.run("coverage run --source=paramiko test.py --verbose")


# Until we stop bundling docs w/ releases. Need to discover use cases first.
@task
def release(ctx):
    # Build docs first. Use terribad workaround pending invoke #146
    ctx.run("inv docs")
    # Move the built docs into where Epydocs used to live
    target = 'docs'
    rmtree(target, ignore_errors=True)
    # TODO: make it easier to yank out this config val from the docs coll
    copytree('sites/docs/_build', target)
    # Publish
    publish(ctx)
    # Remind
    print("\n\nDon't forget to update RTD's versions page for new minor releases!")


ns = Collection(test, coverage, release, docs, www, sites)
from setuptools import setup, find_packages

import dnschain

setup(
    name="dnschain",
    version=dnschain.__version__,
    url='https://github.com/okturtles/pydnschain',
    license='MPL',
    description="A Python DNSChain library",
    author='Greg Slepak',
    author_email='hi@okturtles.com',
    packages=find_packages(),
    classifiers=[
        'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows',
        'Environment :: MacOS X',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Security :: Cryptography',
    ],
)
Add a development status classifier.
from setuptools import setup, find_packages

import dnschain

setup(
    name="dnschain",
    version=dnschain.__version__,
    url='https://github.com/okturtles/pydnschain',
    license='MPL',
    description="A Python DNSChain library",
    author='Greg Slepak',
    author_email='hi@okturtles.com',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows',
        'Environment :: MacOS X',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Security :: Cryptography',
    ],
)
'''
cd dropbox/codes/check_forbidden
py -3.4 setup.py py2exe

Libraries used:
import tkinter
import tkinter.filedialog
import csv
import os
import re
from time import sleep
import zipfile
'''

from distutils.core import setup
import py2exe

setup(
    console=[{'author': 'Shun Sakurai',
              'script': 'check_forbidden.py',
              'version': '1.4.0',
              }],
    options={'py2exe': {
        'bundle_files': 2,
        'compressed': True,
        'excludes': ['_hashlib', '_frozen_importlib', 'argparse', '_lzma',
                     '_bz2', '_ssl', 'calendar', 'datetime', 'difflib',
                     'doctest', 'inspect', 'locale', 'optparse', 'pdb',
                     'pickle', 'pydoc', 'pyexpat', 'pyreadline'],
    }}
)
Change the application name to 'Check Forbidden'
'''
cd dropbox/codes/check_forbidden
py -3.4 setup.py py2exe

Libraries used:
import tkinter
import tkinter.filedialog
import csv
import os
import re
from time import sleep
import zipfile
'''

from distutils.core import setup
import py2exe

setup(
    console=[{'author': 'Shun Sakurai',
              'dest_base': 'Check Forbidden',
              'script': 'check_forbidden.py',
              'version': '1.4.0',
              }],
    options={'py2exe': {
        'bundle_files': 2,
        'compressed': True,
        'excludes': ['_hashlib', '_frozen_importlib', 'argparse', '_lzma',
                     '_bz2', '_ssl', 'calendar', 'datetime', 'difflib',
                     'doctest', 'inspect', 'locale', 'optparse', 'pdb',
                     'pickle', 'pydoc', 'pyexpat', 'pyreadline'],
    }}
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup


setup(
    name='Flask-HTMLBuilder',
    version='0.4',
    url='http://github.com/majorz/flask-htmlbuilder',
    license='MIT',
    author='Zahari Petkov',
    author_email='zarchaoz@gmail.com',
    description='Flexible Python-only HTML generation for Flask',
    long_description=__doc__,
    packages=['flaskext'],
    namespace_packages=['flaskext'],
    test_suite='nose.collector',
    zip_safe=False,
    platforms='any',
    install_requires=[
        'Flask'
    ],
    tests_require=[
        'nose'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
BUMP VERSION to ensure update
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup


setup(
    name='Flask-HTMLBuilder',
    version='0.10',
    url='http://github.com/majorz/flask-htmlbuilder',
    license='MIT',
    author='Zahari Petkov',
    author_email='zarchaoz@gmail.com',
    description='Flexible Python-only HTML generation for Flask',
    long_description=__doc__,
    packages=['flaskext'],
    namespace_packages=['flaskext'],
    test_suite='nose.collector',
    zip_safe=False,
    platforms='any',
    install_requires=[
        'Flask'
    ],
    tests_require=[
        'nose'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
from setuptools import setup, find_packages
import sys, os

version = '0.20'

setup(name='velruse',
      version=version,
      description="Simplifying third-party authentication for web applications.",
      long_description="""\
""",
      classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
      keywords='',
      author='Ben Bangert',
      author_email='ben@groovie.org',
      url='',
      license='',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "WebOb>=0.9.8",
          "python-openid>=2.2.4",
          "nose>=0.11",
          "oauth2>=1.1.3",
          "pyyaml",
          "pymongo>=1.6",
          "redis"
      ],
      entry_points="""
      [paste.app_factory]
      main = velruse.app:make_velruse_app
      """,
      )
Remove some requirements that aren't really needed (store-specific) and add beaker and routes that are required
from setuptools import setup, find_packages
import sys, os

version = '0.20'

setup(name='velruse',
      version=version,
      description="Simplifying third-party authentication for web applications.",
      long_description="""\
""",
      classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
      keywords='',
      author='Ben Bangert',
      author_email='ben@groovie.org',
      url='',
      license='',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          "WebOb>=0.9.8",
          "python-openid>=2.2.4",
          "nose>=0.11",
          "oauth2>=1.1.3",
          "pyyaml",
          "beaker",
          "routes",
          #"pymongo>=1.6",
          #"redis",
      ],
      entry_points="""
      [paste.app_factory]
      main = velruse.app:make_velruse_app
      """,
      )
#!/usr/bin/env python
import os
import sys
from setuptools import setup

if "publish" in sys.argv[-1]:
    os.system("python setup.py sdist upload -r pypi")
    sys.exit()
elif "testpublish" in sys.argv[-1]:
    os.system("python setup.py sdist upload -r pypitest")
    sys.exit()

# Load the __version__ variable without importing the package
exec(open('k2mosaic/version.py').read())

entry_points = {'console_scripts':
                ['k2mosaic = k2mosaic.k2mosaic:k2mosaic_main']}

setup(name='k2mosaic',
      version=__version__,
      description='Creates a mosaic of all K2 target pixel files '
                  'in a given channel during a single cadence.',
      author='Geert Barentsen',
      author_email='hello@geert.io',
      url='https://github.com/barentsen/k2mosaic',
      packages=['k2mosaic'],
      package_data={'k2mosaic': ['data/*.csv']},
      install_requires=['astropy', 'numpy', 'pandas', 'tqdm'],
      entry_points=entry_points,
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Intended Audience :: Science/Research",
          "Topic :: Scientific/Engineering :: Astronomy",
      ],
      )
Add requests to the requirements
#!/usr/bin/env python
import os
import sys
from setuptools import setup

if "publish" in sys.argv[-1]:
    os.system("python setup.py sdist upload -r pypi")
    sys.exit()
elif "testpublish" in sys.argv[-1]:
    os.system("python setup.py sdist upload -r pypitest")
    sys.exit()

# Load the __version__ variable without importing the package
exec(open('k2mosaic/version.py').read())

entry_points = {'console_scripts':
                ['k2mosaic = k2mosaic.k2mosaic:k2mosaic_main']}

setup(name='k2mosaic',
      version=__version__,
      description='Creates a mosaic of all K2 target pixel files '
                  'in a given channel during a single cadence.',
      author='Geert Barentsen',
      author_email='hello@geert.io',
      url='https://github.com/barentsen/k2mosaic',
      packages=['k2mosaic'],
      package_data={'k2mosaic': ['data/*.csv']},
      install_requires=['astropy', 'numpy', 'pandas', 'tqdm', 'requests'],
      entry_points=entry_points,
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Intended Audience :: Science/Research",
          "Topic :: Scientific/Engineering :: Astronomy",
      ],
      )
from setuptools import setup

setup(name='emopy',
      version='0.1',
      description='Emotion Recognition Package for Python',
      url='http://github.com/selameab/emopy',
      author='Selameab',
      author_email='email@selameab.com',
      license='',
      package_data={'emopy': ['models/*.h5', 'models/*.json']},
      include_package_data=True,
      packages=['emopy'],
      dependency_links=["https://github.com/tensorflow/tensorflow/tarball/master"],
      install_requires=[
          'dlib',
          'tensorflow',
          'keras>=2.0'
      ],
      zip_safe=False)
Add h5py as a dependency
from setuptools import setup

setup(name='emopy',
      version='0.1',
      description='Emotion Recognition Package for Python',
      url='http://github.com/selameab/emopy',
      author='Selameab',
      author_email='email@selameab.com',
      license='',
      package_data={'emopy': ['models/*.h5', 'models/*.json']},
      include_package_data=True,
      packages=['emopy'],
      dependency_links=["https://github.com/tensorflow/tensorflow/tarball/master"],
      install_requires=[
          'dlib',
          'tensorflow',
          'keras>=2.0',
          'h5py'
      ],
      zip_safe=False)
#!/usr/bin/env python
from setuptools import setup, find_packages

try:
    with open('VERSION.txt', 'r') as v:
        version = v.read().strip()
except FileNotFoundError:
    version = '0.0.0.dev0'

with open('DESCRIPTION', 'r') as d:
    long_description = d.read()

setup(
    name='tukio',
    description='An event-based workflow library built around asyncio',
    long_description=long_description,
    url='https://github.com/surycat/tukio',
    author='Enovacom Surycat',
    author_email='rand@surycat.com',
    version=version,
    packages=find_packages(exclude=['tests']),
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: AsyncIO',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3 :: Only',
    ],
)
Add back 3.5, 3.6 classifiers
#!/usr/bin/env python
from setuptools import setup, find_packages

try:
    with open('VERSION.txt', 'r') as v:
        version = v.read().strip()
except FileNotFoundError:
    version = '0.0.0.dev0'

with open('DESCRIPTION', 'r') as d:
    long_description = d.read()

setup(
    name='tukio',
    description='An event-based workflow library built around asyncio',
    long_description=long_description,
    url='https://github.com/surycat/tukio',
    author='Enovacom Surycat',
    author_email='rand@surycat.com',
    version=version,
    packages=find_packages(exclude=['tests']),
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        # a trailing comma was missing here, which silently concatenated the
        # two classifier strings into one
        'Framework :: AsyncIO',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3 :: Only',
    ],
)
from setuptools import setup, find_packages

setup(name='rnaseq-lib',
      version='1.0a27',
      description='Library of convenience functions related to current research',
      url='http://github.com/jvivian/rnaseq-lib',
      author='John Vivian',
      author_email='jtvivian@gmail.com',
      license='MIT',
      package_dir={'': 'src'},
      packages=find_packages('src'),
      package_data={'rnaseq_lib': ['data/*']},
      install_requires=['pandas',
                        'numpy',
                        'seaborn',
                        'holoviews',
                        'scipy'],
      extras_require={
          'web': [
              'requests',
              'mygene',
              'bs4',
              'biopython',
              'synpaseclient',
              'xmltodict']}
      )
Add annoy dependency for Trimap
from setuptools import setup, find_packages

setup(name='rnaseq-lib',
      version='1.0a27',
      description='Library of convenience functions related to current research',
      url='http://github.com/jvivian/rnaseq-lib',
      author='John Vivian',
      author_email='jtvivian@gmail.com',
      license='MIT',
      package_dir={'': 'src'},
      packages=find_packages('src'),
      package_data={'rnaseq_lib': ['data/*']},
      install_requires=['pandas',
                        'numpy',
                        'seaborn',
                        'holoviews',
                        'scipy'],
      extras_require={
          'web': [
              'requests',
              'mygene',
              'bs4',
              'biopython',
              'synpaseclient',
              'xmltodict'],
          'trimap': ['annoy']}
      )
#!/usr/bin/env python
from setuptools import setup, find_packages
import os

here = os.path.abspath(os.path.dirname(__file__))
try:
    with open(os.path.join(here, 'README.rst')) as f:
        README = f.read()
except:
    README = ''

setup(name='ppagent',
      version='0.2.2',
      description='A statistics collection agent for powerpool mining server',
      author='Isaac Cook',
      long_description=README,
      author_email='isaac@simpload.com',
      url='http://www.python.org/sigs/distutils-sig/',
      packages=find_packages(),
      install_requires=['argparse'],
      package_data={'ppagent': ['install/*']},
      entry_points={
          'console_scripts': [
              'ppagent = ppagent.main:entry'
          ]
      }
      )
Fix compat with 2.7 by only requiring argparse on 2.6
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
import sys

here = os.path.abspath(os.path.dirname(__file__))
try:
    with open(os.path.join(here, 'README.rst')) as f:
        README = f.read()
except:
    README = ''

requires = []

# add argparse to be installed for earlier versions of python
if sys.version_info[:2] <= (2, 6):
    requires.append('argparse')

setup(name='ppagent',
      version='0.2.3',
      description='A statistics collection agent for powerpool mining server',
      author='Isaac Cook',
      long_description=README,
      author_email='isaac@simpload.com',
      url='http://www.python.org/sigs/distutils-sig/',
      packages=find_packages(),
      install_requires=requires,
      package_data={'ppagent': ['install/*']},
      entry_points={
          'console_scripts': [
              'ppagent = ppagent.main:entry'
          ]
      }
      )
# Copyright (c) 2016, Imperial College London
# Copyright (c) 2016, Ghislain Antony Vaillant
# All rights reserved.
#
# Distributed under the terms of the new BSD license.
# See the accompanying LICENSE file or read the terms at
# https://opensource.org/licenses/BSD-3-Clause.

from setuptools import find_packages, setup

setup(
    packages=find_packages(exclude=['builders', 'docs', 'tests']),
    setup_requires=['cffi>=1.0.0', 'pkgconfig'],
    install_requires=['cffi>=1.0.0', 'numpy'],
    test_requires=['nose'],
    ext_package='nfft',
    cffi_modules=['builders/build_bindings.py:ffi'],
)
Add enum34 install dependency for python < 3.4
# Copyright (c) 2016, Imperial College London
# Copyright (c) 2016, Ghislain Antony Vaillant
# All rights reserved.
#
# Distributed under the terms of the new BSD license.
# See the accompanying LICENSE file or read the terms at
# https://opensource.org/licenses/BSD-3-Clause.

from setuptools import find_packages, setup


def get_install_requires():
    from distutils.version import StrictVersion
    from sys import version_info
    install_requires = ['cffi>=1.0.0', 'numpy']
    py_version = StrictVersion('.'.join(str(n) for n in version_info[:3]))
    if py_version < StrictVersion('3.4'):
        install_requires.append('enum34')
    return install_requires  # without this the helper would return None


setup(
    packages=find_packages(exclude=['builders', 'docs', 'tests']),
    setup_requires=['cffi>=1.0.0', 'pkgconfig'],
    install_requires=get_install_requires(),
    test_requires=['nose'],
    ext_package='nfft',
    cffi_modules=['builders/build_bindings.py:ffi'],
)
import setuptools
from server.version import VERSION

setuptools.setup(
    name='electrumx',
    version=VERSION.split()[-1],
    scripts=['electrumx_server.py', 'electrumx_rpc.py'],
    python_requires='>=3.5.3',
    # "irc" package is only required if IRC connectivity is enabled
    # via environment variables, in which case I've tested with 15.0.4
    # "x11_hash" package (1.4) is required to sync DASH network.
    install_requires=['plyvel', 'pylru', 'irc', 'aiohttp >= 1'],
    packages=setuptools.find_packages(),
    description='ElectrumX Server',
    author='Neil Booth',
    author_email='kyuupichan@gmail.com',
    license='MIT Licence',
    url='https://github.com/kyuupichan/electrumx/',
    long_description='Server implementation for the Electrum wallet',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Topic :: Internet',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
    ],
)
Exclude tests directory from installation

Fixes #223
import setuptools
from server.version import VERSION

setuptools.setup(
    name='electrumx',
    version=VERSION.split()[-1],
    scripts=['electrumx_server.py', 'electrumx_rpc.py'],
    python_requires='>=3.5.3',
    # "irc" package is only required if IRC connectivity is enabled
    # via environment variables, in which case I've tested with 15.0.4
    # "x11_hash" package (1.4) is required to sync DASH network.
    install_requires=['plyvel', 'pylru', 'irc', 'aiohttp >= 1'],
    packages=setuptools.find_packages(exclude=['tests']),
    description='ElectrumX Server',
    author='Neil Booth',
    author_email='kyuupichan@gmail.com',
    license='MIT Licence',
    url='https://github.com/kyuupichan/electrumx/',
    long_description='Server implementation for the Electrum wallet',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Topic :: Internet',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
    ],
)
from setuptools import find_packages, setup

setup(
    name="redshift_etl",
    version="1.0.1",
    author="Harry's Data Engineering and Contributors",
    description="ETL code to ferry data from PostgreSQL databases (or S3 files) to Redshift cluster",
    license="MIT",
    keywords="redshift postgresql ETL ELT extract transform load",
    url="https://github.com/harrystech/arthur-redshift-etl",
    package_dir={"": "python"},
    packages=find_packages("python"),
    package_data={
        "etl": [
            "assets/*",
            "config/*",
            "render_template/templates/*"
        ]
    },
    scripts=[
        "python/scripts/launch_ec2_instance.sh",
        "python/scripts/launch_emr_cluster.sh",
        "python/scripts/re_run_partial_pipeline.py",
        "python/scripts/submit_arthur.sh"
    ],
    entry_points={
        "console_scripts": [
            # NB The script must end in ".py" so that spark submit accepts it as a Python script.
            "arthur.py = etl.commands:run_arg_as_command",
            "run_tests.py = etl.selftest:run_tests"
        ]
    }
)
Bump version for v1.1.0 release
from setuptools import find_packages, setup

setup(
    name="redshift_etl",
    version="1.1.0",
    author="Harry's Data Engineering and Contributors",
    description="ETL code to ferry data from PostgreSQL databases or S3 files to Redshift clusters",
    license="MIT",
    keywords="redshift postgresql ETL ELT extract transform load",
    url="https://github.com/harrystech/arthur-redshift-etl",
    package_dir={"": "python"},
    packages=find_packages("python"),
    package_data={
        "etl": [
            "assets/*",
            "config/*",
            "render_template/templates/*"
        ]
    },
    scripts=[
        "python/scripts/launch_ec2_instance.sh",
        "python/scripts/launch_emr_cluster.sh",
        "python/scripts/re_run_partial_pipeline.py",
        "python/scripts/submit_arthur.sh"
    ],
    entry_points={
        "console_scripts": [
            # NB The script must end in ".py" so that spark submit accepts it as a Python script.
            "arthur.py = etl.commands:run_arg_as_command",
            "run_tests.py = etl.selftest:run_tests"
        ]
    }
)
from setuptools import setup
#from osrm import __version__

with open("requirements.txt") as f:
    requirements = f.read().split('\n')

setup(
    author_email="ustroetz@gmail.com",
    author="Ulric Stroetz, mthh",
    description="A Python wrapper around the OSRM API",
    install_requires=requirements,
    name='osrm',
    packages=['osrm'],
    test_suite="tests",
    url="https://github.com/ustroetz/python-osrm",
    version='0.11.3'
)
Fix typo in author name
from setuptools import setup
#from osrm import __version__

with open("requirements.txt") as f:
    requirements = f.read().split('\n')

setup(
    author_email="ustroetz@gmail.com",
    author="Uli Strötz, mthh",
    description="A Python wrapper around the OSRM API",
    install_requires=requirements,
    name='osrm',
    packages=['osrm'],
    test_suite="tests",
    url="https://github.com/ustroetz/python-osrm",
    version='0.11.3'
)
import uuid

__author__ = 'David Barroso <dbarrosop@dravetech.com>'

from setuptools import setup, find_packages
from pip.req import parse_requirements

install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
reqs = [str(ir.req) for ir in install_reqs]

setup(
    name="napalm",
    version="1.00.0",
    packages=find_packages(),
    author="David Barroso",
    author_email="dbarrosop@dravetech.com",
    description="Network Automation and Programmability Abstraction Layer with Multivendor support",
    classifiers=[
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS',
    ],
    url="https://github.com/napalm-automation/napalm",
    include_package_data=True,
    install_requires=reqs,
    entry_points={
        'console_scripts': [
            'cl_napalm_configure=napalm.clitools.cl_napalm_configure:main',
        ],
    }
)
Add classifier indicating only Python2.7 support
import uuid

__author__ = 'David Barroso <dbarrosop@dravetech.com>'

from setuptools import setup, find_packages
from pip.req import parse_requirements

install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
reqs = [str(ir.req) for ir in install_reqs]

setup(
    name="napalm",
    version="1.00.0",
    packages=find_packages(),
    author="David Barroso",
    author_email="dbarrosop@dravetech.com",
    description="Network Automation and Programmability Abstraction Layer with Multivendor support",
    classifiers=[
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS',
    ],
    url="https://github.com/napalm-automation/napalm",
    include_package_data=True,
    install_requires=reqs,
    entry_points={
        'console_scripts': [
            'cl_napalm_configure=napalm.clitools.cl_napalm_configure:main',
        ],
    }
)
import os.path

from ez_setup import use_setuptools
use_setuptools()

from setuptools import setup, find_packages

# read README as the long description
readme = 'README' if os.path.exists('README') else 'README.md'
with open(readme, 'r') as f:
    long_description = f.read()

setup(
    name='spandex',
    version='0.1dev',
    description='Spatial Analysis and Data Exploration',
    long_description=long_description,
    author='Synthicity',
    author_email='ejanowicz@synthicity.com',
    license='BSD',
    url='https://github.com/synthicity/spandex',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: BSD License'
    ],
    packages=find_packages(exclude=['*.tests']),
    install_requires=[
        'gdal>=1.10.1',
        'numpy>=1.8.0',
        'pandas>=0.13.1',
        'psycopg2>=2.5.4',
        'rasterio>=0.12',
        'rasterstats>=0.4',
        'shapely>=1.3.2'
    ]
)
Move RasterIO and Shapely to optional dependencies
import os.path

from ez_setup import use_setuptools
use_setuptools()

from setuptools import setup, find_packages

# read README as the long description
readme = 'README' if os.path.exists('README') else 'README.md'
with open(readme, 'r') as f:
    long_description = f.read()

setup(
    name='spandex',
    version='0.1dev',
    description='Spatial Analysis and Data Exploration',
    long_description=long_description,
    author='Synthicity',
    author_email='ejanowicz@synthicity.com',
    license='BSD',
    url='https://github.com/synthicity/spandex',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: BSD License'
    ],
    packages=find_packages(exclude=['*.tests']),
    install_requires=[
        'gdal>=1.10.1',
        'numpy>=1.8.0',
        'pandas>=0.13.1',
        'psycopg2>=2.5.4',
    ],
    extras_require={
        'RasterIO': ['rasterio>=0.12', 'rasterstats>=0.4'],
        'Shapely': ['shapely>=1.3.2']
    }
)
import os
from ctypeslib.dynamic_module import include
from ctypes import *

import logging
logging.basicConfig(level=logging.INFO)

if os.name == "nt":
    _libc = CDLL("msvcrt")
else:
    _libc = CDLL(None)

include("""\
#include <stdio.h>

#ifdef _MSC_VER
# include <fcntl.h>
#else
# include <sys/fcntl.h>
#endif
""",
        persist=False)
Remove the logging setup call.

git-svn-id: ac2c3632cb6543e7ab5fafd132c7fe15057a1882@52678 6015fed2-1504-0410-9fe1-9d1591cc4771
import os
from ctypeslib.dynamic_module import include
from ctypes import *

if os.name == "nt":
    _libc = CDLL("msvcrt")
else:
    _libc = CDLL(None)

include("""\
#include <stdio.h>

#ifdef _MSC_VER
# include <fcntl.h>
#else
# include <sys/fcntl.h>
#endif
""",
        persist=False)
from setuptools import setup, find_packages

setup(
    name='zeit.push',
    version='1.21.0.dev0',
    author='gocept, Zeit Online',
    author_email='zon-backend@zeit.de',
    url='http://www.zeit.de/',
    description="Sending push notifications through various providers",
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    license='BSD',
    namespace_packages=['zeit'],
    install_requires=[
        'fb',
        'gocept.testing',
        'grokcore.component',
        'mock',
        'pytz',
        'requests',
        'setuptools',
        'tweepy',
        'urbanairship >= 1.0',
        'zc.sourcefactory',
        'zeit.cms >= 2.102.0.dev0',
        'zeit.content.article',
        'zeit.content.image',
        'zeit.objectlog',
        'zope.app.appsetup',
        'zope.component',
        'zope.formlib',
        'zope.interface',
        'zope.schema',
    ],
    entry_points={
        'console_scripts': [
            'facebook-access-token = zeit.push.facebook:create_access_token',
            'parse-payload-doc = zeit.push.parse:print_payload_documentation',
            'ua-payload-doc = zeit.push.urbanairship:print_payload_documentation',
        ],
        'fanstatic.libraries': [
            'zeit_push=zeit.push.browser.resources:lib',
        ],
    },
)
Remove entry point for parse payload documentation
from setuptools import setup, find_packages

setup(
    name='zeit.push',
    version='1.21.0.dev0',
    author='gocept, Zeit Online',
    author_email='zon-backend@zeit.de',
    url='http://www.zeit.de/',
    description="Sending push notifications through various providers",
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    license='BSD',
    namespace_packages=['zeit'],
    install_requires=[
        'fb',
        'gocept.testing',
        'grokcore.component',
        'mock',
        'pytz',
        'requests',
        'setuptools',
        'tweepy',
        'urbanairship >= 1.0',
        'zc.sourcefactory',
        'zeit.cms >= 2.102.0.dev0',
        'zeit.content.article',
        'zeit.content.image',
        'zeit.objectlog',
        'zope.app.appsetup',
        'zope.component',
        'zope.formlib',
        'zope.interface',
        'zope.schema',
    ],
    entry_points={
        'console_scripts': [
            'facebook-access-token = zeit.push.facebook:create_access_token',
            'ua-payload-doc = zeit.push.urbanairship:print_payload_documentation',
        ],
        'fanstatic.libraries': [
            'zeit_push=zeit.push.browser.resources:lib',
        ],
    },
)
import os
import re
from setuptools import find_packages, setup


READMEFILE = "README.rst"
VERSIONFILE = os.path.join("elasticutils", "_version.py")
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"


def get_version():
    verstrline = open(VERSIONFILE, "rt").read()
    mo = re.search(VSRE, verstrline, re.M)
    if mo:
        return mo.group(1)
    else:
        raise RuntimeError(
            "Unable to find version string in %s." % VERSIONFILE)


setup(
    name='elasticutils',
    version=get_version(),
    description='Chainable interface to querying ElasticSearch',
    long_description=open(READMEFILE).read(),
    url='https://github.com/mozilla/elasticutils',
    author='Mozilla Foundation and contributors',
    license='BSD',
    packages=find_packages(),
    install_requires=['pyes>=0.15,<0.17'],
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Natural Language :: English',
    ],
)
Add python language version classifiers
import os
import re
from setuptools import find_packages, setup


READMEFILE = "README.rst"
VERSIONFILE = os.path.join("elasticutils", "_version.py")
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"


def get_version():
    verstrline = open(VERSIONFILE, "rt").read()
    mo = re.search(VSRE, verstrline, re.M)
    if mo:
        return mo.group(1)
    else:
        raise RuntimeError(
            "Unable to find version string in %s." % VERSIONFILE)


setup(
    name='elasticutils',
    version=get_version(),
    description='Chainable interface to querying ElasticSearch',
    long_description=open(READMEFILE).read(),
    url='https://github.com/mozilla/elasticutils',
    author='Mozilla Foundation and contributors',
    license='BSD',
    packages=find_packages(),
    install_requires=['pyes>=0.15,<0.17'],
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Natural Language :: English',
    ],
)
from setuptools import setup, find_packages

setup(
    name='idalink',
    description='An interface to the insides of IDA!',
    long_description=open('README.md').read(),
    version='0.11',
    url='https://github.com/zardus/idalink',
    license='GNU General Public License v3',
    packages=find_packages(),
    install_requires=[
        'rpyc',
    ],
)
Add author/maintainer information to keep pypi happy
from setuptools import setup, find_packages

setup(
    name='idalink',
    description='An interface to the insides of IDA!',
    long_description=open('README.md').read(),
    version='0.11',
    url='https://github.com/zardus/idalink',
    license='GNU General Public License v3',
    author='Zardus',
    author_email='zardus@gmail.com',
    maintainer='rhelmot',
    maintainer_email='audrey@rhelmot.io',
    packages=find_packages(),
    install_requires=[
        'rpyc',
    ],
)
from setuptools import setup, find_packages
import os
from subprocess import call
from setuptools import Command
from distutils.command.build_ext import build_ext as _build_ext
from setuptools.command.bdist_egg import bdist_egg as _bdist_egg


class build_frontend(Command):
    """ A command class to run `frontendbuild.sh` """
    description = 'build front-end JavaScript and CSS'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print __file__
        call(['./frontendbuild.sh'],
             cwd=os.path.dirname(os.path.abspath(__file__)))


class build_ext(_build_ext):
    """ A build_ext subclass that adds build_frontend """
    def run(self):
        self.run_command('build_frontend')
        _build_ext.run(self)


class bdist_egg(_bdist_egg):
    """ A bdist_egg subclass that runs build_frontend """
    def run(self):
        self.run_command('build_frontend')
        _bdist_egg.run(self)


setup(
    name="regulations",
    version="2.0.0",
    license="public domain",
    packages=find_packages(),
    cmdclass={
        'build_frontend': build_frontend,
        'build_ext': build_ext,
        'bdist_egg': bdist_egg,
    },
    install_requires=[
        'django==1.8',
        'lxml',
        'requests'
    ]
)
Use CC0 and Public Domain for license
from setuptools import setup, find_packages
import os
from subprocess import call
from setuptools import Command
from distutils.command.build_ext import build_ext as _build_ext
from setuptools.command.bdist_egg import bdist_egg as _bdist_egg


class build_frontend(Command):
    """ A command class to run `frontendbuild.sh` """
    description = 'build front-end JavaScript and CSS'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print __file__
        call(['./frontendbuild.sh'],
             cwd=os.path.dirname(os.path.abspath(__file__)))


class build_ext(_build_ext):
    """ A build_ext subclass that adds build_frontend """
    def run(self):
        self.run_command('build_frontend')
        _build_ext.run(self)


class bdist_egg(_bdist_egg):
    """ A bdist_egg subclass that runs build_frontend """
    def run(self):
        self.run_command('build_frontend')
        _bdist_egg.run(self)


setup(
    name="regulations",
    version="2.0.0",
    packages=find_packages(),
    cmdclass={
        'build_frontend': build_frontend,
        'build_ext': build_ext,
        'bdist_egg': bdist_egg,
    },
    install_requires=[
        'django==1.8',
        'lxml',
        'requests'
    ],
    classifiers=[
        'License :: Public Domain',
        'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication'
    ]
)
from distutils.core import setup

setup(
    name='hamms',
    packages=['hamms'],
    version='0.6',
    description='Malformed servers to test your HTTP client',
    author='Kevin Burke',
    author_email='kev@inburke.com',
    url='https://github.com/kevinburke/hamms',
    keywords=['testing', 'server', 'http',],
    # XXX, pin these down
    install_requires=['flask', 'httpbin', 'twisted'],
)
Allow installing in development mode

Users can now install with python setup.py develop
from setuptools import setup

setup(
    name='hamms',
    packages=['hamms'],
    version='0.6',
    description='Malformed servers to test your HTTP client',
    author='Kevin Burke',
    author_email='kev@inburke.com',
    url='https://github.com/kevinburke/hamms',
    keywords=['testing', 'server', 'http',],
    # XXX, pin these down
    install_requires=['flask', 'httpbin', 'twisted'],
)
#!/usr/bin/env python
from setuptools import find_packages, Command

setup_params = dict(
    name='bugimporters',
    version=0.1,
    author='Various contributers to the OpenHatch project, Berry Phillips',
    author_email='all@openhatch.org, berryphillips@gmail.com',
    packages=find_packages(),
    description='Bug importers for the OpenHatch project',
    install_requires=[
        'gdata',
        'lxml',
        'cssselect',
        'pyopenssl',
        'unicodecsv',
        'feedparser',
        'twisted',
        'python-dateutil',
        'decorator',
        'scrapy>0.9',
        'argparse',
        'mock',
        'PyYAML',
        'autoresponse>=0.3.1',
    ],
)

### Python 2.7 already has importlib. Because of that,
### we can't put it in install_requires. We test for
### that here; if needed, we add it.
try:
    import importlib
except ImportError:
    setup_params['install_requires'].append('importlib')

if __name__ == '__main__':
    from setuptools import setup
    setup(**setup_params)
Remove gdata dependency from install reqs
#!/usr/bin/env python
from setuptools import find_packages, Command

setup_params = dict(
    name='bugimporters',
    version=0.1,
    author='Various contributers to the OpenHatch project, Berry Phillips',
    author_email='all@openhatch.org, berryphillips@gmail.com',
    packages=find_packages(),
    description='Bug importers for the OpenHatch project',
    install_requires=[
        'lxml',
        'cssselect',
        'pyopenssl',
        'unicodecsv',
        'feedparser',
        'twisted',
        'python-dateutil',
        'decorator',
        'scrapy>0.9',
        'argparse',
        'mock',
        'PyYAML',
        'autoresponse>=0.3.1',
    ],
)

### Python 2.7 already has importlib. Because of that,
### we can't put it in install_requires. We test for
### that here; if needed, we add it.
try:
    import importlib
except ImportError:
    setup_params['install_requires'].append('importlib')

if __name__ == '__main__':
    from setuptools import setup
    setup(**setup_params)
import os
from setuptools import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name = "UCLDC Deep Harvester",
    version = "0.0.3",
    description = ("deep harvester code for the UCLDC project"),
    long_description=read('README.md'),
    author='Barbara Hui',
    author_email='barbara.hui@ucop.edu',
    dependency_links=[
        'https://github.com/ucldc/pynux/archive/master.zip#egg=pynux',
        'https://github.com/mredar/jsonpath/archive/master.zip#egg=jsonpath',
        'https://github.com/mredar/ucldc-iiif/archive/master.zip#egg=ucldc-iiif'
    ],
    install_requires=[
        'argparse',
        'boto',
        'pynux',
        'python-magic',
        'couchdb',
        'jsonpath',
        'akara',
        'ucldc-iiif'
    ],
    packages=['deepharvest', 's3stash'],
    test_suite='tests'
)

### note: dpla-ingestion code is a dependency
###pip_main(['install',
###    'git+ssh://git@bitbucket.org/mredar/dpla-ingestion.git@ucldc'])
Change ucldc-iiif back to barbara's repo
import os
from setuptools import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name = "UCLDC Deep Harvester",
    version = "0.0.3",
    description = ("deep harvester code for the UCLDC project"),
    long_description=read('README.md'),
    author='Barbara Hui',
    author_email='barbara.hui@ucop.edu',
    dependency_links=[
        'https://github.com/ucldc/pynux/archive/master.zip#egg=pynux',
        'https://github.com/mredar/jsonpath/archive/master.zip#egg=jsonpath',
        'https://github.com/barbarahui/ucldc-iiif/archive/master.zip#egg=ucldc-iiif'
    ],
    install_requires=[
        'argparse',
        'boto',
        'pynux',
        'python-magic',
        'couchdb',
        'jsonpath',
        'akara',
        'ucldc-iiif'
    ],
    packages=['deepharvest', 's3stash'],
    test_suite='tests'
)

### note: dpla-ingestion code is a dependency
###pip_main(['install',
###    'git+ssh://git@bitbucket.org/mredar/dpla-ingestion.git@ucldc'])
import pandas as pd
import geonames as gn

# Enter csv path
path = "/Users/larshelin/Documents/PycharmProjects/CEP/nyc-taxi/backend/parser/trips_shortend.csv"

# Open Dataframe
df = pd.read_csv(path)

for i in range(0, 10):
    # Get Latitude and Longitude
    lat = df.ix[i]['pickup_latitude']
    long = df.ix[i]['pickup_longitude']
    print(gn.getNeighborhood(lat, long))
    print(gn.getPOI(lat, long))
Change the way data is printed
import pandas as pd
import geonames as gn

# Enter csv path
path = "/Users/larshelin/Documents/PycharmProjects/CEP/nyc-taxi/backend/parser/trips_shortend.csv"

# Open Dataframe
df = pd.read_csv(path)

for i in range(0, 10):
    # Get Latitude and Longitude
    lat = df.ix[i]['pickup_latitude']
    long = df.ix[i]['pickup_longitude']
    print("District: " + gn.getNeighborhood(lat, long) + "; POI: " + gn.getPOI(lat, long))
from django.conf.urls.defaults import *

import views

urlpatterns = patterns('',
    (r'^(?P<url>.+)/?', views.article),
)
Work for the empty url
from django.conf.urls.defaults import *

import views

urlpatterns = patterns('',
    (r'^(?P<url>.*)/?', views.article),
)
__version_info__ = (1, 0, 0, 'dev')

# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
    __version__ += ('-%s' % (__version_info__[-1],))


# context processor to add version to the template environment
def context_extras(request):
    return {
        # software version
        'SW_VERSION': __version__
    }
Set version to 1.0 final !
__version_info__ = (1, 0, 0, None)

# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
    __version__ += ('-%s' % (__version_info__[-1],))


# context processor to add version to the template environment
def context_extras(request):
    return {
        # software version
        'SW_VERSION': __version__
    }
import os

from google.appengine.api import apiproxy_stub_map
from google.appengine.api.app_identity import get_application_id

have_appserver = bool(apiproxy_stub_map.apiproxy.GetStub('datastore_v3'))


def appid():
    if have_appserver:
        return get_application_id()
    else:
        try:
            from .boot import PROJECT_DIR
            from google.appengine.tools import dev_appserver
            appconfig = dev_appserver.LoadAppConfig(PROJECT_DIR, {},
                                                    default_partition='dev')[0]
            return appconfig.application.split('~', 1)[-1]
        except ImportError, e:
            raise Exception("Could not get appid. Is your app.yaml file missing? "
                            "Error was: %s" % e)

on_production_server = have_appserver and \
    not os.environ.get('SERVER_SOFTWARE', '').lower().startswith('devel')
Fix a bug introduced in the last commit
import os

from google.appengine.api import apiproxy_stub_map
from google.appengine.api.app_identity import get_application_id

have_appserver = bool(apiproxy_stub_map.apiproxy.GetStub('datastore_v3'))

if not have_appserver:
    from .boot import PROJECT_DIR
    from google.appengine.tools import dev_appserver
    appconfig = dev_appserver.LoadAppConfig(PROJECT_DIR, {},
                                            default_partition='dev')[0]


def appid():
    if have_appserver:
        return get_application_id()
    else:
        try:
            return appconfig.application.split('~', 1)[-1]
        except ImportError, e:
            raise Exception("Could not get appid. Is your app.yaml file missing? "
                            "Error was: %s" % e)

on_production_server = have_appserver and \
    not os.environ.get('SERVER_SOFTWARE', '').lower().startswith('devel')
import pytest
import itertools

from main import Primes, Sieve


def test_sieve_limit():
    limit = 10000
    with Sieve(limit) as s:
        assert s.upper_bound() >= limit


def test_upper_bound_exception():
    limit = 10
    with Sieve(limit) as s:
        with pytest.raises(IndexError):
            s.is_prime(101)


def test_zero_is_not_in_prime_list():
    with Primes() as p:
        n = 20
        assert 0 not in list(itertools.islice(p, n))


def test_number_primes_asked_is_given():
    with Primes() as p:
        n = 20
        assert len(list(itertools.islice(p, n))) == n
Reword guard test on upper bounds
import pytest
import itertools

from main import Primes, Sieve


def test_sieve_limit():
    limit = 10000
    with Sieve(limit) as s:
        assert s.upper_bound() >= limit


def test_checking_above_upper_bound_is_an_error():
    limit = 10
    with Sieve(limit) as s:
        with pytest.raises(IndexError):
            s.is_prime(101)


def test_zero_is_not_in_prime_list():
    with Primes() as p:
        n = 20
        assert 0 not in list(itertools.islice(p, n))


def test_number_primes_asked_is_given():
    with Primes() as p:
        n = 20
        assert len(list(itertools.islice(p, n))) == n
from django.contrib.auth.models import User, Group

from ztreeauth.models import LocalUser

from ztree.component.factories import create_node_factory

import logging
logger = logging.getLogger('ztreeauth')


def local_user_factory(request, local_user_content_type, **kwargs):
    logger.info('creating local user "%s" at %s with groups %s'
                % (kwargs['username'],
                   (request.tree_context.node and request.tree_context.node.absolute_path),
                   kwargs['groups']))

    user = User(username=kwargs['username'])
    user.set_password(kwargs['password1'])
    user.save()

    # create LocalUser
    local_user = LocalUser(user=user)
    local_user.save()

    # set auth groups for local_user
    for group_name in kwargs['groups']:
        grp = Group.objects.get(name=group_name)
        local_user.groups.add(grp)

    if hasattr(request, 'user'):
        username = request.user.username
    else:
        # if serving backend tree web service, no auth and no request.user
        username = kwargs.get('authenticated_username')

    new_node = create_node_factory(local_user,
                                   parent_node=request.tree_context.node,
                                   username=username,
                                   slug=user.username)
    return new_node
Fix package path issue caused by previous refactoring commit.
from django.contrib.auth.models import User, Group

from ztreeauth.models import LocalUser

from ztreecrud.component.factories import create_node_factory

import logging
logger = logging.getLogger('ztreeauth')


def local_user_factory(request, local_user_content_type, **kwargs):
    logger.info('creating local user "%s" at %s with groups %s'
                % (kwargs['username'],
                   (request.tree_context.node and request.tree_context.node.absolute_path),
                   kwargs['groups']))

    user = User(username=kwargs['username'])
    user.set_password(kwargs['password1'])
    user.save()

    # create LocalUser
    local_user = LocalUser(user=user)
    local_user.save()

    # set auth groups for local_user
    for group_name in kwargs['groups']:
        grp = Group.objects.get(name=group_name)
        local_user.groups.add(grp)

    if hasattr(request, 'user'):
        username = request.user.username
    else:
        # if serving backend tree web service, no auth and no request.user
        username = kwargs.get('authenticated_username')

    new_node = create_node_factory(local_user,
                                   parent_node=request.tree_context.node,
                                   username=username,
                                   slug=user.username)
    return new_node
# -*- coding: utf-8 -*-

from axes.models import AccessAttempt

from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.importlib import import_module

DEFAULT_ACTION = 'axes_login_actions.actions.email.notify'

ACTIONS = getattr(settings, 'AXES_LOGIN_ACTIONS', [DEFAULT_ACTION])

#----------------------------------------------------------------------
def import_dotted_path(path):
    """
    Takes a dotted path to a member name in a module, and returns
    the member after importing it.
    """
    # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path)
    try:
        module_path, member_name = path.rsplit(".", 1)
        module = import_module(module_path)
        return getattr(module, member_name)
    except (ValueError, ImportError, AttributeError), e:
        raise ImportError("Could not import the name: %s: %s" % (path, e))

#----------------------------------------------------------------------
@receiver(post_save, sender=AccessAttempt)
def access_attempt_handler(sender, instance, **kwargs):
    for action_path in ACTIONS:
        action = import_dotted_path(action_path)
        action(instance, **kwargs)
Use importlib from Python instead of from Django
# -*- coding: utf-8 -*-

from axes.models import AccessAttempt

from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver

from importlib import import_module

DEFAULT_ACTION = 'axes_login_actions.actions.email.notify'

ACTIONS = getattr(settings, 'AXES_LOGIN_ACTIONS', [DEFAULT_ACTION])

#----------------------------------------------------------------------
def import_dotted_path(path):
    """
    Takes a dotted path to a member name in a module, and returns
    the member after importing it.
    """
    # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path)
    try:
        module_path, member_name = path.rsplit(".", 1)
        module = import_module(module_path)
        return getattr(module, member_name)
    except (ValueError, ImportError, AttributeError), e:
        raise ImportError("Could not import the name: %s: %s" % (path, e))

#----------------------------------------------------------------------
@receiver(post_save, sender=AccessAttempt)
def access_attempt_handler(sender, instance, **kwargs):
    for action_path in ACTIONS:
        action = import_dotted_path(action_path)
        action(instance, **kwargs)
import os
import stat


class CopyFilter(object):
    def sub(self, line):
        return line


def is_only_user_readable(filename):
    """Return true if and only if filename is readable by user and
    unreadable by group and others."""
    mode = stat.S_IMODE(os.stat(filename).st_mode)
    return mode == 0600
Make octal number compatible with Python3
import os
import stat


class CopyFilter(object):
    def sub(self, line):
        return line


def is_only_user_readable(filename):
    """Return true if and only if filename is readable by user and
    unreadable by group and others."""
    mode = stat.S_IMODE(os.stat(filename).st_mode)
    return mode == 0o600
"""Discover Apple TV media players.""" from . import MDNSDiscoverable # pylint: disable=too-few-public-methods class Discoverable(MDNSDiscoverable): """Add support for Apple TV devices.""" def __init__(self, nd): super(Discoverable, self).__init__(nd, '_appletv-v2._tcp.local.') def info_from_entry(self, entry): """Returns most important info from mDNS entries.""" props = entry.properties info = { 'name': props.get(b'Name').decode('utf-8').replace('\xa0', ' '), 'hsgid': props.get(b'hG').decode('utf-8') } return info def get_info(self): """Get details from Apple TV instances.""" return [self.info_from_entry(entry) for entry in self.get_entries()]
Add missing host field to Apple TV
"""Discover Apple TV media players.""" import ipaddress from . import MDNSDiscoverable # pylint: disable=too-few-public-methods class Discoverable(MDNSDiscoverable): """Add support for Apple TV devices.""" def __init__(self, nd): super(Discoverable, self).__init__(nd, '_appletv-v2._tcp.local.') def info_from_entry(self, entry): """Returns most important info from mDNS entries.""" props = entry.properties info = { 'host': str(ipaddress.ip_address(entry.address)), 'name': props.get(b'Name').decode('utf-8').replace('\xa0', ' '), 'hsgid': props.get(b'hG').decode('utf-8') } return info def get_info(self): """Get details from Apple TV instances.""" return [self.info_from_entry(entry) for entry in self.get_entries()]
import unittest

from sorting_and_searching import binary_search_recursive


class BinarySearchTestCase(unittest.TestCase):
    '''
    Unit tests for binary search
    '''

    def setUp(self):
        self.example_1 = [2, 3, 4, 10, 40]

    def test_binary_search_recursive(self):
        result = binary_search_recursive(self.example_1, 0, len(self.example_1) - 1, 10)
        self.assertEqual(result, 3)

    def tearDown(self):
        pass

if __name__ == '__main__':
    unittest.main()
Add unit tests for binary search recursive and iterative
import unittest

from aids.sorting_and_searching.binary_search import binary_search_recursive, binary_search_iterative


class BinarySearchTestCase(unittest.TestCase):
    '''
    Unit tests for binary search
    '''

    def setUp(self):
        self.example_1 = [2, 3, 4, 10, 40]

    def test_binary_search_recursive(self):
        result = binary_search_recursive(self.example_1, 0, len(self.example_1) - 1, 10)
        self.assertEqual(result, 3)

    def test_binary_search_iterative(self):
        result = binary_search_iterative(self.example_1, 0, len(self.example_1) - 1, 10)
        self.assertEqual(result, 3)

    def tearDown(self):
        pass

if __name__ == '__main__':
    unittest.main()
"""Urls for the Zinnia entries short link""" from django.conf.urls import url from django.conf.urls import patterns from zinnia.views.shortlink import EntryShortLink urlpatterns = patterns( '', url(r'^e(?P<token>[\da-z]+)/$', EntryShortLink.as_view(), name='entry_shortlink'), )
Revert "Add a "e" prefix to avoid issue when reaching the ID 46656" This reverts commit e730c552c0b6095a8962f29a114069fb335d7ec6.
"""Urls for the Zinnia entries short link""" from django.conf.urls import url from django.conf.urls import patterns from zinnia.views.shortlink import EntryShortLink urlpatterns = patterns( '', url(r'^(?P<token>[\da-z]+)/$', EntryShortLink.as_view(), name='entry_shortlink'), )
def multiples_of_3_and_5(num=1000):
    for i in range(num):
        if i % 3 == 0 or i % 5 == 0:
            yield i

if __name__ == '__main__':
    print(sum(multiples_of_3_and_5()))
Add fun answer to 1 multiples of 3 and 5
from itertools import chain


def threes_and_fives_gen(num=1000):
    for i in range(num):
        if i % 3 == 0 or i % 5 == 0:
            yield i


def threes_and_fives_fun(n):
    return set(chain(range(3, n+1, 3), range(5, n+1, 5)))

if __name__ == '__main__':
    print(sum(threes_and_fives_gen(10000000)))
Set up the initial calling of the minion routines
'''
Make me some salt!
'''
# Import python libs
import os
import optparse

# Import salt libs
import salt.master
import salt.minion
import salt.utils


class Master(object):
    '''
    Creates a master server
    '''


class Minion(object):
    '''
    Create a minion server
    '''
    def __init__(self):
        self.cli = self.__parse_cli()
        self.opts = salt.utils.minion_config(self.cli)

    def __parse_cli(self):
        '''
        Parse the cli input
        '''
        parser = optparse.OptionParser()
        parser.add_option('-f',
                          '--foreground',
                          dest='foreground',
                          default=False,
                          action='store_true',
                          help='Run the minion in the foreground')
        parser.add_option('-c',
                          '--config',
                          dest='config',
                          default='/etc/salt/minion',
                          help='Pass in an alternative configuration file')
        options, args = parser.parse_args()
        cli = {'foreground': options.foreground,
               'config': options.config}
        return cli
#!/usr/bin/python

from config import INTERFACES
from built_in_classes import RootHTBClass
from .upload import Interactive, TCPACK, SSH, HTTP, Default


def apply_qos():
    public_if = INTERFACES["public_if"]
    root_class = RootHTBClass(
        interface=public_if["name"],
        rate=public_if["speed"],
        burst=public_if["speed"]/8,
        qdisc_prefix_id="1:",
        default=1500
    )
    # root_class.add_child(Interactive())
    # root_class.add_child(TCPACK())
    # root_class.add_child(SSH())
    # root_class.add_child(HTTP())
    root_class.add_child(Default())
    root_class.apply_qos()
Enable all rules in pythonic_rules

They had been disabled to avoid errors until the new design was finished.
#!/usr/bin/python

from config import INTERFACES
from built_in_classes import RootHTBClass
from .upload import Interactive, TCPACK, SSH, HTTP, Default


def apply_qos():
    public_if = INTERFACES["public_if"]
    root_class = RootHTBClass(
        interface=public_if["name"],
        rate=public_if["speed"],
        burst=public_if["speed"]/8,
        qdisc_prefix_id="1:",
        default=1500
    )
    root_class.add_child(Interactive())
    root_class.add_child(TCPACK())
    root_class.add_child(SSH())
    root_class.add_child(HTTP())
    root_class.add_child(Default())
    root_class.apply_qos()
# -*- coding: utf-8 -*-

import sys

ticks = ('▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')


def scale_data(d):
    data_range = max(d) - min(d)
    divider = data_range / (len(ticks) - 1)
    min_value = min(d)
    scaled = [int(abs(round((i - min_value) / divider))) for i in d]
    return scaled


def print_ansi_spark(d):
    for i in d:
        sys.stdout.write(ticks[i])
    print ''

if __name__ == "__main__":
    print 'hello world'
Make division float to fix divide by zero issues
# -*- coding: utf-8 -*-

from __future__ import division

import argparse

ticks = ('▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')


def scale_data(data):
    m = min(data)
    n = (max(data) - m) / (len(ticks) - 1)
    print m, n
    return [ticks[int((t - m) / n)] for t in data]


def print_ansi_spark(d):
    print ''.join(d)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('integers', metavar='N', type=int, nargs='+',
                        help='an integer for the accumulator')
    args = parser.parse_args()
    print_ansi_spark(scale_data(args.integers))
from __future__ import absolute_import, division, unicode_literals

from flask import Response, request
from sqlalchemy.orm import joinedload

from changes.api.base import APIView
from changes.models import Project, Build


class ProjectBuildIndexAPIView(APIView):
    def _get_project(self, project_id):
        project = Project.query.options(
            joinedload(Project.repository, innerjoin=True),
        ).filter_by(slug=project_id).first()
        if project is None:
            project = Project.query.options(
                joinedload(Project.repository),
            ).get(project_id)
        return project

    def get(self, project_id):
        project = self._get_project(project_id)
        if not project:
            return '', 404

        include_patches = request.args.get('include_patches') or '1'

        queryset = Build.query.options(
            joinedload(Build.project, innerjoin=True),
            joinedload(Build.author),
        ).filter(
            Build.project_id == project.id,
        ).order_by(Build.date_created.desc())

        if include_patches == '0':
            queryset = queryset.filter(
                Build.patch == None,  # NOQA
            )

        return self.paginate(queryset)

    def get_stream_channels(self, project_id=None):
        project = self._get_project(project_id)
        if not project:
            return Response(status=404)
        return ['projects:{0}:builds'.format(project.id.hex)]
Add source to project build index query
from __future__ import absolute_import, division, unicode_literals

from flask import Response, request
from sqlalchemy.orm import joinedload

from changes.api.base import APIView
from changes.models import Project, Build


class ProjectBuildIndexAPIView(APIView):
    def _get_project(self, project_id):
        project = Project.query.options(
            joinedload(Project.repository, innerjoin=True),
        ).filter_by(slug=project_id).first()
        if project is None:
            project = Project.query.options(
                joinedload(Project.repository),
            ).get(project_id)
        return project

    def get(self, project_id):
        project = self._get_project(project_id)
        if not project:
            return '', 404

        include_patches = request.args.get('include_patches') or '1'

        queryset = Build.query.options(
            joinedload('project', innerjoin=True),
            joinedload('author'),
            joinedload('source'),
        ).filter(
            Build.project_id == project.id,
        ).order_by(Build.date_created.desc())

        if include_patches == '0':
            queryset = queryset.filter(
                Build.patch == None,  # NOQA
            )

        return self.paginate(queryset)

    def get_stream_channels(self, project_id=None):
        project = self._get_project(project_id)
        if not project:
            return Response(status=404)
        return ['projects:{0}:builds'.format(project.id.hex)]
import pbr.version version_info = pbr.version.VersionInfo('zaqar-ui')
Add Apache 2.0 license to source file

As per OpenStack licensing guidelines [1]:

    [H102 H103] Newly contributed Source Code should be licensed under the
    Apache 2.0 license.

[1] http://docs.openstack.org/developer/hacking/#openstack-licensing

Change-Id: I714355371a6c57f74924efec19f12d48c7fe2d3f
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('zaqar-ui')
# -*- coding: utf-8 -*- import os from distutils.core import setup from humod import __version__ CONFIG_FILES = [('/etc/ppp/peers', ['conf/humod'])] try: os.stat('/etc/ppp/options') except OSError: CONFIG_FILES = [('/etc/ppp/peers', ['conf/humod']), ('/etc/ppp/options', ['conf/options'])] setup(name='humod', version=__version__, packages=['humod'], description='Access SMS, GSM and 3G features of Huawei and ' 'compatible modems via clean and pragmatic Python API', author='Slawek Ligus, František Malina', author_email='root@ooz.ie', url='https://github.com/oozie/pyhumod', license='BSD', platforms=['Linux'], install_requires=['pyserial'], data_files=CONFIG_FILES, classifiers=['License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS :: MacOS X', 'Intended Audience :: Developers', 'Topic :: Communications', 'Topic :: Software Development :: Libraries'])
Change the name back to pyhumod

With the change of name from pyhumod to humod this would be a separate
pypi package and we don't want that.
# -*- coding: utf-8 -*- import os from distutils.core import setup from humod import __version__ CONFIG_FILES = [('/etc/ppp/peers', ['conf/humod'])] try: os.stat('/etc/ppp/options') except OSError: CONFIG_FILES = [('/etc/ppp/peers', ['conf/humod']), ('/etc/ppp/options', ['conf/options'])] setup(name='pyhumod', version=__version__, packages=['humod'], description='Access SMS, GSM and 3G features of Huawei and ' 'compatible modems via clean and pragmatic Python API', author='Slawek Ligus, František Malina', author_email='root@ooz.ie', url='https://github.com/oozie/pyhumod', license='BSD', platforms=['Linux'], install_requires=['pyserial'], data_files=CONFIG_FILES, classifiers=['License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS :: MacOS X', 'Intended Audience :: Developers', 'Topic :: Communications', 'Topic :: Software Development :: Libraries'])
import setuptools setuptools.setup( python_requires='<3.8', entry_points={ "console_scripts": [ "microscopeimagequality=microscopeimagequality.application:command" ] }, install_requires=[ "click", "matplotlib", "nose", "numpy<1.19.0,>=1.16.0", "Pillow", "scikit-image", "scipy", "six", "tensorflow==2.5.1", "imagecodecs", ], test_requires=["pytest"], name="microscopeimagequality", package_data={ "microscopeimagequality": [ "data/" ] }, classifiers=[ 'License :: OSI Approved :: Apache Software License', 'Intended Audience :: Science/Research', 'Programming Language :: Python :: 2.7', 'Topic :: Scientific/Engineering'], description="Microscope Image Quality Classification", url='https://github.com/google/microscopeimagequality', author='Samuel Yang', author_email='samuely@google.com', license='Apache 2.0', packages=setuptools.find_packages( exclude=[ "tests" ] ), version="0.1.0dev5" )
Bump tensorflow from 2.5.1 to 2.5.2

Bumps [tensorflow](https://github.com/tensorflow/tensorflow) from 2.5.1 to 2.5.2.
- [Release notes](https://github.com/tensorflow/tensorflow/releases)
- [Changelog](https://github.com/tensorflow/tensorflow/blob/master/RELEASE.md)
- [Commits](https://github.com/tensorflow/tensorflow/compare/v2.5.1...v2.5.2)

---
updated-dependencies:
- dependency-name: tensorflow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <5bdcd3c0d4d24ae3e71b3b452a024c6324c7e4bb@github.com>
import setuptools setuptools.setup( python_requires='<3.8', entry_points={ "console_scripts": [ "microscopeimagequality=microscopeimagequality.application:command" ] }, install_requires=[ "click", "matplotlib", "nose", "numpy<1.19.0,>=1.16.0", "Pillow", "scikit-image", "scipy", "six", "tensorflow==2.5.2", "imagecodecs", ], test_requires=["pytest"], name="microscopeimagequality", package_data={ "microscopeimagequality": [ "data/" ] }, classifiers=[ 'License :: OSI Approved :: Apache Software License', 'Intended Audience :: Science/Research', 'Programming Language :: Python :: 2.7', 'Topic :: Scientific/Engineering'], description="Microscope Image Quality Classification", url='https://github.com/google/microscopeimagequality', author='Samuel Yang', author_email='samuely@google.com', license='Apache 2.0', packages=setuptools.find_packages( exclude=[ "tests" ] ), version="0.1.0dev5" )
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name='scholarly', version='0.4.1', author='Steven A. Cholewiak, Panos Ipeirotis, Victor Silva', author_email='steven@cholewiak.com, panos@stern.nyu.edu, vsilva@ualberta.ca', description='Simple access to Google Scholar authors and citations', long_description=long_description, long_description_content_type="text/markdown", license='Unlicense', url='https://github.com/scholarly-python-package/scholarly', packages=setuptools.find_packages(), keywords=['Google Scholar', 'academics', 'citations'], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries :: Python Modules'], install_requires=['arrow', 'beautifulsoup4', 'bibtexparser', 'requests[security]', 'requests[socks]', 'stem', 'fake_useragent', 'PySocks', 'selenium', 'python-dotenv', 'free-proxy', ], test_suite="test_module.py" )
Fix release version to 0.4.2
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name='scholarly', version='0.4.2', author='Steven A. Cholewiak, Panos Ipeirotis, Victor Silva', author_email='steven@cholewiak.com, panos@stern.nyu.edu, vsilva@ualberta.ca', description='Simple access to Google Scholar authors and citations', long_description=long_description, long_description_content_type="text/markdown", license='Unlicense', url='https://github.com/scholarly-python-package/scholarly', packages=setuptools.find_packages(), keywords=['Google Scholar', 'academics', 'citations'], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries :: Python Modules'], install_requires=['arrow', 'beautifulsoup4', 'bibtexparser', 'requests[security]', 'requests[socks]', 'stem', 'fake_useragent', 'PySocks', 'selenium', 'python-dotenv', 'free-proxy', ], test_suite="test_module.py" )
"""Metadata constants and methods.""" from .classes import Resource from .resources import RESOURCES RESOURCES = {name: Resource.from_id(name) for name in RESOURCES}
Print all resource validation errors
"""Metadata constants and methods.""" import pydantic from . import resources from .classes import Resource RESOURCES = {} errors = [] for name in resources.RESOURCES: try: RESOURCES[name] = Resource.from_id(name) except pydantic.ValidationError as error: errors.append("\n" + f"[{name}] {error}") if errors: raise ValueError("".join(errors))
import subprocess import re import sublime, sublime_plugin class ElmFormatCommand(sublime_plugin.TextCommand): def run(self, edit): command = "elm-format {} --yes".format(self.view.file_name()) p = subprocess.Popen(command, shell=True) class ElmFormatOnSave(sublime_plugin.EventListener): def on_pre_save(self, view): sel = view.sel()[0] region = view.word(sel) scope = view.scope_name(region.b) if scope.find('source.elm') != -1: settings = sublime.load_settings('Elm Language Support.sublime-settings') if settings.get('elm_format_on_save', False): regex = settings.get('elm_format_filename_filter', '') if not (len(regex) > 0 and re.search(regex, view.file_name()) is not None): view.run_command('elm_format')
Add debug logging to elm-format
from __future__ import print_function
import subprocess
import re
import sublime, sublime_plugin

class ElmFormatCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        command = "elm-format {} --yes".format(self.view.file_name())
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        output, errors = p.communicate()
        settings = sublime.load_settings('Elm Language Support.sublime-settings')
        if settings.get('debug', False):
            string_settings = sublime.load_settings('Elm User Strings.sublime-settings')
            print(string_settings.get('logging.prefix', '') + '(elm-format) ' + output.strip(), 'errors: ' + errors.strip())

class ElmFormatOnSave(sublime_plugin.EventListener):
    def on_pre_save(self, view):
        sel = view.sel()[0]
        region = view.word(sel)
        scope = view.scope_name(region.b)
        if scope.find('source.elm') != -1:
            settings = sublime.load_settings('Elm Language Support.sublime-settings')
            if settings.get('elm_format_on_save', False):
                regex = settings.get('elm_format_filename_filter', '')
                if not (len(regex) > 0 and re.search(regex, view.file_name()) is not None):
                    view.run_command('elm_format')
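The capture pattern above (Popen with both pipes, then communicate()) is what drains stdout and stderr without the child blocking on a full pipe buffer; note that on Python 3 the captured streams come back as bytes. A standalone sketch with a stand-in shell command:

# Standalone sketch of the capture pattern: pipe both streams, then use
# communicate() to collect them without deadlocking. The echo command is a
# stand-in for the elm-format invocation; on Python 3 the results are bytes.
import subprocess

p = subprocess.Popen(
    "echo formatted; echo warning 1>&2",
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    shell=True,
)
output, errors = p.communicate()
print("stdout:", output.decode().strip())
print("stderr:", errors.decode().strip())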
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from erpnext.utilities.repost_stock import update_bin_qty, get_reserved_qty def execute(): repost_for = frappe.db.sql(""" select distinct item_code, warehouse from ( ( select distinct item_code, warehouse from `tabSales Order Item` where docstatus=1 ) UNION ( select distinct item_code, warehouse from `tabPacked Item` where docstatus=1 and parenttype='Sales Order' ) ) so_item where exists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1) """) for item_code, warehouse in repost_for: update_bin_qty(item_code, warehouse, { "reserved_qty": get_reserved_qty(item_code, warehouse) })
[fix][patch] Delete Bin for non-stock item
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from erpnext.utilities.repost_stock import update_bin_qty, get_reserved_qty def execute(): repost_for = frappe.db.sql(""" select distinct item_code, warehouse from ( ( select distinct item_code, warehouse from `tabSales Order Item` where docstatus=1 ) UNION ( select distinct item_code, warehouse from `tabPacked Item` where docstatus=1 and parenttype='Sales Order' ) ) so_item where exists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1) """) for item_code, warehouse in repost_for: update_bin_qty(item_code, warehouse, { "reserved_qty": get_reserved_qty(item_code, warehouse) }) frappe.db.sql("""delete from tabBin where exists( select name from tabItem where name=tabBin.item_code and ifnull(is_stock_item, 0) = 0 ) """)
from __future__ import absolute_import, unicode_literals import platform import sys import textwrap import warnings if not (2, 7) <= sys.version_info < (3,): sys.exit( 'ERROR: Mopidy requires Python 2.7, but found %s.' % platform.python_version()) try: import gobject # noqa except ImportError: print(textwrap.dedent(""" ERROR: The gobject Python package was not found. Mopidy requires GStreamer (and GObject) to work. These are C libraries with a number of dependencies themselves, and cannot be installed with the regular Python tools like pip. Please see http://docs.mopidy.com/en/latest/installation/ for instructions on how to install the required dependencies. """)) raise warnings.filterwarnings('ignore', 'could not open display') __version__ = '0.19.4'
py3: Use print function instead of print statement
from __future__ import absolute_import, print_function, unicode_literals import platform import sys import textwrap import warnings if not (2, 7) <= sys.version_info < (3,): sys.exit( 'ERROR: Mopidy requires Python 2.7, but found %s.' % platform.python_version()) try: import gobject # noqa except ImportError: print(textwrap.dedent(""" ERROR: The gobject Python package was not found. Mopidy requires GStreamer (and GObject) to work. These are C libraries with a number of dependencies themselves, and cannot be installed with the regular Python tools like pip. Please see http://docs.mopidy.com/en/latest/installation/ for instructions on how to install the required dependencies. """)) raise warnings.filterwarnings('ignore', 'could not open display') __version__ = '0.19.4'
from thinglang.lexer.symbols.logic import LexicalEquality from thinglang.parser.tokens import BaseToken class Conditional(BaseToken): ADVANCE = False def __init__(self, slice): super(Conditional, self).__init__(slice) _, self.value = slice def describe(self): return 'if {}'.format(self.value) def evaluate(self, stack): return self.value.evaluate(stack) class UnconditionalElse(BaseToken): pass class ConditionalElse(Conditional): def __init__(self, slice): super(ConditionalElse, self).__init__(slice) _, self.conditional = slice def describe(self): return 'otherwise if {}'.format(self.value)
Update interface signatures for else branches
from thinglang.lexer.symbols.logic import LexicalEquality from thinglang.parser.tokens import BaseToken class Conditional(BaseToken): ADVANCE = False def __init__(self, slice): super(Conditional, self).__init__(slice) _, self.value = slice def describe(self): return 'if {}'.format(self.value) def evaluate(self, stack): return self.value.evaluate(stack) class ElseBranchInterface(object): pass class UnconditionalElse(BaseToken, ElseBranchInterface): pass class ConditionalElse(Conditional, ElseBranchInterface): def __init__(self, slice): super(ConditionalElse, self).__init__(slice) _, self.conditional = slice def describe(self): return 'otherwise if {}'.format(self.value)
#!/usr/bin/env python import argparse import pubrunner import pubrunner.command_line import os import sys if __name__ == '__main__': parser = argparse.ArgumentParser(description='Main access point for OpenMinTeD Docker component') parser.add_argument('--input',required=True,type=str,help='Input directory') parser.add_argument('--output',required=True,type=str,help='Output directory') args = parser.parse_args() assert os.path.isdir(args.input) assert os.path.isdir(args.output) inputFormat = 'uimaxmi' githubRepo = 'https://github.com/jakelever/Ab3P' sys.argv = ['pubrunner'] sys.argv += ['--defaultsettings'] sys.argv += ['--forceresource_dir', args.input] sys.argv += ['--forceresource_format', inputFormat] sys.argv += ['--outputdir', args.output] sys.argv += [githubRepo] pubrunner.command_line.main()
Add unused param:language flag for OpenMinTeD purposes
#!/usr/bin/env python import argparse import pubrunner import pubrunner.command_line import os import sys if __name__ == '__main__': parser = argparse.ArgumentParser(description='Main access point for OpenMinTeD Docker component') parser.add_argument('--input',required=True,type=str,help='Input directory') parser.add_argument('--output',required=True,type=str,help='Output directory') parser.add_argument('--param:language',required=False,type=str,help='Ignored language parameter') args = parser.parse_args() assert os.path.isdir(args.input) assert os.path.isdir(args.output) inputFormat = 'uimaxmi' githubRepo = 'https://github.com/jakelever/Ab3P' sys.argv = ['pubrunner'] sys.argv += ['--defaultsettings'] sys.argv += ['--forceresource_dir', args.input] sys.argv += ['--forceresource_format', inputFormat] sys.argv += ['--outputdir', args.output] sys.argv += [githubRepo] pubrunner.command_line.main()
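Since param:language is not a valid Python identifier, argparse stores the flag under that literal name and it can only be read back with getattr() — harmless here because the value is deliberately ignored. A quick demonstration (a sketch, separate from the script above):

# Quick sketch: argparse keeps the colon in the dest, so the parsed value
# is only reachable via getattr(), not attribute syntax.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--param:language', required=False, type=str)
args = parser.parse_args(['--param:language', 'en'])
print(getattr(args, 'param:language'))  # -> en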
#!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ from .Rule import Rule class IsMethodsRuleExtension(Rule): @classmethod def is_regular(cls): return False @classmethod def is_contextfree(cls): return False @classmethod def is_context(cls): return False @classmethod def is_unrestricted(cls): return False
Add header of Rule.is_valid method
#!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ from .Rule import Rule class IsMethodsRuleExtension(Rule): @classmethod def is_regular(cls): return False @classmethod def is_contextfree(cls): return False @classmethod def is_context(cls): return False @classmethod def is_unrestricted(cls): return False @classmethod def is_valid(cls): return False
# # linter.py # Linter for SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by Bartosz Kruszczynski # Copyright (c) 2015 Bartosz Kruszczynski # # License: MIT # """This module exports the Reek plugin class.""" from SublimeLinter.lint import RubyLinter class Reek(RubyLinter): """Provides an interface to reek.""" syntax = ( 'better rspec', 'betterruby', 'cucumber steps', 'rspec', 'ruby experimental', 'ruby on rails', 'ruby' ) cmd = 'ruby -S reek' regex = r'^.+?\[(?P<line>\d+).*\]:(?P<message>.+)' tempfile_suffix = 'rb' version_args = '-S reek -v' version_re = r'reek\s(?P<version>\d+\.\d+\.\d+)' version_requirement = '>= 3.5.0' config_file = ('-c', 'config.reek')
Remove the wiki link from the smell message
# # linter.py # Linter for SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by Bartosz Kruszczynski # Copyright (c) 2015 Bartosz Kruszczynski # # License: MIT # """This module exports the Reek plugin class.""" from SublimeLinter.lint import RubyLinter class Reek(RubyLinter): """Provides an interface to reek.""" syntax = ( 'better rspec', 'betterruby', 'cucumber steps', 'rspec', 'ruby experimental', 'ruby on rails', 'ruby' ) cmd = 'ruby -S reek' regex = r'^.+?\[(?P<line>\d+).*\]:(?P<message>.+) \[.*\]' tempfile_suffix = 'rb' version_args = '-S reek -v' version_re = r'reek\s(?P<version>\d+\.\d+\.\d+)' version_requirement = '>= 3.5.0' config_file = ('-c', 'config.reek')
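The new trailing ` \[.*\]` group anchors the message capture so it stops before the bracketed wiki URL reek appends. A quick check against a representative output line (the sample is illustrative, not taken from reek):

# Quick check that the tightened regex keeps the smell message but drops
# the trailing bracketed wiki link. The sample line is illustrative.
import re

pattern = r'^.+?\[(?P<line>\d+).*\]:(?P<message>.+) \[.*\]'
sample = "  [4]:Foo has no descriptive comment (IrresponsibleModule) [https://example.com/docs]"
match = re.search(pattern, sample)
print(match.group('line'))     # -> 4
print(match.group('message'))  # -> Foo has no descriptive comment (IrresponsibleModule)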
""" Test that Gabble times out the connection process after a while if the server stops responding at various points. Real Gabbles time out after a minute; the test suite's Gabble times out after a couple of seconds. """ from servicetest import assertEquals from gabbletest import exec_test, XmppAuthenticator import constants as cs import ns class NoStreamHeader(XmppAuthenticator): def __init__(self): XmppAuthenticator.__init__(self, 'test', 'pass') def streamStarted(self, root=None): return class NoAuthInfoResult(XmppAuthenticator): def __init__(self): XmppAuthenticator.__init__(self, 'test', 'pass') def auth(self, auth): return class NoAuthResult(XmppAuthenticator): def __init__(self): XmppAuthenticator.__init__(self, 'test', 'pass') def bindIq(self, iq): return def test(q, bus, conn, stream): conn.Connect() q.expect('dbus-signal', signal='StatusChanged', args=[cs.CONN_STATUS_CONNECTING, cs.CSR_REQUESTED]) e = q.expect('dbus-signal', signal='StatusChanged') status, reason = e.args assertEquals(cs.CONN_STATUS_DISCONNECTED, status) assertEquals(cs.CSR_NETWORK_ERROR, reason) if __name__ == '__main__': exec_test(test, authenticator=NoStreamHeader()) exec_test(test, authenticator=NoAuthInfoResult()) exec_test(test, authenticator=NoAuthResult())
Use 'pass', not 'return', for empty Python methods
""" Test that Gabble times out the connection process after a while if the server stops responding at various points. Real Gabbles time out after a minute; the test suite's Gabble times out after a couple of seconds. """ from servicetest import assertEquals from gabbletest import exec_test, XmppAuthenticator import constants as cs import ns class NoStreamHeader(XmppAuthenticator): def __init__(self): XmppAuthenticator.__init__(self, 'test', 'pass') def streamStarted(self, root=None): pass class NoAuthInfoResult(XmppAuthenticator): def __init__(self): XmppAuthenticator.__init__(self, 'test', 'pass') def auth(self, auth): pass class NoAuthResult(XmppAuthenticator): def __init__(self): XmppAuthenticator.__init__(self, 'test', 'pass') def bindIq(self, iq): pass def test(q, bus, conn, stream): conn.Connect() q.expect('dbus-signal', signal='StatusChanged', args=[cs.CONN_STATUS_CONNECTING, cs.CSR_REQUESTED]) e = q.expect('dbus-signal', signal='StatusChanged') status, reason = e.args assertEquals(cs.CONN_STATUS_DISCONNECTED, status) assertEquals(cs.CSR_NETWORK_ERROR, reason) if __name__ == '__main__': exec_test(test, authenticator=NoStreamHeader()) exec_test(test, authenticator=NoAuthInfoResult()) exec_test(test, authenticator=NoAuthResult())
import matplotlib.pyplot as plt import scipy.signal import numpy as np import time from signal_functions import * match_filter = make_match_filter() signal_in = import_wav("rec.wav") # plot_waveform(match_filter, downsample=1, title="Match Filter", ax_labels=["Samples", "Magnitude"]) # plot_signal(signal_in, downsample=1) envelope, convolution = get_envelope(signal_in[:600000]) # plot_waveform(convolution[:350000], downsample=10, title="5kHz Signal after Convolution", ax_labels=["Samples", "Magnitude"]) # plot_waveform(envelope[:35000], downsample=1, title="5kHz Signal after Convolution", ax_labels=["Samples", "Magnitude"]) # plot_signal(convolution) interrupt_t, thresholds = find_intterupts(envelope) data, packet = extract_data(interrupt_t) plot_envelope_interrupts(envelope, interrupt_t, thresholds) print("Packet:", packet, "Bits:", len(packet) + 1) print("Data:", data) while True: time.sleep(10)
Print char representation of data
import matplotlib.pyplot as plt import scipy.signal import numpy as np import time from signal_functions import * match_filter = make_match_filter() signal_in = import_wav("rec.wav") # plot_waveform(match_filter, downsample=1, title="Match Filter", ax_labels=["Samples", "Magnitude"]) # plot_signal(signal_in, downsample=1) envelope, convolution = get_envelope(signal_in[:600000]) # plot_waveform(convolution[:350000], downsample=10, title="5kHz Signal after Convolution", ax_labels=["Samples", "Magnitude"]) # plot_waveform(envelope[:35000], downsample=1, title="5kHz Signal after Convolution", ax_labels=["Samples", "Magnitude"]) # plot_signal(convolution) interrupt_t, thresholds = find_intterupts(envelope) data, packet = extract_data(interrupt_t) plot_envelope_interrupts(envelope, interrupt_t, thresholds) print("Packet:", packet, "Bits:", len(packet) + 1) print("Data:", chr(data)) while True: time.sleep(10)
from __future__ import print_function import json import sys from itertools import groupby def groupsortby(data, key): """Sort and group by the same key.""" return groupby(sorted(data, key=key), key) def dict_merge(set1, set2): """Joins two dictionaries.""" return dict(set1.items() + set2.items()) def to_h(num, suffix='B'): """Converts a byte value in human readable form.""" if num is None: # Show None when data is missing return "None" for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix) def print_json(data): """Converts `data` into json and prints it to stdout. If stdout is a tty it performs a pretty print. """ if sys.stdout.isatty(): print(json.dumps(data, indent=4, separators=(',', ': '))) else: print(json.dumps(data))
Add brokers information to the output of kafka-info
from __future__ import print_function import json import sys from itertools import groupby def groupsortby(data, key): """Sort and group by the same key.""" return groupby(sorted(data, key=key), key) def dict_merge(set1, set2): """Joins two dictionaries.""" return dict(set1.items() + set2.items()) def to_h(num, suffix='B'): """Converts a byte value in human readable form.""" if num is None: # Show None when data is missing return "None" for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix) def format_to_json(data): """Converts `data` into json If stdout is a tty it performs a pretty print. """ if sys.stdout.isatty(): return json.dumps(data, indent=4, separators=(',', ': ')) else: return json.dumps(data) def print_json(data): """Converts `data` into json and prints it to stdout.""" print(format_to_json(data))
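Splitting format_to_json out of print_json makes the representation assertable without capturing stdout. A usage sketch (the import path is hypothetical):

# Usage sketch: the formatting is now testable directly. The module path
# kafka_info.util is hypothetical.
from kafka_info.util import format_to_json

def test_format_to_json_contains_key():
    assert '"brokers"' in format_to_json({"brokers": [1, 2, 3]})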
from copy import deepcopy from simpleflake import simpleflake from lab_assistant import conf, utils __all__ = [ 'get_storage', 'store', 'retrieve', 'retrieve_all', 'clear', ] def get_storage(path=None, name='Experiment', **opts): if not path: path = conf.storage['path'] _opts = deepcopy(conf.storage.get('options', {})) _opts.update(opts) opts = _opts if path in get_storage._cache: return get_storage._cache[path] Storage = utils.import_path(path) get_storage._cache[path] = Storage(name, **opts) return get_storage._cache[path] get_storage._cache = {} def store(result, storage=None): storage = storage or get_storage(name=result.experiment.name) key = simpleflake() storage.set(key, result) return key def retrieve(key, storage=None): storage = storage or get_storage() return storage.get(key) def retrieve_all(storage=None): return (storage or get_storage()).list() def remove(key, storage=None): (storage or get_storage()).remove(key) def clear(storage=None): return (storage or get_storage()).clear()
Fix get_storage cache to hold separate entries for each experiment key
from copy import deepcopy from collections import defaultdict from simpleflake import simpleflake from lab_assistant import conf, utils __all__ = [ 'get_storage', 'store', 'retrieve', 'retrieve_all', 'clear', ] def get_storage(path=None, name='Experiment', **opts): if not path: path = conf.storage['path'] _opts = deepcopy(conf.storage.get('options', {})) _opts.update(opts) opts = _opts if path in get_storage._cache: if name in get_storage._cache[path]: return get_storage._cache[path][name] Storage = utils.import_path(path) get_storage._cache[path].update({ name: Storage(name, **opts) }) return get_storage._cache[path][name] get_storage._cache = defaultdict(dict) def store(result, storage=None): storage = storage or get_storage(name=result.experiment.name) key = simpleflake() storage.set(key, result) return key def retrieve(key, storage=None): storage = storage or get_storage() return storage.get(key) def retrieve_all(storage=None): return (storage or get_storage()).list() def remove(key, storage=None): (storage or get_storage()).remove(key) def clear(storage=None): return (storage or get_storage()).clear()
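The fix swaps a flat path-keyed cache for a two-level path → name mapping, so two experiments sharing a storage backend get separate entries. The same shape in isolation (a sketch):

# Sketch of the two-level cache shape: outer key is the storage path,
# inner key the experiment name, so names no longer collide.
from collections import defaultdict

_cache = defaultdict(dict)

def get_cached(path, name, factory):
    if name not in _cache[path]:
        _cache[path][name] = factory(name)
    return _cache[path][name]

a = get_cached('redis', 'ExpA', lambda n: object())
b = get_cached('redis', 'ExpB', lambda n: object())
assert a is not b                                            # one entry per name
assert a is get_cached('redis', 'ExpA', lambda n: object())  # cache hit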
import os import shutil def copy(orig, dest, useShutil=False): if os.path.isdir(orig): if useShutil: shutil.copytree(orig, dest, symlinks=True) else: os.popen("cp -rf {} {}".format(orig, dest)) else: if useShutil: shutil.copy(orig, dest) else: os.popen("cp {} {}".format(orig, dest))
Copy now waits for files to be copied over
import os import shutil def copy(orig, dest, useShutil=False): if os.path.isdir(orig): if useShutil: shutil.copytree(orig, dest, symlinks=True) else: os.system("cp -rf {} {}".format(orig, dest)) else: if useShutil: shutil.copy(orig, dest) else: os.system("cp {} {}".format(orig, dest))
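os.popen() returns a pipe immediately, so the cp could still be running when copy() returned; os.system() blocks until the shell exits, which is the whole fix. A subprocess-based equivalent that also blocks and avoids shell quoting of odd paths (a sketch, not the module's API):

# Blocking sketch with subprocess: run() waits for cp to finish, the list
# form sidesteps shell quoting, and check=True raises if cp fails.
import subprocess

def copy_blocking(orig, dest, recursive=False):
    cmd = ["cp", "-rf", orig, dest] if recursive else ["cp", orig, dest]
    subprocess.run(cmd, check=True)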
#!/usr/bin/python
"""A test provider for the stress testing."""

# change registry this often [msec]
registryChangeTimeout = 2017

from ContextKit.flexiprovider import *
import gobject
import time
import os

def update():
    t = time.time()
    dt = int(1000*(t - round(t)))
    gobject.timeout_add(1000 - dt, update)
    v = int(round(t))
    fp.set('test.int', v)
    fp.set('test.int2', v)
    print t
    return False

pcnt = 0
def chgRegistry():
    global pcnt
    pcnt += 1
    if pcnt % 2:
        print "1 provider"
        os.system('cp 1provider.cdb cache.cdb')
    else:
        print "2 providers"
        os.system('cp 2providers.cdb cache.cdb')
    return True

gobject.timeout_add(1000, update)
# uncomment this to see the "Bus error" XXX
gobject.timeout_add(registryChangeTimeout, chgRegistry)
fp = Flexiprovider([INT('test.int'), INT('test.int2')], 'my.test.provider', 'session')
fp.run()
Fix stress test to avoid cdb bus error bug, ref 125505

Signed-off-by: Marja Hassinen <97dfd0cfe579e2c003b71e95ee20ee035e309879@nokia.com>
#!/usr/bin/python
"""A test provider for the stress testing."""

# change registry this often [msec]
registryChangeTimeout = 2017

from ContextKit.flexiprovider import *
import gobject
import time
import os

def update():
    t = time.time()
    dt = int(1000*(t - round(t)))
    gobject.timeout_add(1000 - dt, update)
    v = int(round(t))
    fp.set('test.int', v)
    fp.set('test.int2', v)
    print t
    return False

pcnt = 0
def chgRegistry():
    global pcnt
    pcnt += 1
    if pcnt % 2:
        print "1 provider"
        os.system('cp 1provider.cdb tmp.cdb; mv tmp.cdb cache.cdb')
    else:
        print "2 providers"
        os.system('cp 2providers.cdb tmp.cdb; mv tmp.cdb cache.cdb')
    return True

gobject.timeout_add(1000, update)
# uncomment this to see the "Bus error" XXX
gobject.timeout_add(registryChangeTimeout, chgRegistry)
fp = Flexiprovider([INT('test.int'), INT('test.int2')], 'my.test.provider', 'session')
fp.run()
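The copy-to-temp-then-mv dance works because mv within one filesystem is an atomic rename, so the provider never observes a half-copied cache.cdb. The same idea in pure Python (a sketch):

# Same atomic-swap idea in Python: copy to a sibling temp file, then
# os.replace() it over the target in a single rename. Atomic on POSIX as
# long as tmp and dst live on the same filesystem.
import os
import shutil

def atomic_copy(src, dst):
    tmp = dst + '.tmp'
    shutil.copyfile(src, tmp)
    os.replace(tmp, dst)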
import io import getopt import sys import pickle def usage(): print("usage: " + sys.argv[0] + " -d dictionary-file -p postings-file -q file-of-queries -o output-file-of-results") if __name__ == '__main__': dict_file = postings_file = query_file = output_file = None try: opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:') except getopt.GetoptError as err: usage() sys.exit(2) for o, a in opts: if o == '-d': dict_file = a elif o == '-p': postings_file = a elif o == '-q': query_file = a elif o == '-o': output_file = a else: assert False, "unhandled option" if dict_file == None or postings_file == None or query_file == None or output_file == None: usage() sys.exit(2) with io.open(dict_file, 'rb') as f: dictionary = pickle.load(f) with io.open(postings_file, 'rb') as f: postings = pickle.load(f) skip_pointers = pickle.load(f)
Add todo for seeking and reading
import io import getopt import sys import pickle def usage(): print("usage: " + sys.argv[0] + " -d dictionary-file -p postings-file -q file-of-queries -o output-file-of-results") if __name__ == '__main__': dict_file = postings_file = query_file = output_file = None try: opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:') except getopt.GetoptError as err: usage() sys.exit(2) for o, a in opts: if o == '-d': dict_file = a elif o == '-p': postings_file = a elif o == '-q': query_file = a elif o == '-o': output_file = a else: assert False, "unhandled option" if dict_file == None or postings_file == None or query_file == None or output_file == None: usage() sys.exit(2) with io.open(dict_file, 'rb') as f: dictionary = pickle.load(f) # Implement seeking and reading don't read entirely with io.open(postings_file, 'rb') as f: postings = pickle.load(f) skip_pointers = pickle.load(f)
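The TODO points at the usual inverted-index trick: have the dictionary map each term to a byte offset, then seek there and unpickle only that term's postings instead of loading the whole file. A sketch of that shape (the offset-per-term layout is hypothetical):

# Sketch of the seek-and-read approach the TODO asks for. Assumes a
# hypothetical dictionary layout mapping each term to the byte offset of
# its pickled postings list inside the postings file.
import io
import pickle

def load_postings(term, dictionary, postings_path):
    offset = dictionary[term]              # hypothetical: byte offset per term
    with io.open(postings_path, 'rb') as f:
        f.seek(offset)
        return pickle.load(f)              # reads only this term's postings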
from flask import Blueprint, render_template, flash from flask_login import login_required, current_user mod = Blueprint('services', __name__) @mod.route('/services') @login_required def home(): current_team = current_user.team if not current_user.is_blue_team: flash('Only blue teams can access services', 'error') return render_template('overview.html') return render_template('services.html', team=current_team) @mod.route('/service/<id>') @login_required def service(id): return render_template('service.html', service=id)
Add unauthorized redirect to service template

Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com>
from flask import Blueprint, render_template, url_for, redirect from flask_login import login_required, current_user from scoring_engine.models.service import Service mod = Blueprint('services', __name__) @mod.route('/services') @login_required def home(): current_team = current_user.team if not current_user.is_blue_team: return redirect(url_for('auth.unauthorized')) return render_template('services.html', team=current_team) @mod.route('/service/<id>') @login_required def service(id): service = Service.query.get(id) if service is None or not current_user.team == service.team: return redirect(url_for('auth.unauthorized')) return render_template('service.html', service=service)
# Released under The MIT License (MIT) # http://opensource.org/licenses/MIT # Copyright (c) 2013-2016 SCoT Development Team """Use mne-python routines as backend.""" from __future__ import absolute_import import scipy as sp from . import datatools from . import backend from . import backend_builtin as builtin def generate(): from mne.preprocessing.infomax_ import infomax def wrapper_infomax(data, random_state=None): """Call Infomax for ICA calculation.""" u = infomax(datatools.cat_trials(data).T, extended=True, random_state=random_state).T m = sp.linalg.pinv(u) return m, u def wrapper_csp(x, cl, reducedim): """Call MNE CSP algorithm.""" from mne.decoding import CSP csp = CSP(n_components=reducedim, cov_est="epoch") csp.fit(x, cl) c, d = csp.filters_.T[:, :reducedim], csp.patterns_[:reducedim, :] y = datatools.dot_special(c.T, x) return c, d, y backend = builtin.generate() backend.update({'ica': wrapper_infomax, 'csp': wrapper_csp}) return backend backend.register('mne', generate)
Use regularized covariance in CSP by default
# Released under The MIT License (MIT) # http://opensource.org/licenses/MIT # Copyright (c) 2013-2016 SCoT Development Team """Use mne-python routines as backend.""" from __future__ import absolute_import import scipy as sp from . import datatools from . import backend from . import backend_builtin as builtin def generate(): from mne.preprocessing.infomax_ import infomax def wrapper_infomax(data, random_state=None): """Call Infomax for ICA calculation.""" u = infomax(datatools.cat_trials(data).T, extended=True, random_state=random_state).T m = sp.linalg.pinv(u) return m, u def wrapper_csp(x, cl, reducedim): """Call MNE CSP algorithm.""" from mne.decoding import CSP csp = CSP(n_components=reducedim, cov_est="epoch", reg="ledoit_wolf") csp.fit(x, cl) c, d = csp.filters_.T[:, :reducedim], csp.patterns_[:reducedim, :] y = datatools.dot_special(c.T, x) return c, d, y backend = builtin.generate() backend.update({'ica': wrapper_infomax, 'csp': wrapper_csp}) return backend backend.register('mne', generate)
def selection_sort(L): """ :param L: unsorted list :return: this is a method, there is no return function. The method sorts a list using selection sort algorithm >>> L = [2, 7, 5, 3] >>> selection_sort(L) >>> L [2, 3, 5, 7] """ end = len(L) # Find the index of the smallest element in L[i:] and swap that item # with the item at index i for i in range(end): index_of_smallest = get_index_of_smallest(L, i) L[index_of_smallest], L[i] = L[i], L[index_of_smallest] def get_index_of_smallest(L, i): """ (list, int) -> int :param L: list we want to analyse :param i: index from where we want to start :return: index of smallest object in the list """ # The index of the smallest item so far index_of_smallest = i end = len(L) for j in range(i + 1, end): if L[j] < L[index_of_smallest]: index_of_smallest = j return index_of_smallest if __name__ == '__main__': import doctest doctest.testmod()
Improve selection sort algorithm's documentation
def selection_sort(L): """ (list) -> NoneType Sort list from smallest to largest using selection sort algorithm :param L: unsorted list >>> L = [2, 7, 5, 3] >>> selection_sort(L) >>> L [2, 3, 5, 7] """ end = len(L) # Find the index of the smallest element in L[i:] and swap that item # with the item at index i for i in range(end): index_of_smallest = get_index_of_smallest(L, i) L[index_of_smallest], L[i] = L[i], L[index_of_smallest] def get_index_of_smallest(L, i): """ (list, int) -> int :param L: list we want to analyse :param i: index from where we want to start :return: index of smallest object in the list """ # The index of the smallest item so far index_of_smallest = i end = len(L) for j in range(i + 1, end): if L[j] < L[index_of_smallest]: index_of_smallest = j return index_of_smallest if __name__ == '__main__': import doctest doctest.testmod()
# -*- coding: utf-8 -*- import unittest from pythainlp.tools import ( get_full_data_path, get_pythainlp_data_path, get_pythainlp_path, ) class TestToolsPackage(unittest.TestCase): def test_path(self): data_filename = "ttc_freq.txt" self.assertTrue( get_full_data_path(data_filename).endswith(data_filename) ) self.assertTrue(isinstance(get_pythainlp_data_path(), str)) self.assertTrue(isinstance(get_pythainlp_path, str))
Fix test case for tools.path
# -*- coding: utf-8 -*- import unittest from pythainlp.tools import ( get_full_data_path, get_pythainlp_data_path, get_pythainlp_path, ) class TestToolsPackage(unittest.TestCase): def test_path(self): data_filename = "ttc_freq.txt" self.assertTrue( get_full_data_path(data_filename).endswith(data_filename) ) self.assertTrue(isinstance(get_pythainlp_data_path(), str)) self.assertTrue(isinstance(get_pythainlp_path(), str))
import os import traceback from django.utils import timezone from django_git.management.commands.git_pull_utils.git_folder_enum import enum_git_repo from django_git.management.commands.git_pull_utils.git_synchronizer import GitSynchronizer from djangoautoconf.cmd_handler_base.msg_process_cmd_base import DjangoCmdBase from iconizer.gui_client.notification_service_client import NotificationServiceClient class GitPullOnce(DjangoCmdBase): git_tag_name = "git" def msg_loop(self): for repo in enum_git_repo(): if os.path.exists(repo.full_path): p = GitSynchronizer(repo.full_path, NotificationServiceClient().notify) success = False try: p.pull_all_branches() print "pull and push done" success = True except: traceback.print_exc() print "Pull error for: %s" % repo.full_path repo.last_checked = timezone.now() repo.is_last_pull_success = success repo.save() Command = GitPullOnce
Print message for pull success.
import os import traceback from django.utils import timezone from django_git.management.commands.git_pull_utils.git_folder_enum import enum_git_repo from django_git.management.commands.git_pull_utils.git_synchronizer import GitSynchronizer from djangoautoconf.cmd_handler_base.msg_process_cmd_base import DjangoCmdBase from iconizer.gui_client.notification_service_client import NotificationServiceClient class GitPullOnce(DjangoCmdBase): git_tag_name = "git" def msg_loop(self): for repo in enum_git_repo(): if os.path.exists(repo.full_path): p = GitSynchronizer(repo.full_path, NotificationServiceClient().notify) success = False try: p.pull_all_branches() print "pull and push done", p.sync_msg success = True except: traceback.print_exc() print "Pull error for: %s" % repo.full_path repo.last_checked = timezone.now() repo.is_last_pull_success = success repo.save() Command = GitPullOnce
from pyspeech import best_speech_result import unittest from pyaudio import PyAudio import Queue class PyspeechTest(unittest.TestCase): def setUp(self): self.p = PyAudio() def test_google_stt(self): good_morning = open('example_wavs/good_morning.wav', 'rb') output = best_speech_result(self.p, good_morning.read(), {}, "google") self.assertEqual(output, "good morning") hello_world = open('example_wavs/hello_world.wav', 'rb') output = best_speech_result(self.p, hello_world.read(), {}, "google") self.assertEqual(output, "hello world") if __name__ == "__main__": unittest.main()
Add tests for Wit STT
from pyspeech import best_speech_result import unittest from pyaudio import PyAudio import Queue class PyspeechTest(unittest.TestCase): def setUp(self): self.p = PyAudio() def test_google_stt(self): good_morning = open('example_wavs/good_morning.wav', 'rb') output = best_speech_result(self.p, good_morning.read(), {}, "google") self.assertEqual(output, "good morning") hello_world = open('example_wavs/hello_world.wav', 'rb') output = best_speech_result(self.p, hello_world.read(), {}, "google") self.assertEqual(output, "hello world") # This will fail without a valid wit_token in profile.yml def test_wit_stt(self): import yaml profile = yaml.load(open("profile.yml").read()) good_morning = open('example_wavs/good_morning.wav', 'rb') output = best_speech_result(self.p, good_morning.read(), profile, "wit") self.assertEqual(output, "good morning") hello_world = open('example_wavs/hello_world.wav', 'rb') output = best_speech_result(self.p, hello_world.read(), profile, "wit") self.assertEqual(output, "hello world") if __name__ == "__main__": unittest.main()
#!/usr/bin/env python -u from __future__ import absolute_import, division, print_function, unicode_literals import csv, os, requests url = 'https://api.balancedpayments.com/debits/{}/refunds' username = os.environ['BALANCED_API_USER'] inp = csv.reader(open('refunds.csv')) inp.next() # headers out = csv.reader(open('refunds.completed.csv', 'w+')) out.writerow('ts', 'id', 'amount', 'code', 'body') for ts, id, amount in inp: response = requests.post( url.format(id) , data={'amount': amount} , auth=(username, '') ) out.writerow((ts,id,amount,response.status_code,response.content))
Clean up script to make refunds

Tested against httpbin.org
#!/usr/bin/env python -u from __future__ import absolute_import, division, print_function, unicode_literals import csv, os, requests url = 'https://api.balancedpayments.com/debits/{}/refunds' username = os.environ['BALANCED_API_USER'] inp = csv.reader(open('refunds.csv')) inp.next() # headers out = csv.writer(open('refunds.completed.csv', 'w+')) out.writerow(('ts', 'id', 'amount', 'code', 'body')) for ts, id, amount in inp: response = requests.post( url.format(id) , data={'amount': amount} , auth=(username, '') ) out.writerow((ts,id,amount,response.status_code,response.content))
from models import Base, engine, MetricType from sqlalchemy.orm import Session import uuid import os # Create all tables in the engine. This is equivalent to "Create Table" # statements in raw SQL. Base.metadata.create_all(engine) session = Session(engine) def initialize_metric_types(): metric_types = [None] * 2 metric_types[0] = MetricType() metric_types[0].id = str(uuid.uuid4()) metric_types[0].name = 'Temperature' metric_types[0].min_value = -50.0 metric_types[0].max_value = 50.0 metric_types[0].unit = 'C' metric_types[1] = MetricType() metric_types[1].id = str(uuid.uuid4()) metric_types[1].name = 'Humidity' metric_types[1].min_value = 0.0 metric_types[1].max_value = 100.0 metric_types[1].unit = '%' session.add_all(metric_types) session.commit() try: os.remove('station_db.db') except Exception as ex: pass initialize_metric_types()
Fix for missing database file.

Signed-off-by: Maciej Szankin <33c1fdf481c8e628d4c6db7ea8dc77f49f2fa5d7@szankin.pl>
from models import Base, engine, MetricType from sqlalchemy.orm import Session import uuid import os try: os.remove('station_db.db') except Exception as ex: pass # Create all tables in the engine. This is equivalent to "Create Table" # statements in raw SQL. Base.metadata.create_all(engine) session = Session(engine) def initialize_metric_types(): metric_types = [None] * 2 metric_types[0] = MetricType() metric_types[0].id = str(uuid.uuid4()) metric_types[0].name = 'Temperature' metric_types[0].min_value = -50.0 metric_types[0].max_value = 50.0 metric_types[0].unit = 'C' metric_types[1] = MetricType() metric_types[1].id = str(uuid.uuid4()) metric_types[1].name = 'Humidity' metric_types[1].min_value = 0.0 metric_types[1].max_value = 100.0 metric_types[1].unit = '%' session.add_all(metric_types) session.commit() initialize_metric_types()
from os.path import join from os import makedirs from django.conf import settings def make_file_id(file_data): return str(abs(hash(file_data))) def get_file_location(file_name, file_id): path = join(settings.MEDIA_ROOT, 'tmp') path = join(path, str(file_id)) try: makedirs(path) except: pass return join(path, file_name) def get_file_url(file_name, file_id): path = join(settings.MEDIA_URL, 'tmp') path = join(path, str(file_id)) return join(path, file_name)
Improve the file id hash
from os.path import join from os import makedirs from django.conf import settings from datetime import datetime import time def make_file_id(file_data): timestamp = datetime.now() timestamp = time.mktime(timestamp.timetuple()) * 1e3 + timestamp.microsecond / 1e3 timestamp = '{}'.format(timestamp).encode() return str(abs(hash(file_data + timestamp))) def get_file_location(file_name, file_id): path = join(settings.MEDIA_ROOT, 'tmp') path = join(path, str(file_id)) try: makedirs(path) except: pass return join(path, file_name) def get_file_url(file_name, file_id): path = join(settings.MEDIA_URL, 'tmp') path = join(path, str(file_id)) return join(path, file_name)
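Salting the content hash with a millisecond timestamp makes ids for identical uploads distinct, though hash() of bytes is also randomized per interpreter run on Python 3, so these ids are not stable across processes either way. If content addressing is not needed, uuid4 gives unique ids with less machinery (a sketch, not the project's code):

# Simpler alternative sketch: uuid4 is collision-resistant on its own,
# with no content hash or timestamp salt, at the cost of content addressing.
import uuid

def make_file_id():
    return uuid.uuid4().hex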
from django.db import models from django.http import HttpResponse from django.conf.urls import url from wagtail.contrib.wagtailroutablepage.models import RoutablePage def routable_page_external_view(request, arg): return HttpResponse("EXTERNAL VIEW: " + arg) class RoutablePageTest(RoutablePage): subpage_urls = ( url(r'^$', 'main', name='main'), url(r'^archive/year/(\d+)/$', 'archive_by_year', name='archive_by_year'), url(r'^archive/author/(?P<author_slug>.+)/$', 'archive_by_author', name='archive_by_author'), url(r'^external/(.+)/$', routable_page_external_view, name='external_view') ) def archive_by_year(self, request, year): return HttpResponse("ARCHIVE BY YEAR: " + str(year)) def archive_by_author(self, request, author_slug): return HttpResponse("ARCHIVE BY AUTHOR: " + author_slug) def main(self, request): return HttpResponse("MAIN VIEW")
Make subpage_urls a property on RoutablePageTest
from django.db import models from django.http import HttpResponse from django.conf.urls import url from wagtail.contrib.wagtailroutablepage.models import RoutablePage def routable_page_external_view(request, arg): return HttpResponse("EXTERNAL VIEW: " + arg) class RoutablePageTest(RoutablePage): @property def subpage_urls(self): return ( url(r'^$', self.main, name='main'), url(r'^archive/year/(\d+)/$', self.archive_by_year, name='archive_by_year'), url(r'^archive/author/(?P<author_slug>.+)/$', self.archive_by_author, name='archive_by_author'), url(r'^external/(.+)/$', routable_page_external_view, name='external_view') ) def archive_by_year(self, request, year): return HttpResponse("ARCHIVE BY YEAR: " + str(year)) def archive_by_author(self, request, author_slug): return HttpResponse("ARCHIVE BY AUTHOR: " + author_slug) def main(self, request): return HttpResponse("MAIN VIEW")
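Turning subpage_urls into a property matters because a class-level tuple is built before any instance exists, so it can only name views as strings; a property is evaluated per instance and can hand url() real bound methods. The difference in miniature (a sketch):

# Sketch of the difference: the property is evaluated on an instance, so
# each route tuple carries a bound method tied to that instance.
class Page(object):
    @property
    def routes(self):
        return (('main', self.main),)  # bound method, not a bare string

    def main(self):
        return 'MAIN VIEW'

page = Page()
name, view = page.routes[0]
print(view())  # -> MAIN VIEW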
#! /usr/bin/python import jsonschema import json import sys import os import glob vm_schema = json.loads(open("vm.schema.json").read()); def validate_vm_spec(filename): # Load and parse as JSON try: vm_spec = json.loads(open(filename).read()) except: raise Exception("JSON load / parse Error for " + filename) # Validate JSON according to schema try: jsonschema.validate(vm_spec, vm_schema) except Exception as err: raise Exception("JSON schema validation failed: " + err.message) def has_required_stuff(path): # Certain files are mandatory required_files = [ "Makefile", "test.py", "README.md", "*.cpp" ] for file in required_files: if not glob.glob(file): raise Exception("missing " + file) # JSON-files must conform to VM-schema for json in glob.glob("*.json"): validate_vm_spec(json) path = sys.argv[1] if len(sys.argv) > 1 else "." os.chdir(path) try: has_required_stuff(path) print "\tPASS: ",os.getcwd() except Exception as err: print "\tFAIL: unmet requirements in " + path, ": " , err.message
Test validator can now be used as a module
#! /usr/bin/python import jsonschema import json import sys import os import glob vm_schema = None jsons = [] valid_vms = [] def load_schema(filename): global vm_schema vm_schema = json.loads(open(filename).read()); def validate_vm_spec(filename): global valid_vms vm_spec = None # Load and parse as JSON try: vm_spec = json.loads(open(filename).read()) except: raise Exception("JSON load / parse Error for " + filename) # Validate JSON according to schema try: jsonschema.validate(vm_spec, vm_schema) except Exception as err: raise Exception("JSON schema validation failed: " + err.message) valid_vms.append(vm_spec) def has_required_stuff(path): global jsons # Certain files are mandatory required_files = [ "Makefile", "test.py", "README.md", "*.cpp" ] for file in required_files: if not glob.glob(file): raise Exception("missing " + file) # JSON-files must conform to VM-schema jsons = glob.glob("*.json") for json in jsons: validate_vm_spec(json) if __name__ == "__main__": path = sys.argv[1] if len(sys.argv) > 1 else "." load_schema("vm.schema.json") os.chdir(path) try: has_required_stuff(path) print "<validate_test> \tPASS: ",os.getcwd() except Exception as err: print "<validate_test> \tFAIL: unmet requirements in " + path, ": " , err.message
# coding=utf-8 import os.path as path import unittest from devpi_builder import wheeler class WheelTest(unittest.TestCase): def test_build(self): with wheeler.Builder() as builder: wheel_file = builder('progressbar', '2.2') self.assertRegexpMatches(wheel_file, '\.whl$') self.assert_(path.exists(wheel_file)) def test_cleans_up_created_files(self): with wheeler.Builder() as builder: wheel_file = builder('progressbar', '2.2') self.assertFalse(path.exists(wheel_file)) def test_provides_file_that_is_already_a_wheel(self): with wheeler.Builder() as builder: wheel_file = builder('wheel', '0.24') self.assert_(path.exists(wheel_file)) def test_throws_custom_on_build_failure(self): with wheeler.Builder() as builder: with self.assertRaises(wheeler.BuildError): builder('package_that_hopefully_does_not_exist', '99.999') if __name__ == '__main__': unittest.main()
Cover the line that handles the pip<=1.5.2 error case.
# coding=utf-8 import os.path as path import unittest from devpi_builder import wheeler class WheelTest(unittest.TestCase): def test_build(self): with wheeler.Builder() as builder: wheel_file = builder('progressbar', '2.2') self.assertRegexpMatches(wheel_file, '\.whl$') self.assert_(path.exists(wheel_file)) def test_cleans_up_created_files(self): with wheeler.Builder() as builder: wheel_file = builder('progressbar', '2.2') self.assertFalse(path.exists(wheel_file)) def test_provides_file_that_is_already_a_wheel(self): with wheeler.Builder() as builder: wheel_file = builder('wheel', '0.24') self.assert_(path.exists(wheel_file)) def test_throws_custom_on_build_failure(self): with wheeler.Builder() as builder: with self.assertRaises(wheeler.BuildError): builder('package_that_hopefully_does_not_exist', '99.999') def test_look_for_non_existing_wheel(self): with wheeler.Builder() as builder: with self.assertRaises(wheeler.BuildError): builder('nothing_can_be_found', '1.1') if __name__ == '__main__': unittest.main()
import pytest from matplotlib import pyplot as plt from poliastro.plotting import OrbitPlotter2D, OrbitPlotter3D from poliastro.plotting.misc import plot_solar_system @pytest.mark.parametrize("outer,expected", [(True, 8), (False, 4)]) def test_plot_solar_system_has_expected_number_of_orbits(outer, expected): assert len(plot_solar_system(outer).trajectories) == expected @pytest.mark.parametrize( "use_3d, plotter_class", [(True, OrbitPlotter3D), (False, OrbitPlotter2D)] ) def test_plot_solar_system_uses_expected_orbitplotter(use_3d, plotter_class): assert isinstance(plot_solar_system(use_3d=use_3d, interactive=True), plotter_class) @pytest.mark.mpl_image_compare def test_plot_inner_solar_system_static(earth_perihelion): plot_solar_system(outer=False, epoch=earth_perihelion) return plt.gcf() @pytest.mark.mpl_image_compare def test_plot_outer_solar_system_static(earth_perihelion): plot_solar_system(outer=True, epoch=earth_perihelion) return plt.gcf()
Check for error if use_3d and non-interactive
import pytest from matplotlib import pyplot as plt from poliastro.plotting import OrbitPlotter2D, OrbitPlotter3D from poliastro.plotting.misc import plot_solar_system @pytest.mark.parametrize("outer,expected", [(True, 8), (False, 4)]) def test_plot_solar_system_has_expected_number_of_orbits(outer, expected): assert len(plot_solar_system(outer).trajectories) == expected @pytest.mark.parametrize( "use_3d, plotter_class", [(True, OrbitPlotter3D), (False, OrbitPlotter2D)] ) def test_plot_solar_system_uses_expected_orbitplotter(use_3d, plotter_class): assert isinstance(plot_solar_system(use_3d=use_3d, interactive=True), plotter_class) if use_3d: with pytest.raises(ValueError) as excinfo: plot_solar_system(use_3d=use_3d) assert ("The static plotter does not support 3D" in excinfo.exconly()) @pytest.mark.mpl_image_compare def test_plot_inner_solar_system_static(earth_perihelion): plot_solar_system(outer=False, epoch=earth_perihelion) return plt.gcf() @pytest.mark.mpl_image_compare def test_plot_outer_solar_system_static(earth_perihelion): plot_solar_system(outer=True, epoch=earth_perihelion) return plt.gcf()
from dmoj.executors.base_executor import CompiledExecutor from dmoj.executors.mixins import ScriptDirectoryMixin class Executor(ScriptDirectoryMixin, CompiledExecutor): ext = '.rkt' name = 'RKT' fs = ['/etc/nsswitch.conf$', '/etc/passwd$'] command = 'racket' syscalls = ['epoll_create', 'epoll_wait', 'poll', 'select'] address_grace = 131072 test_program = '''\ #lang racket (displayln (read-line)) ''' def get_compile_args(self): return [self.runtime_dict['raco'], 'make', self._code] def get_cmdline(self): return [self.get_command(), self._code] def get_executable(self): return self.get_command() @classmethod def initialize(cls, sandbox=True): if 'raco' not in cls.runtime_dict: return False return super(Executor, cls).initialize(sandbox) @classmethod def get_versionable_commands(cls): return [('racket', cls.get_command())] @classmethod def get_find_first_mapping(cls): return { 'racket': ['racket'], 'raco': ['raco'] }
Fix Racket on FreeBSD after openat changes

@quantum5 this feels iffy, but I think it's (marginally) better than
allowing all .racket folders to be read
from dmoj.executors.base_executor import CompiledExecutor from dmoj.executors.mixins import ScriptDirectoryMixin import os class Executor(ScriptDirectoryMixin, CompiledExecutor): ext = '.rkt' name = 'RKT' fs = ['/etc/nsswitch.conf$', '/etc/passwd$', os.path.expanduser('~/\.racket/.*?')] command = 'racket' syscalls = ['epoll_create', 'epoll_wait', 'poll', 'select'] address_grace = 131072 test_program = '''\ #lang racket (displayln (read-line)) ''' def get_compile_args(self): return [self.runtime_dict['raco'], 'make', self._code] def get_cmdline(self): return [self.get_command(), self._code] def get_executable(self): return self.get_command() @classmethod def initialize(cls, sandbox=True): if 'raco' not in cls.runtime_dict: return False return super(Executor, cls).initialize(sandbox) @classmethod def get_versionable_commands(cls): return [('racket', cls.get_command())] @classmethod def get_find_first_mapping(cls): return { 'racket': ['racket'], 'raco': ['raco'] }
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Single source of truth for dsub's version. This must remain small and dependency-free so that any dsub module may import it without creating circular dependencies. Note that this module is parsed as a text file by setup.py and changes to the format of this file could break setup.py. The version should follow formatting requirements specified in PEP-440. - https://www.python.org/dev/peps/pep-0440 A typical release sequence will be versioned as: 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ DSUB_VERSION = '0.3.1.dev0'
Update dsub version to 0.3.1

PiperOrigin-RevId: 243828346
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Single source of truth for dsub's version. This must remain small and dependency-free so that any dsub module may import it without creating circular dependencies. Note that this module is parsed as a text file by setup.py and changes to the format of this file could break setup.py. The version should follow formatting requirements specified in PEP-440. - https://www.python.org/dev/peps/pep-0440 A typical release sequence will be versioned as: 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ DSUB_VERSION = '0.3.1'
from __future__ import absolute_import import datetime as dt import feedparser from types import StringTypes from comics.aggregator.lxmlparser import LxmlParser class FeedParser(object): def __init__(self, url): self.raw_feed = feedparser.parse(url) def for_date(self, date): return [Entry(e) for e in self.raw_feed.entries if e.updated_parsed and dt.date(*e.updated_parsed[:3]) == date] def all(self): return [Entry(e) for e in self.raw_feed.entries] class Entry(object): def __init__(self, entry): self.raw_entry = entry if 'summary' in entry: self.summary = self.html(entry.summary) if 'content' in entry: self.content0 = self.html(entry.content[0].value) def __getattr__(self, name): return getattr(self.raw_entry, name) def html(self, string): return LxmlParser(string=string) def has_tag(self, tag): def matches_tag(item): return item.term == tag if ('tags' in self.raw_entry and len(filter(matches_tag, self.raw_entry['tags']))): return True return False
Replace inner function with lambda in FeedParser.has_tag()
from __future__ import absolute_import import datetime as dt import feedparser from types import StringTypes from comics.aggregator.lxmlparser import LxmlParser class FeedParser(object): def __init__(self, url): self.raw_feed = feedparser.parse(url) def for_date(self, date): return [Entry(e) for e in self.raw_feed.entries if e.updated_parsed and dt.date(*e.updated_parsed[:3]) == date] def all(self): return [Entry(e) for e in self.raw_feed.entries] class Entry(object): def __init__(self, entry): self.raw_entry = entry if 'summary' in entry: self.summary = self.html(entry.summary) if 'content' in entry: self.content0 = self.html(entry.content[0].value) def __getattr__(self, name): return getattr(self.raw_entry, name) def html(self, string): return LxmlParser(string=string) def has_tag(self, tag): if ('tags' in self.raw_entry and len(filter(lambda t: t.term == tag, self.raw_entry.tags))): return True return False
import os from flask import Flask from flask.ext.assets import Bundle, Environment app = Flask(__name__) # Load the app config app.config.from_object("config.Config") assets = Environment(app) assets.load_path = [ os.path.join(os.path.dirname(__file__), 'static'), os.path.join(os.path.dirname(__file__), 'static', 'bower_components') ] assets.register( 'js_all', Bundle( 'jquery/dist/jquery.min.js', 'bootstrap/dist/js/bootstrap.min.js', output='js_all.js' ) ) assets.register( 'css_all', Bundle( 'bootstrap/dist/css/bootstrap.css', 'bootstrap/dist/css/bootstrap-theme.css', 'css/ignition.css', output='css_all.css' ) ) from manager.views import core
Change theme to sandstone (bootswatch)
import os from flask import Flask from flask.ext.assets import Bundle, Environment app = Flask(__name__) # Load the app config app.config.from_object("config.Config") assets = Environment(app) assets.load_path = [ os.path.join(os.path.dirname(__file__), 'static'), os.path.join(os.path.dirname(__file__), 'static', 'bower_components') ] assets.register( 'js_all', Bundle( 'jquery/dist/jquery.min.js', 'bootstrap/dist/js/bootstrap.min.js', output='js_all.js' ) ) assets.register( 'css_all', Bundle( 'bootswatch/sandstone/bootstrap.css', 'css/ignition.css', output='css_all.css' ) ) from manager.views import core
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.page import test_expectations # Valid expectation conditions are: # # Operating systems: # win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion, # linux, chromeos, android # # GPU vendors: # amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm, # vivante # # Specific GPUs can be listed as a tuple with vendor name and device ID. # Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604') # Device IDs must be paired with a GPU vendor. class MemoryExpectations(test_expectations.TestExpectations): def SetExpectations(self): # Sample Usage: # self.Fail('Memory.CSS3D', # ['mac', 'amd', ('nvidia', 0x1234)], bug=123) self.Fail('Memory.CSS3D', ['mac', ('nvidia', 0x0fd5)], bug=368037)
Add a failure expectation to win memory.css3d test.

In tile manager we seem to reach the memory limit early (with the pending tree). However, when we activate, our memory gets released and we start filling it up again with the now active tree tiles. The Windows bot seems to catch the system at the moment when we're not using a lot of memory, thus failing the test.

BUG=373098
R=kbr@chromium.org

Review URL: https://codereview.chromium.org/289003004

git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@270962 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.page import test_expectations # Valid expectation conditions are: # # Operating systems: # win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion, # linux, chromeos, android # # GPU vendors: # amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm, # vivante # # Specific GPUs can be listed as a tuple with vendor name and device ID. # Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604') # Device IDs must be paired with a GPU vendor. class MemoryExpectations(test_expectations.TestExpectations): def SetExpectations(self): # Sample Usage: # self.Fail('Memory.CSS3D', # ['mac', 'amd', ('nvidia', 0x1234)], bug=123) self.Fail('Memory.CSS3D', ['mac', ('nvidia', 0x0fd5)], bug=368037) # TODO(vmpstr): Memory drops and increases again, and this # particular bot happens to catch it when its low. Remove # once the bug is fixed. self.Fail('Memory.CSS3D', ['win'], bug=373098)
import numpy as np def histeq(im, num_bins=256): """ Performs an histogram equalization on ``img``. This was taken from: http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html Parameters ---------- im : ndarray Image on which to perform histogram equalization. num_bins : int Number of bins used to construct the histogram. Returns ------- result : ndarray Histogram equalized image. """ #get image histogram histo, bins = np.histogram(im.flatten(), num_bins, normed=True) cdf = histo.cumsum() cdf = 255 * cdf / cdf[-1] #use linear interpolation of cdf to find new pixel values result = np.interp(im.flatten(), bins[:-1], cdf) return result.reshape(im.shape)
Fix comment format and input var name.
import numpy as np def histeq(arr, num_bins=256): """ Performs an histogram equalization on ``arr``. This was taken from: http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html Parameters ---------- arr : ndarray Image on which to perform histogram equalization. num_bins : int Number of bins used to construct the histogram. Returns ------- result : ndarray Histogram equalized image. """ #get image histogram histo, bins = np.histogram(arr.flatten(), num_bins, normed=True) cdf = histo.cumsum() cdf = 255 * cdf / cdf[-1] #use linear interpolation of cdf to find new pixel values result = np.interp(arr.flatten(), bins[:-1], cdf) return result.reshape(arr.shape)
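A minimal usage sketch for the function above, assuming a NumPy version where np.histogram still accepts normed= (as histeq itself does); the synthetic image is arbitrary:

import numpy as np

rng = np.random.RandomState(0)
arr = rng.normal(loc=100, scale=5, size=(64, 64)).clip(0, 255)
eq = histeq(arr)  # the function defined above

print(arr.min(), arr.max())  # roughly 80..120: low-contrast input
print(eq.min(), eq.max())    # close to 0..255: stretched output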
import subprocess import string class Plugin: def __init__(self, command, allowedChars): # We replace the characters we do allow with empty strings, to get a string of all the characters we don't allow. self.notAllowedCharMap = string.maketrans(allowedChars, " "*len(allowedChars)) self.command = command def run(self, values): sanitizedValues = [] for value in values: sanitizedValues.append(str(value).translate(None, self.notAllowedCharMap)) result = subprocess.Popen([self.command] + sanitizedValues, stdout=subprocess.PIPE) stdout, stderr = result.communicate() return stdout.replace("\n", "<br>") whois = {"displayName": "Whois Lookup", "plugin": Plugin("whois", string.letters + string.digits + ".:-_")} dns_lookup = {"displayName": "DNS Lookup", "plugin": Plugin("host", string.letters + string.digits + ".:-_")} mapping = {"addr": [whois, dns_lookup], "string": [dns_lookup]}
Use the Team Cymru whois server by default, make it easier to use complex commands, and optionally insert a new line before the output.
import subprocess import string import shlex class Plugin: def __init__(self, command, allowedChars, insertInitialNewline=False): # We replace the characters we do allow with empty strings, to get a string of all the characters we don't allow. self.notAllowedCharMap = str(string.maketrans(allowedChars, " "*len(allowedChars))) self.command = shlex.split(command) self.insertInitialNewline = insertInitialNewline def run(self, values): sanitizedValues = [] for value in values: sanitizedValues.append(str(value).translate(None, self.notAllowedCharMap)) result = subprocess.Popen(self.command + sanitizedValues, stdout=subprocess.PIPE) stdout, stderr = result.communicate() if self.insertInitialNewline: stdout = "\n" + stdout return stdout.replace("\n", "<br>") whois = {"displayName": "Whois Lookup", "plugin": Plugin("whois -h whois.cymru.com \" -p -u\"", string.letters + string.digits + ".:-_", insertInitialNewline=True)} dns_lookup = {"displayName": "DNS Lookup", "plugin": Plugin("host", string.letters + string.digits + ".:-_")} mapping = {"addr": [whois, dns_lookup], "string": [dns_lookup]}
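The switch to shlex.split is what lets the quoted Cymru flags survive as a single argv entry; a standalone demonstration:

import shlex

print(shlex.split('whois -h whois.cymru.com " -p -u"'))
# -> ['whois', '-h', 'whois.cymru.com', ' -p -u']

print('whois -h whois.cymru.com " -p -u"'.split())
# -> ['whois', '-h', 'whois.cymru.com', '"', '-p', '-u"']  -- naive split breaks it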
import json from datetime import datetime from django.http import HttpResponse def heartbeat(request): """ Simple view that a loadbalancer can check to verify that the app is up """ output = { 'date': datetime.now().isoformat() } return HttpResponse(json.dumps(output, indent=4))
Make heartbeat url wait for courses to be loaded
import json from datetime import datetime from django.http import HttpResponse from xmodule.modulestore.django import modulestore def heartbeat(request): """ Simple view that a loadbalancer can check to verify that the app is up """ output = { 'date': datetime.now().isoformat(), 'courses': [course.location for course in modulestore().get_courses()], } return HttpResponse(json.dumps(output, indent=4))
#!/usr/bin/python3 from pprint import pprint import json import sys if __name__ == '__main__': if len(sys.argv) >= 2: path = sys.argv[1].split('.') else: path = ['error', 'stack'] obj = json.load(sys.stdin) try: for part in path: obj = obj[part] except KeyError: pass if isinstance(obj, str): print(obj) else: pprint(obj)
Improve stack trace printing by trying several fallback paths, including 'traceback'.
#!/usr/bin/python3 from pprint import pprint import json import sys def get(obj, path): try: for part in path: obj = obj[part] return obj except KeyError: return None if __name__ == '__main__': if len(sys.argv) >= 2: paths = [sys.argv[1].split('.')] else: paths = [ ['meta', 'error', 'stack'], ['error', 'stack'], ['traceback'], ] obj = json.load(sys.stdin) for path in paths: subobj = get(obj, path) if subobj is not None: obj = subobj break if isinstance(obj, str): print(obj) else: pprint(obj)
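The fallback logic is compact enough to show in isolation: get() returns None for a missing path, so the loop just moves on to the next candidate. A self-contained copy:

def get(obj, path):
    try:
        for part in path:
            obj = obj[part]
        return obj
    except KeyError:
        return None

doc = {'meta': {'error': {'stack': 'boom at step 3'}}}
print(get(doc, ['error', 'stack']))          # None -> the next path is tried
print(get(doc, ['meta', 'error', 'stack']))  # 'boom at step 3'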
from __future__ import absolute_import, print_function, unicode_literals from builtins import dict, str import matplotlib fontsize=7 def set_fig_params(): matplotlib.rcParams['font.sans-serif'] = 'Arial' matplotlib.rcParams['text.usetex'] = True matplotlib.rcParams['text.latex.preamble'] = [ r'\usepackage{helvet}', r'\usepackage{sansmath}', r'\sansmath', r'\usepackage{underscore}',] def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'): ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position(yticks_position) ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize, pad=tick_padding, length=2, width=0.5) ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize, pad=tick_padding, length=2, width=0.5) ax.xaxis.labelpad = label_padding ax.yaxis.labelpad = label_padding ax.xaxis.label.set_size(fontsize) ax.yaxis.label.set_size(fontsize)
Remove raw strings with r'\use...' that get interpreted as Unicode escapes!
from __future__ import absolute_import, print_function, unicode_literals from builtins import dict, str import matplotlib fontsize=7 def set_fig_params(): matplotlib.rcParams['font.sans-serif'] = 'Arial' matplotlib.rcParams['text.usetex'] = True matplotlib.rcParams['text.latex.preamble'] = [ '\\usepackage{helvet}', '\\usepackage{sansmath}', '\\sansmath', '\\usepackage{underscore}',] def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'): ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position(yticks_position) ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize, pad=tick_padding, length=2, width=0.5) ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize, pad=tick_padding, length=2, width=0.5) ax.xaxis.labelpad = label_padding ax.yaxis.labelpad = label_padding ax.xaxis.label.set_size(fontsize) ax.yaxis.label.set_size(fontsize)
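For context on why the raw strings had to go (Python 2 semantics): with unicode_literals in effect, r'\usepackage{...}' is a raw *unicode* literal, and \u inside it is still parsed as the start of a \uXXXX escape, so it fails at compile time. Doubling the backslash sidesteps the escape entirely:

from __future__ import unicode_literals

# r'\usepackage{helvet}' would be a SyntaxError here on Python 2, because
# \u is treated as an escape even in raw unicode literals.
s = '\\usepackage{helvet}'
print(s)  # \usepackage{helvet}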
from django.core.management.base import BaseCommand from django.contrib.auth.models import User from django.core.management import call_command class Command(BaseCommand): help = 'Creates a superuser for Heroku' def handle(self, *args, **kwargs): verbosity = kwargs['verbosity'] call_command('migrate', verbosity=0) User.objects.create_superuser( username='admin', email='admin@example.com', password='changeme123' ) if verbosity > 0: self.stdout.write( self.style.SUCCESS('Successfully run all Heroku commands.') )
Remove Heroku createsuperuser command. Migrate now creates a default user.
from django.core.management.base import BaseCommand from django.contrib.auth.models import User from django.core.management import call_command class Command(BaseCommand): help = 'Runs migrations for Heroku' def handle(self, *args, **kwargs): verbosity = kwargs['verbosity'] call_command('migrate', verbosity=0) if verbosity > 0: self.stdout.write( self.style.SUCCESS('Successfully ran all Heroku commands.') )
import gzip import csv from django.core.files.storage import default_storage def update_feed(feed): with default_storage.open(feed.file_path, 'w') as output_file: if feed.compression: output = gzip.GzipFile(fileobj=output_file) else: output = output_file writer = csv.DictWriter(output,feed.attributes, delimiter=str("\t")) writer.writeheader() for item in feed.items(): writer.writerow(feed.item_attributes(item)) if feed.compression: output.close()
Fix compressed feeds in Python 3
from __future__ import unicode_literals import gzip import csv from django.core.files.storage import default_storage def update_feed(feed): with default_storage.open(feed.file_path, 'wb') as output_file: if feed.compression: try: output = gzip.open(output_file, 'wt') except TypeError: output = gzip.GzipFile(fileobj=output_file, mode='w') else: output = output_file writer = csv.DictWriter(output, feed.attributes, dialect=csv.excel_tab) writer.writeheader() for item in feed.items(): writer.writerow(feed.item_attributes(item)) if feed.compression: output.close()
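The try/except around gzip.open is the whole compatibility trick: on Python 3 gzip.open accepts a file object plus a text mode, while on Python 2 the same call raises TypeError, so the code drops back to GzipFile. A self-contained sketch against an in-memory buffer (not Django storage):

import gzip
import io

buf = io.BytesIO()
try:
    out = gzip.open(buf, 'wt')                   # Python 3: text wrapper
except TypeError:
    out = gzip.GzipFile(fileobj=buf, mode='w')   # Python 2 fallback
out.write('sku\tprice\n')  # str is text on py3 and bytes on py2 -- both fine
out.close()

print(len(buf.getvalue()) > 0)  # True: compressed bytes were produced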
import os import django from django.conf import settings if not settings.configured: settings_dict = dict( INSTALLED_APPS=[ 'django.contrib.contenttypes', 'django.contrib.auth', 'bootstrap3', 'form_utils', ], DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", } }, MEDIA_ROOT=os.path.join(os.path.dirname(__file__), 'media'), MEDIA_URL='/media/', STATIC_URL='/static/', MIDDLEWARE_CLASSES=[], BOOTSTRAP3={ 'form_renderers': { 'default': 'form_utils_bootstrap3.renderers.BetterFormRenderer' } } ) settings.configure(**settings_dict) if django.VERSION >= (1, 7): django.setup()
Fix tests for Django trunk
import os import django from django.conf import settings if not settings.configured: settings_dict = dict( INSTALLED_APPS=[ 'django.contrib.contenttypes', 'django.contrib.auth', 'bootstrap3', 'form_utils', ], DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", } }, MEDIA_ROOT=os.path.join(os.path.dirname(__file__), 'media'), MEDIA_URL='/media/', STATIC_URL='/static/', MIDDLEWARE_CLASSES=[], BOOTSTRAP3={ 'form_renderers': { 'default': 'form_utils_bootstrap3.renderers.BetterFormRenderer' } } ) if django.VERSION >= (1, 8): settings_dict['TEMPLATES'] = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [] } ] settings.configure(**settings_dict) if django.VERSION >= (1, 7): django.setup()
#from requests import get from heliotron.bridge import Bridge from heliotron.light import Light import heliotron.presets __all__ = ['Bridge', 'Light', 'presets']
Change module import to squash a code smell
#from requests import get from heliotron.bridge import Bridge from heliotron.light import Light from heliotron import presets __all__ = ['Bridge', 'Light', 'presets']
# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version __version__ = pbr.version.VersionInfo( 'openstack').version_string()
Use project name to retrieve version info Change-Id: Iaef93bde5183263f900166b8ec90eefb7bfdc99b
# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version __version__ = pbr.version.VersionInfo( 'python-openstacksdk').version_string()
# Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. from UM.Tool import Tool from UM.Scene.Selection import Selection from UM.Application import Application from . import PerObjectSettingsModel class PerObjectSettingsTool(Tool): def __init__(self): super().__init__() self.setExposedProperties("Model", "SelectedIndex", "PrintSequence") def event(self, event): return False def getModel(self): return PerObjectSettingsModel.PerObjectSettingsModel() def getSelectedIndex(self): selected_object_id = id(Selection.getSelectedObject(0)) index = self.getModel().find("id", selected_object_id) return index def getPrintSequence(self): settings = Application.getInstance().getMachineManager().getActiveProfile() return settings.getSettingValue("print_sequence")
Remove more remnants of print sequence message

I found this other place that was helping to display the message that warns that print sequence is set per-object. Since the latter is no longer possible, this message shouldn't be displayed any more.

Contributes to issue CURA-458.
# Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. from UM.Tool import Tool from UM.Scene.Selection import Selection from UM.Application import Application from . import PerObjectSettingsModel class PerObjectSettingsTool(Tool): def __init__(self): super().__init__() self.setExposedProperties("Model", "SelectedIndex") def event(self, event): return False def getModel(self): return PerObjectSettingsModel.PerObjectSettingsModel() def getSelectedIndex(self): selected_object_id = id(Selection.getSelectedObject(0)) index = self.getModel().find("id", selected_object_id) return index
import copy import json import os import unittest from client.client import Client # To run the tests against the test instance instead, # set environment variable PMI_DRC_RDR_INSTANCE. _DEFAULT_INSTANCE = 'http://localhost:8080' _OFFLINE_BASE_PATH = 'offline' class BaseClientTest(unittest.TestCase): def setUp(self): super(BaseClientTest, self).setUp() self.maxDiff = None instance = os.environ.get('PMI_DRC_RDR_INSTANCE') or _DEFAULT_INSTANCE creds_file = os.environ.get('TESTING_CREDS_FILE') self.client = Client(parse_cli=False, default_instance=instance, creds_file=creds_file) self.offline_client = Client( base_path=_OFFLINE_BASE_PATH, parse_cli=False, default_instance=instance, creds_file=creds_file) def assertJsonEquals(self, obj_a, obj_b): obj_b = copy.deepcopy(obj_b) for transient_key in ('etag', 'kind', 'meta'): if transient_key in obj_b: del obj_b[transient_key] self.assertMultiLineEqual(_pretty(obj_a), _pretty(obj_b)) def _pretty(obj): return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
Configure logging in client tests, so client logs show up.
import copy import json import os import unittest from client.client import Client from tools.main_util import configure_logging # To run the tests against the test instance instead, # set environment variable PMI_DRC_RDR_INSTANCE. _DEFAULT_INSTANCE = 'http://localhost:8080' _OFFLINE_BASE_PATH = 'offline' class BaseClientTest(unittest.TestCase): def setUp(self): super(BaseClientTest, self).setUp() configure_logging() self.maxDiff = None instance = os.environ.get('PMI_DRC_RDR_INSTANCE') or _DEFAULT_INSTANCE creds_file = os.environ.get('TESTING_CREDS_FILE') self.client = Client(parse_cli=False, default_instance=instance, creds_file=creds_file) self.offline_client = Client( base_path=_OFFLINE_BASE_PATH, parse_cli=False, default_instance=instance, creds_file=creds_file) def assertJsonEquals(self, obj_a, obj_b): obj_b = copy.deepcopy(obj_b) for transient_key in ('etag', 'kind', 'meta'): if transient_key in obj_b: del obj_b[transient_key] self.assertMultiLineEqual(_pretty(obj_a), _pretty(obj_b)) def _pretty(obj): return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
""" Classes for customising node caching. """ class BasicCacher(object): """ Basic in-memory caching. """ def __init__(self, logger=None): self._cache = {} self.logger = logger def set_cache(self, node, data): """ Store some data on the object. """ self._cache[node.label] = data def get_cache(self, node): """ Return cached data. """ return self._cache.get(node.label) def has_cache(self, node): return self._cache.get(node.label) is not None def clear_cache(self, node): del self._cache[node.label] def clear(self): self._cache = {} def __repr__(self): return "<%s>" % self.__class__.__name__
Test for existence of node before clearing it
""" Classes for customising node caching. """ class BasicCacher(object): """ Basic in-memory caching. """ def __init__(self, logger=None): self._cache = {} self.logger = logger def set_cache(self, node, data): """ Store some data on the object. """ self._cache[node.label] = data def get_cache(self, node): """ Return cached data. """ return self._cache.get(node.label) def has_cache(self, node): return self._cache.get(node.label) is not None def clear_cache(self, node): if self._cache.get(node.label): del self._cache[node.label] def clear(self): self._cache = {} def __repr__(self): return "<%s>" % self.__class__.__name__
from comics.crawler.base import BaseComicCrawler from comics.crawler.meta import BaseComicMeta class ComicMeta(BaseComicMeta): name = 'Ctrl+Alt+Del Sillies' language = 'en' url = 'http://www.ctrlaltdel-online.com/' start_date = '2008-06-27' history_capable_date = '2008-06-27' schedule = 'Mo,Tu,We,Th,Fr,Sa,Su' time_zone = -5 rights = 'Tim Buckley' class ComicCrawler(BaseComicCrawler): def _get_url(self): self.url = 'http://www.cad-comic.com/comics/Lite%(date)s.jpg' % { 'date': self.pub_date.strftime('%Y%m%d'), }
Update Ctrl+Alt+Del Sillies crawler with new URL
from comics.crawler.base import BaseComicCrawler from comics.crawler.meta import BaseComicMeta class ComicMeta(BaseComicMeta): name = 'Ctrl+Alt+Del Sillies' language = 'en' url = 'http://www.ctrlaltdel-online.com/' start_date = '2008-06-27' history_capable_date = '2008-06-27' schedule = 'Mo,Tu,We,Th,Fr,Sa,Su' time_zone = -5 rights = 'Tim Buckley' class ComicCrawler(BaseComicCrawler): def _get_url(self): self.url = 'http://www.ctrlaltdel-online.com/comics/Lite%(date)s.gif' % { 'date': self.pub_date.strftime('%Y%m%d'), }
import os import logging from flask import Flask from flask.ext.bcrypt import Bcrypt import flask.ext.login as flask_login from flask.ext.sqlalchemy import SQLAlchemy log = logging.getLogger(__name__) app = Flask(__name__) app.config.from_object(os.getenv('APP_SETTINGS')) bcrypt = Bcrypt(app) db = SQLAlchemy(app) # Loads user from an ID and directs actions for redirects etc login_manager = flask_login.LoginManager() login_manager.init_app(app) # Determine which view to direct user if logged out login_manager.login_view = 'login' import mangacork.views from .models import User # Define how to get user object with app object @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first()
Add important todo for fixing prod
import os import logging from flask import Flask from flask.ext.bcrypt import Bcrypt import flask.ext.login as flask_login from flask.ext.sqlalchemy import SQLAlchemy log = logging.getLogger(__name__) app = Flask(__name__) # TODO: It doesn't look like getenv is returning anything on prod # Find an alternative or fix path to retrieve environment var from # virtualenvwrapper app.config.from_object(os.getenv('APP_SETTINGS')) bcrypt = Bcrypt(app) db = SQLAlchemy(app) # Loads user from an ID and directs actions for redirects etc login_manager = flask_login.LoginManager() login_manager.init_app(app) # Determine which view to direct user if logged out login_manager.login_view = 'login' import mangacork.views from .models import User # Define how to get user object with app object @login_manager.user_loader def load_user(userid): return User.query.filter(User.id == userid).first()
# This file is part of Moksha. # # Moksha is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Moksha is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Moksha. If not, see <http://www.gnu.org/licenses/>. # # Copyright 2008, Red Hat, Inc. # Authors: Luke Macken <lmacken@redhat.com> """ Choses the best platform-specific Twisted reactor """ import sys try: if 'linux' in sys.platform: from twisted.internet import epollreactor epollreactor.install() elif 'win' in sys.platform: from twisted.internet import iocpreactor iocpreactor.install() elif 'freebsd' in sys.platform or 'darwin' in sys.platform: from twisted.internet import kqreactor kqreactor.install() except AssertionError: # reactor already installed pass from twisted.internet import reactor
Fix a bug on platform detection on Mac OSX
# This file is part of Moksha. # # Moksha is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Moksha is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Moksha. If not, see <http://www.gnu.org/licenses/>. # # Copyright 2008, Red Hat, Inc. # Authors: Luke Macken <lmacken@redhat.com> """ Choses the best platform-specific Twisted reactor """ import sys try: if 'linux' in sys.platform: from twisted.internet import epollreactor epollreactor.install() elif 'freebsd' in sys.platform or 'darwin' in sys.platform: from twisted.internet import kqreactor kqreactor.install() elif 'win' in sys.platform: from twisted.internet import iocpreactor iocpreactor.install() except AssertionError: # reactor already installed pass from twisted.internet import reactor
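The bug deserves spelling out, since it is easy to miss: sys.platform is 'darwin' on Mac OS X, and 'darwin' contains the substring 'win', so the old ordering sent OS X down the Windows branch. Checking the BSD/darwin case before the Windows one fixes it:

print('win' in 'darwin')  # True  -- why Mac OS X matched the Windows test
print('win' in 'win32')   # True  -- the case the test was written for
print('win' in 'linux2')  # False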
# # Jasy - JavaScript Tooling Refined # Copyright 2010 Sebastian Werner # import os, sys def root(): """ Returns the root path of Jasy """ return os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)) def cldrData(what): return os.path.join(root(), "data", "cldr", what) def localeProject(locale): return os.path.join(root(), "data", "jslocale", locale) def coreProject(): return os.path.join(root(), "data", "jscore")
Reduce path to shortest possible from current dir.
#
# Jasy - JavaScript Tooling Refined
# Copyright 2010 Sebastian Werner
#

import os, sys

def root():
    """ Returns the root path of Jasy """
    return os.path.relpath(os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)))

def cldrData(what):
    return os.path.join(root(), "data", "cldr", what)

def localeProject(locale):
    return os.path.join(root(), "data", "jslocale", locale)

def coreProject():
    return os.path.join(root(), "data", "jscore")
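A small illustration of what relpath adds on top of normpath; the absolute paths are hypothetical, and relpath's output depends on the start directory (the current working directory by default):

import os

# normpath only collapses '..' segments; relpath additionally rewrites the
# result relative to a start directory.
print(os.path.normpath('/home/user/jasy/core/../../data'))  # '/home/user/data'
print(os.path.relpath('/home/user/data', '/home/user'))     # 'data'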
from tinydb import TinyDB, Query class BotModule: name = '' # name of your module description = '' # description of its function help_text = '' # help text for explaining how to do things trigger_string = '' # string to listen for as trigger has_background_loop = False listen_for_reaction = False loaded_modules = [] admin_modules = [370934086111330308, 372729159933362177] trigger_char = '!' # char preceding trigger string module_version = '0.0.0' def __init__(self): self.module_db = TinyDB('./modules/databases/' + self.name) async def parse_command(self, message, client): raise NotImplementedError("Parse function not implemented in module:" + self.name) async def background_loop(self, client): raise NotImplementedError("background_loop function not implemented in module:" + self.name) async def on_reaction_add(self, reaction, client, user): raise NotImplementedError("on_reaction_add function not implemented in module:" + self.name) async def on_reaction_remove(self, reaction, client, user): raise NotImplementedError("on_reaction_remove function not implemented in module:" + self.name)
Fix bug in admin_module checking
from tinydb import TinyDB, Query class BotModule: name = '' # name of your module description = '' # description of its function help_text = '' # help text for explaining how to do things trigger_string = '' # string to listen for as trigger has_background_loop = False listen_for_reaction = False loaded_modules = [] admin_modules = ['370934086111330308', '372729159933362177'] trigger_char = '!' # char preceding trigger string module_version = '0.0.0' def __init__(self): self.module_db = TinyDB('./modules/databases/' + self.name) async def parse_command(self, message, client): raise NotImplementedError("Parse function not implemented in module:" + self.name) async def background_loop(self, client): raise NotImplementedError("background_loop function not implemented in module:" + self.name) async def on_reaction_add(self, reaction, client, user): raise NotImplementedError("on_reaction_add function not implemented in module:" + self.name) async def on_reaction_remove(self, reaction, client, user): raise NotImplementedError("on_reaction_remove function not implemented in module:" + self.name)
# -*- coding: utf-8 -*- import logging #from django import forms #from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.forms import UserCreationForm from django.contrib.auth import get_user_model LOGGER = logging.getLogger(__name__) User = get_user_model() class FullUserCreationForm(UserCreationForm): """ Like the django UserCreationForm, with optional first_name and last_name, and email too. .. note:: the ``username`` field from Django's ``UserCreationForm`` is overriden by our Model's one. Thus the 30 chars limit doesn't apply. """ class Meta: model = User # We want a different order of fields on the page. fields = ['first_name', 'last_name', 'username', 'email', 'password1', 'password2', ] def save(self, commit=True): user = super(FullUserCreationForm, self).save(commit=False) user.email = self.cleaned_data["email"] user.first_name = self.cleaned_data["first_name"] user.last_name = self.cleaned_data["last_name"] if commit: user.save() return user
Make the FullUserCreationForm work on a fresh database which doesn't have Django's auth_user table.
# -*- coding: utf-8 -*- import logging from django import forms from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import get_user_model LOGGER = logging.getLogger(__name__) User = get_user_model() class FullUserCreationForm(forms.ModelForm): """ Like the django UserCreationForm, with optional first_name and last_name, and email too. .. note:: the ``username`` field from Django's ``UserCreationForm`` is overriden by our Model's one. Thus the 30 chars limit doesn't apply. """ error_messages = { 'password_mismatch': _("The two password fields didn't match."), } email = forms.EmailField() password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput) password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput, help_text=_("Enter the same password as " "above, for verification.")) class Meta: model = User fields = ['first_name', 'last_name', 'username', 'email', 'password1', 'password2', ] def clean_password2(self): password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise forms.ValidationError( self.error_messages['password_mismatch']) return password2 def save(self, commit=True): user = super(FullUserCreationForm, self).save(commit=False) user.set_password(self.cleaned_data["password1"]) user.email = self.cleaned_data["email"] user.first_name = self.cleaned_data["first_name"] user.last_name = self.cleaned_data["last_name"] if commit: user.save() return user
from synapse.tests.common import * import synapse.lib.config as s_config class ConfTest(SynTest): def test_conf_base(self): defs = ( ('fooval',{'type':'int','doc':'what is foo val?','defval':99}), ('enabled',{'type':'bool','doc':'is thing enabled?','defval':0}), ) data = {} def callback(v): data['woot'] = v with s_config.Config(defs=defs) as conf: conf.onConfOptSet('enabled',callback) conf.setConfOpt('enabled','true') self.eq(data.get('woot'), 1) conf.setConfOpts({'fooval':'0x20'}) self.eq(conf.getConfOpt('fooval'), 0x20) conf.setConfOpts({'fooval':0x30}) self.eq(conf.getConfOpt('fooval'), 0x30) self.assertRaises( NoSuchOpt, conf.setConfOpts, {'newp':'hehe'} ) def test_conf_asloc(self): with s_config.Config() as conf: conf.addConfDef('foo',type='int',defval=0,asloc='_foo_valu') self.eq( conf._foo_valu, 0 ) conf.setConfOpt('foo','0x20') self.eq( conf._foo_valu, 0x20)
Update test to ensure that default configuration values are available via getConfOpt
from synapse.tests.common import * import synapse.lib.config as s_config class ConfTest(SynTest): def test_conf_base(self): defs = ( ('fooval',{'type':'int','doc':'what is foo val?','defval':99}), ('enabled',{'type':'bool','doc':'is thing enabled?','defval':0}), ) data = {} def callback(v): data['woot'] = v with s_config.Config(defs=defs) as conf: self.eq(conf.getConfOpt('enabled'), 0) self.eq(conf.getConfOpt('fooval'), 99) conf.onConfOptSet('enabled',callback) conf.setConfOpt('enabled','true') self.eq(data.get('woot'), 1) conf.setConfOpts({'fooval':'0x20'}) self.eq(conf.getConfOpt('fooval'), 0x20) conf.setConfOpts({'fooval':0x30}) self.eq(conf.getConfOpt('fooval'), 0x30) self.assertRaises( NoSuchOpt, conf.setConfOpts, {'newp':'hehe'} ) def test_conf_asloc(self): with s_config.Config() as conf: conf.addConfDef('foo',type='int',defval=0,asloc='_foo_valu') self.eq( conf._foo_valu, 0 ) conf.setConfOpt('foo','0x20') self.eq( conf._foo_valu, 0x20)
from django import forms from django_fixmystreet.fixmystreet.models import FMSUser, getLoggedInUserId from django.contrib.auth.models import User from django.conf import settings from django.utils.translation import ugettext_lazy from django.contrib.sessions.models import Session class ManagersChoiceField (forms.fields.ChoiceField): def __init__(self, *args, **kwargs): # assemble the opt groups. choices = [] choices.append(('', ugettext_lazy("Select a manager"))) currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation managers = FMSUser.objects.filter(manager=True) managers = managers.filter(organisation_id=currentUserOrganisationId) for manager in managers: choices.append((manager.pk,manager.first_name+manager.last_name)) super(ManagersChoiceField,self).__init__(choices,*args,**kwargs) def clean(self, value): super(ManagersChoiceField,self).clean(value) try: model = FMSUser.objects.get(pk=value) except FMSUser.DoesNotExist: raise ValidationError(self.error_messages['invalid_choice']) return model class ManagersListForm(forms.Form): manager=ManagersChoiceField(label="")
Fix 'user not defined' error for non-logged-in users
from django import forms
from django_fixmystreet.fixmystreet.models import FMSUser, getLoggedInUserId
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required


class ManagersChoiceField(forms.fields.ChoiceField):
    def __init__(self, *args, **kwargs):
        choices = []
        choices.append(('', ugettext_lazy("Select a manager")))
        currentUserOrganisationId = 1
        if Session.objects.all()[0].session_key:
            currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
        managers = FMSUser.objects.filter(manager=True)
        managers = managers.filter(organisation_id=currentUserOrganisationId)
        for manager in managers:
            choices.append((manager.pk, manager.first_name + manager.last_name))
        super(ManagersChoiceField, self).__init__(choices, *args, **kwargs)

    def clean(self, value):
        super(ManagersChoiceField, self).clean(value)
        try:
            model = FMSUser.objects.get(pk=value)
        except FMSUser.DoesNotExist:
            raise forms.ValidationError(self.error_messages['invalid_choice'])
        return model


class ManagersListForm(forms.Form):
    manager = ManagersChoiceField(label="")
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function from polyaxon_client.stores.stores.base_store import Store class LocalStore(Store): """ Local filesystem store. This store is noop store since all data is accessible through the filesystem. """ # pylint:disable=arguments-differ STORE_TYPE = Store._LOCAL_STORE # pylint:disable=protected-access def download_file(self, *args, **kwargs): pass def upload_file(self, *args, **kwargs): pass def upload_dir(self, *args, **kwargs): pass def download_dir(self, *args, **kwargs): pass
Update local store base class
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function from polyaxon_client.stores.stores.base_store import BaseStore class LocalStore(BaseStore): """ Local filesystem store. This store is noop store since all data is accessible through the filesystem. """ # pylint:disable=arguments-differ STORE_TYPE = BaseStore._LOCAL_STORE # pylint:disable=protected-access def download_file(self, *args, **kwargs): pass def upload_file(self, *args, **kwargs): pass def upload_dir(self, *args, **kwargs): pass def download_dir(self, *args, **kwargs): pass
import re from unicodedata import normalize _punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+') # From http://flask.pocoo.org/snippets/5/ def slugify(text, delim=u'-'): """Generates an slightly worse ASCII-only slug.""" result = [] for word in _punct_re.split(text.lower()): word = normalize('NFKD', word).encode('ascii', 'ignore') if word: result.append(word) return unicode(delim.join(result))
Change ":" in titles to "-" for better SEO
import re from unicodedata import normalize _punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+') # From http://flask.pocoo.org/snippets/5/ def slugify(text, delim=u'-'): """Generates an slightly worse ASCII-only slug.""" result = [] for word in _punct_re.split(text.lower()): word = normalize('NFKD', word).encode('ascii', 'ignore') if word: result.append(word) return unicode(delim.join(result))
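An end-to-end check of the one-character change, using the slugify above (Python 2 assumed, hence the u'' result): with ':' in the punctuation class, the colon becomes a delimiter instead of leaking into the slug.

print(slugify(u'Django: The Web Framework'))
# -> u'django-the-web-framework'
# Before the fix, the colon survived the split and the slug came out as
# u'django:-the-web-framework'.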