Columns: input (string, 0–2.17k chars) · instruction (string, 18–2.94k chars) · output (string, 47–3.36k chars)
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emanuel Cino <ecino@compassion.ch>
#
#    The licence is in the file __openerp__.py
#
##############################################################################

from . import xmlrpc_helper
Fix comment header on init
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emmanuel Mathier <emmanuel.mathier@gmail.ch>
#
#    The licence is in the file __openerp__.py
#
##############################################################################

from . import xmlrpc_helper
from species import CODES


def all_region_codes():
    return CODES.keys


def all_species_codes():
    return species_codes_for_regions(all_region_codes)


def species_codes_for_regions(region_codes):
    if region_codes is None:
        return None
    species_codes = []
    for region_code in region_codes:
        species_codes.extend(CODES[region_code])
    # Converting to a set removes duplicates
    return list(set(species_codes))
Fix typo that prevented instance creation
from species import CODES


def all_region_codes():
    return CODES.keys


def all_species_codes():
    return species_codes_for_regions(all_region_codes())


def species_codes_for_regions(region_codes):
    if region_codes is None:
        return None
    species_codes = []
    for region_code in region_codes:
        species_codes.extend(CODES[region_code])
    # Converting to a set removes duplicates
    return list(set(species_codes))
from functools import lru_cache

import boto3
from pydantic import BaseSettings, Field, root_validator


@lru_cache
def get_secrets_client():
    return boto3.client("secretsmanager")


@lru_cache
def get_secret(secret_id):
    client = get_secrets_client()
    secret_value = client.get_secret_value(SecretId=secret_id)
    return secret_value["SecretString"]


class Config(BaseSettings):
    environment: str
    evm_full_node_url: str
    reward_root_submitter_address: str
    reward_root_submitter_private_key: str
    reward_root_submitter_sentry_dsn: str
    log_level: str = Field("WARNING")

    @root_validator(pre=True)
    def load_secrets(cls, values):
        env = values["environment"]
        for field_name, field in cls.__fields__.items():
            # Check it isn't already set *and* there is no default
            if field_name not in values and field.default is None:
                values[field_name] = get_secret(f"{env}_{field_name}")
        return values

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"
Set default logging to info
from functools import lru_cache

import boto3
from pydantic import BaseSettings, Field, root_validator


@lru_cache
def get_secrets_client():
    return boto3.client("secretsmanager")


@lru_cache
def get_secret(secret_id):
    client = get_secrets_client()
    secret_value = client.get_secret_value(SecretId=secret_id)
    return secret_value["SecretString"]


class Config(BaseSettings):
    environment: str
    evm_full_node_url: str
    reward_root_submitter_address: str
    reward_root_submitter_private_key: str
    reward_root_submitter_sentry_dsn: str
    log_level: str = Field("INFO")

    @root_validator(pre=True)
    def load_secrets(cls, values):
        env = values["environment"]
        for field_name, field in cls.__fields__.items():
            # Check it isn't already set *and* there is no default
            if field_name not in values and field.default is None:
                values[field_name] = get_secret(f"{env}_{field_name}")
        return values

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"
import sys
sys.path.insert(1, "../../../")
import h2o


def bigcatRF(ip, port):
    # Connect to h2o
    h2o.init(ip, port)

    # Training set has 100 categories from cat001 to cat100
    # Categories cat001, cat003, ... are perfect predictors of y = 1
    # Categories cat002, cat004, ... are perfect predictors of y = 0

    # Log.info("Importing bigcat_5000x2.csv data...\n")
    bigcat = h2o.import_frame(path=h2o.locate("smalldata/gbm_test/bigcat_5000x2.csv"))
    bigcat["y"] = bigcat["y"].asfactor()

    # Log.info("Summary of bigcat_5000x2.csv from H2O:\n")
    # bigcat.summary()

    # Train H2O DRF Model:
    # Log.info("H2O DRF (Naive Split) with parameters:\nclassification = TRUE, ntree = 1, depth = 1, nbins = 100\n")
    model = h2o.random_forest(x=bigcat[["X"]], y=bigcat["y"], ntrees=1, max_depth=1, nbins=100)
    model.show()


if __name__ == "__main__":
    h2o.run_test(sys.argv, bigcatRF)
Add usage of nbins_cats to RF pyunit.
import sys
sys.path.insert(1, "../../../")
import h2o


def bigcatRF(ip, port):
    # Connect to h2o
    h2o.init(ip, port)

    # Training set has 100 categories from cat001 to cat100
    # Categories cat001, cat003, ... are perfect predictors of y = 1
    # Categories cat002, cat004, ... are perfect predictors of y = 0

    # Log.info("Importing bigcat_5000x2.csv data...\n")
    bigcat = h2o.import_frame(path=h2o.locate("smalldata/gbm_test/bigcat_5000x2.csv"))
    bigcat["y"] = bigcat["y"].asfactor()

    # Log.info("Summary of bigcat_5000x2.csv from H2O:\n")
    # bigcat.summary()

    # Train H2O DRF Model:
    # Log.info("H2O DRF (Naive Split) with parameters:\nclassification = TRUE, ntree = 1, depth = 1, nbins = 100, nbins_cats=10\n")
    model = h2o.random_forest(x=bigcat[["X"]], y=bigcat["y"], ntrees=1, max_depth=1, nbins=100, nbins_cats=10)
    model.show()


if __name__ == "__main__":
    h2o.run_test(sys.argv, bigcatRF)
"""Test GitHub issue #4. Diagram could not be loaded due to JuggleError (presumed cyclic resolving of diagram items). """ from gi.repository import GLib, Gtk from gaphor.storage.storage import load class TestCyclicDiagram: def test_bug(self, case, test_models): """Load file. This does not nearly resemble the error, since the model should be loaded from within the mainloop (which will delay all updates). """ path = test_models / "dbus.gaphor" load(path, case.element_factory, case.modeling_language) def test_bug_idle(self, case, test_models): """Load file in gtk main loop. This does not nearly resemble the error, since the model should be loaded from within the mainloop (which will delay all updates). """ def handler(): try: path = test_models / "dbus.gaphor" load(path, case.element_factory, case.modeling_language) finally: Gtk.main_quit() assert GLib.timeout_add(1, handler) > 0 Gtk.main()
Convert load_model tests to pytest functions
# flake8: noqa F401,F811
"""Diagram could not be loaded due to JuggleError (presumed cyclic
resolving of diagram items)."""

from gi.repository import GLib, Gtk

from gaphor.diagram.tests.fixtures import (
    element_factory,
    event_manager,
    modeling_language,
)
from gaphor.storage.storage import load


def test_cyclic_diagram_bug(element_factory, modeling_language, test_models):
    """Load file.

    This does not nearly resemble the error, since the model should be
    loaded from within the mainloop (which will delay all updates).
    """
    path = test_models / "dbus.gaphor"
    load(path, element_factory, modeling_language)


def test_cyclic_diagram_bug_idle(element_factory, modeling_language, test_models):
    """Load file in gtk main loop.

    This does not nearly resemble the error, since the model should be
    loaded from within the mainloop (which will delay all updates).
    """

    def handler():
        try:
            path = test_models / "dbus.gaphor"
            load(path, element_factory, modeling_language)
        finally:
            Gtk.main_quit()

    assert GLib.timeout_add(1, handler) > 0
    Gtk.main()
from __future__ import print_function

import json

from argh import ArghParser, arg

from ghtools import cli
from ghtools.api import GithubAPIClient

parser = ArghParser(description="Browse the GitHub API")


@arg('github', nargs='?', help='GitHub instance nickname (e.g "enterprise")')
@arg('url', help='URL to browse')
@arg('-m', '--method', default='get', choices=('get', 'delete'), help='HTTP method to use.')
def browse(args):
    """
    Print the GitHub API response at the given URL
    """
    with cli.catch_api_errors():
        client = GithubAPIClient(nickname=args.github)
        res = client.request(args.method, args.url, _raise=False)

        print('HTTP/1.1 {0} {1}'.format(res.status_code, res.reason))
        for k, v in res.headers.items():
            print("{0}: {1}".format(k, v))

        print()

        if res.json() is not None:
            print(json.dumps(res.json, indent=2))
        else:
            print(res.content)

parser.set_default_command(browse)


def main():
    parser.dispatch()

if __name__ == '__main__':
    main()
Add missed json to json() tweak
from __future__ import print_function

import json

from argh import ArghParser, arg

from ghtools import cli
from ghtools.api import GithubAPIClient

parser = ArghParser(description="Browse the GitHub API")


@arg('github', nargs='?', help='GitHub instance nickname (e.g "enterprise")')
@arg('url', help='URL to browse')
@arg('-m', '--method', default='get', choices=('get', 'delete'), help='HTTP method to use.')
def browse(args):
    """
    Print the GitHub API response at the given URL
    """
    with cli.catch_api_errors():
        client = GithubAPIClient(nickname=args.github)
        res = client.request(args.method, args.url, _raise=False)

        print('HTTP/1.1 {0} {1}'.format(res.status_code, res.reason))
        for k, v in res.headers.items():
            print("{0}: {1}".format(k, v))

        print()

        if res.json() is not None:
            print(json.dumps(res.json(), indent=2))
        else:
            print(res.content)

parser.set_default_command(browse)


def main():
    parser.dispatch()

if __name__ == '__main__':
    main()
import sys


def lowest_unique(int_list):
    numbers = {}
    for index in range(len(int_list)):
        group = numbers.setdefault(int(int_list[index]), [])
        group.append(index)

    for number in numbers:
        retval = numbers[number]
        if len(retval) == 1:
            return retval[0] + 1
    return 0


if __name__ == '__main__':
    inputfile = sys.argv[1]
    with open(inputfile, 'r') as f:
        for line in f:
            line_list = line.rstrip().split()
            if line_list:
                print str(lowest_unique(line_list))
Improve solution by using enumerate
import sys


def lowest_unique(int_list):
    numbers = {}
    for index, number in enumerate(int_list):
        group = numbers.setdefault(int(number), [])
        group.append(index)

    for number in sorted(numbers.keys()):
        retval = numbers[number]
        if len(retval) == 1:
            return retval[0] + 1
    return 0


if __name__ == '__main__':
    inputfile = sys.argv[1]
    with open(inputfile, 'r') as f:
        for line in f:
            line_list = line.rstrip().split()
            if line_list:
                print str(lowest_unique(line_list))
from distutils.core import setup

setup(
    name='python3-indy',
    version='1.6.1',
    packages=['indy'],
    url='https://github.com/hyperledger/indy-sdk',
    license='MIT/Apache-2.0',
    author='Vyacheslav Gudkov',
    author_email='vyacheslav.gudkov@dsr-company.com',
    description='This is the official SDK for Hyperledger Indy (https://www.hyperledger.org/projects), which provides a distributed-ledger-based foundation for self-sovereign identity (https://sovrin.org). The major artifact of the SDK is a c-callable library.',
    install_requires=['pytest', 'pytest-asyncio', 'base58'],
    tests_require=['pytest', 'pytest-asyncio', 'base58']
)
Fix pytest version in python wrapper deps.

Signed-off-by: Sergey Minaev <322af3f2df10918c6ef5280f56be0b711278b1ae@dsr-company.com>
from distutils.core import setup

setup(
    name='python3-indy',
    version='1.6.1',
    packages=['indy'],
    url='https://github.com/hyperledger/indy-sdk',
    license='MIT/Apache-2.0',
    author='Vyacheslav Gudkov',
    author_email='vyacheslav.gudkov@dsr-company.com',
    description='This is the official SDK for Hyperledger Indy (https://www.hyperledger.org/projects), which provides a distributed-ledger-based foundation for self-sovereign identity (https://sovrin.org). The major artifact of the SDK is a c-callable library.',
    install_requires=['pytest<3.7', 'pytest-asyncio', 'base58'],
    tests_require=['pytest<3.7', 'pytest-asyncio', 'base58']
)
import RDF
import zope.interface

import interfaces
import rdf_helper


class Jurisdiction(object):
    zope.interface.implements(interfaces.IJurisdiction)

    def __init__(self, short_name):
        '''@param short_name can be e.g. mx'''
        model = rdf_helper.init_model(rdf_helper.JURI_RDF_PATH)
        self.code = short_name
        self.id = 'http://creativecommons.org/international/%s/' % short_name
        id_uri = RDF.Uri(self.id)
        self.local_url = rdf_helper.query_to_single_value(
            model, id_uri,
            RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
        self.launched = rdf_helper.query_to_single_value(
            model, id_uri,
            RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
Add documentation and make Jurisdiction calls not fail when some of the values aren't found.
import RDF
import zope.interface

import interfaces
import rdf_helper


class Jurisdiction(object):
    zope.interface.implements(interfaces.IJurisdiction)

    def __init__(self, short_name):
        """Creates an object representing a jurisdiction.
        short_name is a (usually) two-letter code representing the
        same jurisdiction; for a complete list, see
        cc.license.jurisdiction_codes()"""
        model = rdf_helper.init_model(rdf_helper.JURI_RDF_PATH)
        self.code = short_name
        self.id = 'http://creativecommons.org/international/%s/' % short_name
        id_uri = RDF.Uri(self.id)
        try:
            self.local_url = rdf_helper.query_to_single_value(
                model, id_uri,
                RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
        except rdf_helper.NoValuesFoundException:
            self.local_url = None
        try:
            self.launched = rdf_helper.query_to_single_value(
                model, id_uri,
                RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
        except rdf_helper.NoValuesFoundException:
            self.launched = None
#
# Copyright (c) 2009 rPath, Inc.
#
# All Rights Reserved
#
from mint import amiperms


class AWSHandler(object):
    def __init__(self, cfg, db):
        self.db = db
        self.amiPerms = amiperms.AMIPermissionsManager(cfg, db)

    def notify_UserProductRemoved(self, event, userId, projectId, userlevel = None):
        self.amiPerms.addMemberToProject(userId, projectId)

    def notify_UserProductAdded(self, event, userId, projectId, userlevel = None):
        self.amiPerms.deleteMemberFromProject(userId, projectId)

    def notify_UserProductChanged(self, event, userId, projectId, oldLevel, newLevel):
        self.amiPerms.setMemberLevel(userId, projectId, oldLevel, newLevel)

    def notify_UserCancelled(self, userId):
        # yuck.
        awsFound, oldAwsAccountNumber = self.db.userData.getDataValue(
            userId, 'awsAccountNumber')
        self.amiPerms.setUserKey(userId, oldAwsAccountNumber, None)

    def notify_ReleasePublished(self, releaseId):
        self.amiPerms.publishRelease(releaseId)

    def notify_ReleaseUnpublished(self, releaseId):
        self.amiPerms.unpublishRelease(releaseId)
Fix typo when setting up handler.
#
# Copyright (c) 2009 rPath, Inc.
#
# All Rights Reserved
#
from mint import amiperms


class AWSHandler(object):
    def __init__(self, cfg, db):
        self.db = db
        self.amiPerms = amiperms.AMIPermissionsManager(cfg, db)

    def notify_UserProductRemoved(self, event, userId, projectId, userlevel = None):
        self.amiPerms.deleteMemberFromProject(userId, projectId)

    def notify_UserProductAdded(self, event, userId, projectId, userlevel = None):
        self.amiPerms.addMemberToProject(userId, projectId)

    def notify_UserProductChanged(self, event, userId, projectId, oldLevel, newLevel):
        self.amiPerms.setMemberLevel(userId, projectId, oldLevel, newLevel)

    def notify_UserCancelled(self, userId):
        # yuck.
        awsFound, oldAwsAccountNumber = self.db.userData.getDataValue(
            userId, 'awsAccountNumber')
        self.amiPerms.setUserKey(userId, oldAwsAccountNumber, None)

    def notify_ReleasePublished(self, releaseId):
        self.amiPerms.publishRelease(releaseId)

    def notify_ReleaseUnpublished(self, releaseId):
        self.amiPerms.unpublishRelease(releaseId)
import sys
import time

import bracoujl.processor.gb_z80 as proc

dis = proc.CPU_CONF['disassembler']()


def disassemble(lines):
    res = ''
    for line in lines:
        op = proc.CPU_CONF['parse_line'](line)
        if op is None:
            continue
        res += '{:04X}'.format(op['pc']) + ' - ' + dis.disassemble(op) + '\n'
    res += '-' * 30
    return res

try:
    N = int(sys.argv[1])
except (ValueError, IndexError):
    N = -1

uniq, lines, count = set(), [], 0
for line in sys.stdin:
    if line == '--\n':
        tlines = disassemble(lines)
        if tlines not in uniq:
            uniq.add(tlines)
            print(tlines)
        lines = []
        if N == count:
            sys.exit(0)
        count += 1
    lines.append(line[:-1])
Fix and enhance disassemble miscellaneous script.
import argparse
import sys
import time

import bracoujl.processor.gb_z80 as proc

dis = proc.CPU_CONF['disassembler']()


def disassemble(lines, keep_logs=False):
    res = []
    for line in lines:
        op, gline = proc.CPU_CONF['parse_line'](line), ''
        if keep_logs:
            gline += line + (' | DIS: ' if op is not None else '')
        else:
            gline += '{:04X}'.format(op['pc']) + ' - '
        if op is not None:
            gline += dis.disassemble(op)
        res.append(gline)
    res.append('-' * 20)
    return '\n'.join(res)

uniq = set()

def display_lines(lines, **kwds):
    tlines = disassemble(lines, **kwds)
    if tlines not in uniq:
        uniq.add(tlines)
        print(tlines)
    return []

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Little disassembly helper.')
    parser.add_argument('-N', action='store', default=-1,
                        help='number of uniq blocks displayed')
    parser.add_argument('-k', '--keep-logs', action='store_true', default=False,
                        help='keep log lines')
    args = parser.parse_args(sys.argv[1:])

    lines, count = [], 0
    for line in sys.stdin:
        if line == '--\n':
            lines = display_lines(lines, keep_logs=args.keep_logs)
            if args.N == count:
                sys.exit(0)
            count += 1
        lines.append(line[:-1])
    if lines:
        display_lines(lines, keep_logs=args.keep_logs)
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('interfaces', parent_package, top_path)
    config.add_data_dir('tests')
    config.add_data_dir('script_templates')
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
Add fsl subpackage on install.

git-svn-id: 24f545668198cdd163a527378499f2123e59bf9f@1050 ead46cd0-7350-4e37-8683-fc4c6f79bf00
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('interfaces', parent_package, top_path)
    config.add_subpackage('fsl')
    config.add_data_dir('tests')
    config.add_data_dir('script_templates')
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
#!/usr/bin/python

from pytun import TunTapDevice
from binascii import hexlify

if __name__ == '__main__':
    tun = TunTapDevice(name='ipsec-tun')
    tun.up()
    tun.persist(True)
    while True:
        try:
            buf = tun.read(tun.mtu)
            print hexlify(buf[4:])
            IPpayload = buf[4:]
            # TODO encrypt buf
            # TODO send to wlan0
            # TODO enable routing
        except KeyboardInterrupt:
            tun.close()
Change shebang to use python from environment. Fix Indentation.
#!/usr/bin/env python

from pytun import TunTapDevice
from binascii import hexlify

if __name__ == '__main__':
    tun = TunTapDevice(name='ipsec-tun')
    tun.up()
    tun.persist(True)
    while True:
        try:
            buf = tun.read(tun.mtu)
            print hexlify(buf[4:])
            IPpayload = buf[4:]
            # TODO encrypt buf
            # TODO send to wlan0
            # TODO enable routing
        except KeyboardInterrupt:
            tun.close()
import frappe
from frappe.model.rename_doc import rename_doc


def execute():
    if frappe.db.exists("DocType", "Desk Page"):
        if frappe.db.exists('DocType', 'Workspace'):
            # this patch was not added initially, so this page might still exist
            frappe.delete_doc('DocType', 'Desk Page')
        else:
            rename_doc('DocType', 'Desk Page', 'Workspace')

    rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)

    frappe.reload_doc('desk', 'doctype', 'workspace')
fix(Patch): Rename Desk Link only if it exists
import frappe
from frappe.model.rename_doc import rename_doc


def execute():
    if frappe.db.exists("DocType", "Desk Page"):
        if frappe.db.exists('DocType', 'Workspace'):
            # this patch was not added initially, so this page might still exist
            frappe.delete_doc('DocType', 'Desk Page')
        else:
            rename_doc('DocType', 'Desk Page', 'Workspace')

    rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)

    if frappe.db.exists('DocType', 'Desk Link'):
        rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)

    frappe.reload_doc('desk', 'doctype', 'workspace')
#!/usr/bin/env python

from gevent.wsgi import WSGIServer
import werkzeug.serving
from werkzeug.debug import DebuggedApplication
from app import get_app

APP_PORT = 5000
DEBUG = True


@werkzeug.serving.run_with_reloader
def main():
    """Starts web application
    """
    app = get_app()
    app.debug = DEBUG
    # app.config['ASSETS_DEBUG'] = DEBUG
    http_server = WSGIServer(('', APP_PORT),
                             DebuggedApplication(app, evalex=True))
    http_server.serve_forever()
    # app.run()

if __name__ == '__main__':
    print('purkinje ready')
    main()
Apply gevent monkey patching, so it will get invoked when main is called via the entry point script and not via shell script
#!/usr/bin/env python

import gevent.monkey
gevent.monkey.patch_all()

from gevent.wsgi import WSGIServer
import werkzeug.serving
from werkzeug.debug import DebuggedApplication
from app import get_app

APP_PORT = 5000
DEBUG = True


@werkzeug.serving.run_with_reloader
def main():
    """Starts web application
    """
    app = get_app()
    app.debug = DEBUG
    # app.config['ASSETS_DEBUG'] = DEBUG
    http_server = WSGIServer(('', APP_PORT),
                             DebuggedApplication(app, evalex=True))
    http_server.serve_forever()
    # app.run()

if __name__ == '__main__':
    print('purkinje ready')
    main()
"""Utiltiy functions for workign on the NinaPro Databases (1 & 2).""" from setuptools import setup, find_packages setup(name='nina_helper', version='2.1', description='Utiltiy functions for workign on the NinaPro Databases (1 & 2)', author='Lif3line', author_email='adamhartwell2@gmail.com', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/nina_helper_package_mk2', # use the URL to the github repo download_url='https://github.com/Lif3line/nina_helper_package_mk2/archive/2.1.tar.gz', # Hack github address install_requires=[ 'os', 'scipy', 'sklearn', 'itertools', 'numpy' ], keywords='ninapro emg')
Use local upload for release
"""Utiltiy functions for workign on the NinaPro Databases (1 & 2).""" from setuptools import setup, find_packages setup(name='nina_helper', version='2.2', description='Utiltiy functions for workign on the NinaPro Databases (1 & 2)', author='Lif3line', author_email='adamhartwell2@gmail.com', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/nina_helper_package_mk2', # use the URL to the github repo # download_url='https://github.com/Lif3line/nina_helper_package_mk2/archive/2.2.tar.gz', # Hack github address install_requires=[ 'scipy', 'sklearn', 'numpy' ], keywords='ninapro emg')
import logging

from django.test import TestCase, RequestFactory

from log_request_id.middleware import RequestIDMiddleware
from testproject.views import test_view


class RequestIDLoggingTestCase(TestCase):

    def setUp(self):
        self.factory = RequestFactory()
        self.handler = logging.getLogger('testproject').handlers[0]

    def test_id_generation(self):
        request = self.factory.get('/')
        middleware = RequestIDMiddleware()
        middleware.process_request(request)
        self.assertTrue(hasattr(request, 'id'))
        test_view(request)
        self.assertTrue(request.id in self.handler.messages[0])
Add test for externally-generated request IDs
import logging

from django.test import TestCase, RequestFactory

from log_request_id.middleware import RequestIDMiddleware
from testproject.views import test_view


class RequestIDLoggingTestCase(TestCase):

    def setUp(self):
        self.factory = RequestFactory()
        self.handler = logging.getLogger('testproject').handlers[0]
        self.handler.messages = []

    def test_id_generation(self):
        request = self.factory.get('/')
        middleware = RequestIDMiddleware()
        middleware.process_request(request)
        self.assertTrue(hasattr(request, 'id'))
        test_view(request)
        self.assertTrue(request.id in self.handler.messages[0])

    def test_external_id_in_http_header(self):
        with self.settings(LOG_REQUEST_ID_HEADER='REQUEST_ID_HEADER'):
            request = self.factory.get('/')
            request.META['REQUEST_ID_HEADER'] = 'some_request_id'
            middleware = RequestIDMiddleware()
            middleware.process_request(request)
            self.assertEqual(request.id, 'some_request_id')
            test_view(request)
            self.assertTrue('some_request_id' in self.handler.messages[0])
import eventlet
import os
import sys

from oslo.config import cfg

from st2common import log as logging
from st2common.models.db import db_setup
from st2common.models.db import db_teardown
from st2actions import config
from st2actions import history

LOG = logging.getLogger(__name__)

eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if '--use-debugger' in sys.argv else True,
    time=True)


def _setup():
    # Parse args to setup config.
    config.parse_args()

    # Setup logging.
    logging.setup(cfg.CONF.history.logging)

    # All other setup which requires config to be parsed and logging to be correctly setup.
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port)


def _run_worker():
    LOG.info('(PID=%s) History worker started.', os.getpid())
    try:
        history.work()
    except (KeyboardInterrupt, SystemExit):
        LOG.info('(PID=%s) History worker stopped.', os.getpid())
    except:
        return 1
    return 0


def _teardown():
    db_teardown()


def main():
    try:
        _setup()
        return _run_worker()
    except:
        LOG.exception('(PID=%s) History worker quit due to exception.', os.getpid())
        return 1
    finally:
        _teardown()
Move code from _run_worker into main
import eventlet
import os
import sys

from oslo.config import cfg

from st2common import log as logging
from st2common.models.db import db_setup
from st2common.models.db import db_teardown
from st2actions import config
from st2actions import history

LOG = logging.getLogger(__name__)

eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if '--use-debugger' in sys.argv else True,
    time=True)


def _setup():
    # Parse args to setup config.
    config.parse_args()

    # Setup logging.
    logging.setup(cfg.CONF.history.logging)

    # All other setup which requires config to be parsed and logging to be correctly setup.
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port)


def _teardown():
    db_teardown()


def main():
    try:
        _setup()
        LOG.info('(PID=%s) Historian started.', os.getpid())
        history.work()
    except (KeyboardInterrupt, SystemExit):
        LOG.info('(PID=%s) Historian stopped.', os.getpid())
        return 0
    except:
        LOG.exception('(PID=%s) Historian quit due to exception.', os.getpid())
        return 1
    finally:
        _teardown()
    return 0
from __future__ import absolute_import
from __future__ import unicode_literals
from casexml.apps.case.models import CommCareCase
from dimagi.utils.parsing import json_format_datetime


def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date):
    """
    Gets all cases with a specified owner ID that have been modified
    since a particular reference_date (using the server's timestamp)
    """
    return [
        row['id'] for row in CommCareCase.get_db().view(
            'cases_by_server_date/by_owner_server_modified_on',
            startkey=[domain, owner_id, json_format_datetime(reference_date)],
            endkey=[domain, owner_id, {}],
            include_docs=False,
            reduce=False
        )
    ]
Make get_case_ids_modified_with_owner_since accept an end date as well
from __future__ import absolute_import
from __future__ import unicode_literals
from casexml.apps.case.models import CommCareCase
from dimagi.utils.parsing import json_format_datetime


def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date, until_date=None):
    """
    Gets all cases with a specified owner ID that have been modified
    since a particular reference_date (using the server's timestamp)
    """
    return [
        row['id'] for row in CommCareCase.get_db().view(
            'cases_by_server_date/by_owner_server_modified_on',
            startkey=[domain, owner_id, json_format_datetime(reference_date)],
            endkey=[domain, owner_id, {} if not until_date else json_format_datetime(until_date)],
            include_docs=False,
            reduce=False
        )
    ]
Allow django-money to be specified as read-only in a model

Monkey patch the Django admin so that we can display django-money fields read-only. In order to do this, we simply catch the exception that results from trying to convert a money object (e.g. '10 USD') into a floating field, and then just ask for the string representation of the field.
from django.db import models
from django.utils.encoding import smart_unicode
from django.utils import formats
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.admin.util import lookup_field
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.db.models.fields.related import ManyToManyRel
from django.contrib.admin import util as admin_util


def djmoney_display_for_field(value, field):
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE

    try:
        if field.flatchoices:
            return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
        # NullBooleanField needs special-case null-handling, so it comes
        # before the general null test.
        elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
            return _boolean_icon(value)
        elif value is None:
            return EMPTY_CHANGELIST_VALUE
        elif isinstance(field, models.DateTimeField):
            return formats.localize(timezone.localtime(value))
        elif isinstance(field, models.DateField) or isinstance(field, models.TimeField):
            return formats.localize(value)
        elif isinstance(field, models.DecimalField):
            return formats.number_format(value, field.decimal_places)
        elif isinstance(field, models.FloatField):
            return formats.number_format(value)
        else:
            return smart_unicode(value)
    except:
        return smart_unicode(value)

admin_util.display_for_field = djmoney_display_for_field


def djmoney_contents(self):
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE

    field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
    try:
        f, attr, value = lookup_field(field, obj, model_admin)
    except (AttributeError, ValueError, ObjectDoesNotExist):
        result_repr = EMPTY_CHANGELIST_VALUE
    else:
        if f is None:
            boolean = getattr(attr, "boolean", False)
            if boolean:
                result_repr = _boolean_icon(value)
            else:
                result_repr = smart_unicode(value)
                if getattr(attr, "allow_tags", False):
                    result_repr = mark_safe(result_repr)
        else:
            if value is None:
                result_repr = EMPTY_CHANGELIST_VALUE
            elif isinstance(f.rel, ManyToManyRel):
                result_repr = ", ".join(map(unicode, value.all()))
            else:
                result_repr = djmoney_display_for_field(value, f)

    return conditional_escape(result_repr)

from django.contrib.admin.helpers import AdminReadonlyField
AdminReadonlyField.contents = djmoney_contents
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.conf.urls import patterns, url

from .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \
    DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView

urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),
    url(r'^form$', DefaultFormView.as_view(), name='form_default'),
    url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),
    url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),
    url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),
    url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),
    url(r'^pagination$', PaginationView.as_view(), name='pagination'),
    url(r'^misc$', MiscView.as_view(), name='misc'),
]
Remove obsolete import (removed in Django 1.10)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.conf.urls import url

from .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \
    DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView

urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),
    url(r'^form$', DefaultFormView.as_view(), name='form_default'),
    url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),
    url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),
    url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),
    url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),
    url(r'^pagination$', PaginationView.as_view(), name='pagination'),
    url(r'^misc$', MiscView.as_view(), name='misc'),
]
import pytest

from pyhocon.config_tree import ConfigTree
from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException


class TestConfigParser(object):

    def test_config_tree_quoted_string(self):
        config_tree = ConfigTree()
        config_tree.put("a.b.c", "value")
        assert config_tree.get("a.b.c") == "value"

        with pytest.raises(ConfigMissingException):
            assert config_tree.get("a.b.d")

        with pytest.raises(ConfigMissingException):
            config_tree.get("a.d.e")

        with pytest.raises(ConfigWrongTypeException):
            config_tree.get("a.b.c.e")

    def test_config_tree_number(self):
        config_tree = ConfigTree()
        config_tree.put("a.b.c", 5)
        assert config_tree.get("a.b.c") == 5
Add failing tests for iteration and logging config
import pytest

from pyhocon.config_tree import ConfigTree
from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException


class TestConfigParser(object):

    def test_config_tree_quoted_string(self):
        config_tree = ConfigTree()
        config_tree.put("a.b.c", "value")
        assert config_tree.get("a.b.c") == "value"

        with pytest.raises(ConfigMissingException):
            assert config_tree.get("a.b.d")

        with pytest.raises(ConfigMissingException):
            config_tree.get("a.d.e")

        with pytest.raises(ConfigWrongTypeException):
            config_tree.get("a.b.c.e")

    def test_config_tree_number(self):
        config_tree = ConfigTree()
        config_tree.put("a.b.c", 5)
        assert config_tree.get("a.b.c") == 5

    def test_config_tree_iterator(self):
        config_tree = ConfigTree()
        config_tree.put("a.b.c", 5)
        for k in config_tree:
            assert k == "a"
            assert config_tree[k]["b.c"] == 5

    def test_config_logging(self):
        import logging, logging.config
        config_tree = ConfigTree()
        config_tree.put('version', 1)
        config_tree.put('root.level', logging.INFO)
        assert dict(config_tree)['version'] == 1
        logging.config.dictConfig(config_tree)
"""Little helper application to improve django choices (for fields)""" from __future__ import unicode_literals from .choices import Choices __author__ = 'Stephane "Twidi" Ange;' __contact__ = "s.angel@twidi.com" __homepage__ = "https://pypi.python.org/pypi/django-extended-choices" __version__ = "1.1"
Make OrderedChoices available at the package root
"""Little helper application to improve django choices (for fields)""" from __future__ import unicode_literals from .choices import Choices, OrderedChoices __author__ = 'Stephane "Twidi" Ange;' __contact__ = "s.angel@twidi.com" __homepage__ = "https://pypi.python.org/pypi/django-extended-choices" __version__ = "1.1"
from functools import wraps
from django.db import connection


def auto_close_db(f):
    "Ensures the database connection is closed when the function returns."
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        finally:
            connection.close()
    return wrapper
Fix circular import when used with other add-ons that import django.db, e.g. sorl_thumbnail:

Traceback (most recent call last):
  File "/home/rpatterson/src/work/retrans/src/ReTransDjango/bin/manage", line 40, in <module>
    sys.exit(manage.main())
  File "/home/rpatterson/src/work/retrans/src/ReTransDjango/retrans/manage.py", line 15, in main
    execute_manager(settings)
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/__init__.py", line 438, in execute_manager
    utility.execute()
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/__init__.py", line 379, in execute
    self.fetch_command(subcommand).run_from_argv(self.argv)
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/base.py", line 191, in run_from_argv
    self.execute(*args, **options.__dict__)
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/base.py", line 209, in execute
    translation.activate('en-us')
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/__init__.py", line 100, in activate
    return _trans.activate(language)
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/trans_real.py", line 202, in activate
    _active.value = translation(language)
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/trans_real.py", line 185, in translation
    default_translation = _fetch(settings.LANGUAGE_CODE)
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/trans_real.py", line 162, in _fetch
    app = import_module(appname)
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/importlib.py", line 35, in import_module
    __import__(name)
  File "/opt/src/eggs/sorl_thumbnail-11.12-py2.7.egg/sorl/thumbnail/__init__.py", line 1, in <module>
    from sorl.thumbnail.fields import ImageField
  File "/opt/src/eggs/sorl_thumbnail-11.12-py2.7.egg/sorl/thumbnail/fields.py", line 2, in <module>
    from django.db import models
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/db/__init__.py", line 78, in <module>
    connection = connections[DEFAULT_DB_ALIAS]
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/db/utils.py", line 94, in __getitem__
    backend = load_backend(db['ENGINE'])
  File "/opt/src/eggs/Django-1.3-py2.7.egg/django/db/utils.py", line 47, in load_backend
    if backend_name not in available_backends:
django.core.exceptions.ImproperlyConfigured: 'django_mysqlpool.backends.mysqlpool' isn't an available database backend.
Try using django.db.backends.XXX, where XXX is one of:
    'dummy', 'mysql', 'oracle', 'postgresql', 'postgresql_psycopg2', 'sqlite3'
Error was: cannot import name connection
from functools import wraps


def auto_close_db(f):
    "Ensures the database connection is closed when the function returns."
    from django.db import connection

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        finally:
            connection.close()
    return wrapper
#!/usr/bin/env python

from twisted.web.server import Site

from webtest.session import RedisSessionFactory
from webtest.request import RedisRequest
from webtest import log

logger = log.get_logger()


class RedisSite(Site):
    sessionFactory = RedisSessionFactory
    requestFactory = RedisRequest

    def makeSession(self):
        """
        Generate a new Session instance
        """
        uid = self._mkuid()
        return self.sessionFactory.retrieve(uid, reactor=self._reactor)

    def getSession(self, uid):
        """
        Get a previously generated session, by its unique ID.
        This raises a KeyError if the session is not found.
        """
        return self.sessionFactory.retrieve(uid, reactor=self._reactor)
Move RedisSessionFactory into its own module
#!/usr/bin/env python

from twisted.web.server import Site

from webtest.session_factory import RedisSessionFactory
from webtest.request import RedisRequest
from webtest import log

logger = log.get_logger()


class RedisSite(Site):
    sessionFactory = RedisSessionFactory
    requestFactory = RedisRequest

    def makeSession(self):
        """
        Generate a new Session instance
        """
        uid = self._mkuid()
        return self.sessionFactory.retrieve(uid, reactor=self._reactor)

    def getSession(self, uid):
        """
        Get a previously generated session, by its unique ID.
        This raises a KeyError if the session is not found.
        """
        return self.sessionFactory.retrieve(uid, reactor=self._reactor)
class LocalNodeMiddleware(object):
    """
    Ensures a Node that represents the local server always exists.

    No other suitable hook for code that's run once and can access the
    server's host name was found. A migration was not suitable for the
    second reason.
    """

    def __init__(self):
        self.local_node_created = False

    def process_request(self, request):
        if not self.local_node_created:
            from dashboard.models import Node
            nodes = Node.objects.filter(local=True)

            host = "http://" + request.get_host()
            if host[-1] != "/":
                host += "/"
            service = host + "service/"

            if len(nodes) == 0:
                node = Node(name="Local", website_url=host, service_url=service, local=True)
                node.save()
            elif len(nodes) == 1:
                node = nodes[0]
                node.host = host
                node.service = service
                node.save()
            else:
                raise RuntimeError("More than one local node found in Nodes table. Please fix before continuing.")

            self.local_node_created = True

        return None
Add TODO to fix bug at later date
class LocalNodeMiddleware(object):
    """
    Ensures a Node that represents the local server always exists.

    No other suitable hook for code that's run once and can access the
    server's host name was found. A migration was not suitable for the
    second reason.
    """

    def __init__(self):
        self.local_node_created = False

    def process_request(self, request):
        if not self.local_node_created:
            from dashboard.models import Node
            nodes = Node.objects.filter(local=True)

            host = "http://" + request.get_host()
            if host[-1] != "/":
                host += "/"
            service = host + "service/"

            if len(nodes) == 0:
                node = Node(name="Local", website_url=host, service_url=service, local=True)
                node.save()
            elif len(nodes) == 1:
                node = nodes[0]
                node.host = host
                node.service = service
                # TODO: Fix bug that prevents this from actually saving
                node.save()
            else:
                raise RuntimeError("More than one local node found in Nodes table. Please fix before continuing.")

            self.local_node_created = True

        return None
from eve import Eve

app = Eve()

if __name__ == '__main__':
    app.run()
Use port 80 to serve the API
from eve import Eve

app = Eve()

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=80)
from rest_framework import serializers
from registries.models import Organization
from gwells.models import ProvinceState


class DrillerListSerializer(serializers.ModelSerializer):
    province_state = serializers.ReadOnlyField()

    class Meta:
        model = Organization

        # Using all fields for now
        fields = (
            #'who_created',
            #'when_created',
            #'who_updated',
            #'when_updated',
            'name',
            'street_address',
            'city',
            'province_state',
            'postal_code',
            'main_tel',
            'fax_tel',
            'website_url',
            'certificate_authority',
        )
Add fields to driller list serializer
from rest_framework import serializers
from registries.models import Organization
from gwells.models import ProvinceState


class DrillerListSerializer(serializers.ModelSerializer):
    """
    Serializer for Driller model "list" view.
    """
    province_state = serializers.ReadOnlyField(source="province_state.code")

    class Meta:
        model = Organization

        # Using all fields for now
        fields = (
            #'who_created',
            #'when_created',
            #'who_updated',
            #'when_updated',
            'org_guid',
            'name',
            'street_address',
            'city',
            'province_state',
            'postal_code',
            'main_tel',
            #'fax_tel',
            #'website_url',
            #'certificate_authority',
        )
from django.conf import settings


def add_settings( request ):
    """Add some selected settings values to the context"""
    return {
        'settings': {
            'GOOGLE_ANALYTICS_ACCOUNT': settings.GOOGLE_ANALYTICS_ACCOUNT,
        }
    }
Make settings.DEBUG available to templates

It's used in the default base.html template, so it makes sense for it to actually appear in the context.
from django.conf import settings


def add_settings( request ):
    """Add some selected settings values to the context"""
    return {
        'settings': {
            'GOOGLE_ANALYTICS_ACCOUNT': settings.GOOGLE_ANALYTICS_ACCOUNT,
            'DEBUG': settings.DEBUG,
        }
    }
#!/usr/bin/python

import os
import sys
import api
import json
import getpass

# Banks
banks = {}
import bankofamerica
banks["bankofamerica"] = bankofamerica

print "Login"
print "Username: ",
username = sys.stdin.readline().strip()
password = getpass.getpass()

if not api.callapi("login", {"username": username, "password": password}):
    print "Login failed"
    sys.exit(1)

todo = api.callapi("accountstodo")

for account in todo:
    if account["bankname"] not in banks:
        print "No scraper for %s!" % (account["bankname"])
        continue
    print "Scraping %s..." % (account["bankname"])
    if os.getenv("DATAFILE"):
        data = open(os.getenv("DATAFILE")).read()
    else:
        data = json.dumps(banks[account["bankname"]].downloadaccount(account), default=str)
    api.callapi("newtransactions", {"data": data})

api.callapi("logout")
Read bank list from config file.
#!/usr/bin/python

import os
import sys
import api
import json
import getpass

sys.path.append("../")
import config

# Banks
banks = {}
for bank in config.banks:
    exec "import %s" % (bank)
    banks[bank] = eval(bank)

print "Login"
print "Username: ",
username = sys.stdin.readline().strip()
password = getpass.getpass()

if not api.callapi("login", {"username": username, "password": password}):
    print "Login failed"
    sys.exit(1)

todo = api.callapi("accountstodo")

for account in todo:
    if account["bankname"] not in banks:
        print "No scraper for %s!" % (account["bankname"])
        continue
    print "Scraping %s..." % (account["bankname"])
    if os.getenv("DATAFILE"):
        data = open(os.getenv("DATAFILE")).read()
    else:
        data = json.dumps(banks[account["bankname"]].downloadaccount(account), default=str)
    api.callapi("newtransactions", {"data": data})

api.callapi("logout")
# Taken from txircd:
# https://github.com/ElementalAlchemist/txircd/blob/8832098149b7c5f9b0708efe5c836c8160b0c7e6/txircd/utils.py#L9
def _enum(**enums):
    return type('Enum', (), enums)

ModeType = _enum(LIST=0, PARAM_SET=1, PARAM_UNSET=2, NO_PARAM=3)
ModuleLoadType = _enum(LOAD=0, UNLOAD=1, ENABLE=2, DISABLE=3)


def isNumber(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def parseUserPrefix(prefix):
    if "!" in prefix:
        nick = prefix[:prefix.find("!")]
        ident = prefix[prefix.find("!") + 1:prefix.find("@")]
        host = prefix[prefix.find("@") + 1:]
        return nick, ident, host
    # Not all "users" have idents and hostnames
    nick = prefix
    return nick, None, None


def networkName(bot, server):
    return bot.servers[server].supportHelper.network
Fix the handling of missing prefixes

Twisted defaults to an empty string, while IRCBase defaults to None.
# Taken from txircd:
# https://github.com/ElementalAlchemist/txircd/blob/8832098149b7c5f9b0708efe5c836c8160b0c7e6/txircd/utils.py#L9
def _enum(**enums):
    return type('Enum', (), enums)

ModeType = _enum(LIST=0, PARAM_SET=1, PARAM_UNSET=2, NO_PARAM=3)
ModuleLoadType = _enum(LOAD=0, UNLOAD=1, ENABLE=2, DISABLE=3)


def isNumber(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def parseUserPrefix(prefix):
    if prefix is None:
        prefix = ""
    if "!" in prefix:
        nick = prefix[:prefix.find("!")]
        ident = prefix[prefix.find("!") + 1:prefix.find("@")]
        host = prefix[prefix.find("@") + 1:]
        return nick, ident, host
    # Not all "users" have idents and hostnames
    nick = prefix
    return nick, None, None


def networkName(bot, server):
    return bot.servers[server].supportHelper.network
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#

from rest_framework.serializers import HyperlinkedModelSerializer

from trex.models.project import Project


class ProjectSerializer(HyperlinkedModelSerializer):

    class Meta:
        model = Project
        fields = ("url", "name", "description", "active", "created")
Add a ProjectDetailSerializer and EntryDetailSerializer
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#

from rest_framework.serializers import HyperlinkedModelSerializer

from trex.models.project import Project, Entry


class ProjectSerializer(HyperlinkedModelSerializer):

    class Meta:
        model = Project
        fields = ("url", "name", "description", "active", "created")


class ProjectDetailSerializer(HyperlinkedModelSerializer):

    class Meta:
        model = Project
        fields = ("name", "description", "active", "created", "entries")


class EntryDetailSerializer(HyperlinkedModelSerializer):

    class Meta:
        model = Entry
        fields = ("date", "duration", "description", "state", "user", "created")
from utils import models


class Plugin:
    plugin_name = None
    display_name = None
    description = None
    author = None
    short_name = None
    stage = None

    manager_url = None

    version = None
    janeway_version = None

    is_workflow_plugin = False
    jump_url = None
    handshake_url = None
    article_pk_in_handshake_url = False
    press_wide = False

    kanban_card = '{plugin_name}/kanban_card.html'.format(
        plugin_name=plugin_name,
    )

    @classmethod
    def install(cls):
        plugin, created = cls.get_or_create_plugin_object()

        if not created and plugin.version != cls.version:
            plugin.version = cls.version
            plugin.save()

        return plugin, created

    @classmethod
    def hook_registry(cls):
        pass

    @classmethod
    def get_or_create_plugin_object(cls):
        plugin, created = models.Plugin.objects.get_or_create(
            name=cls.short_name,
            display_name=cls.display_name,
            press_wide=cls.press_wide,
            defaults={'version': cls.version, 'enabled': True},
        )

        return plugin, created
Add get_self and change get_or_create to avoid mis-creation.
from utils import models


class Plugin:
    plugin_name = None
    display_name = None
    description = None
    author = None
    short_name = None
    stage = None

    manager_url = None

    version = None
    janeway_version = None

    is_workflow_plugin = False
    jump_url = None
    handshake_url = None
    article_pk_in_handshake_url = False
    press_wide = False

    kanban_card = '{plugin_name}/kanban_card.html'.format(
        plugin_name=plugin_name,
    )

    @classmethod
    def install(cls):
        plugin, created = cls.get_or_create_plugin_object()

        if not created and plugin.version != cls.version:
            print('Plugin updated: {0} -> {1}'.format(cls.version, plugin.version))
            plugin.version = cls.version
            plugin.save()

        return plugin, created

    @classmethod
    def hook_registry(cls):
        pass

    @classmethod
    def get_or_create_plugin_object(cls):
        plugin, created = models.Plugin.objects.get_or_create(
            name=cls.short_name,
            defaults={
                'display_name': cls.display_name,
                'version': cls.version,
                'enabled': True,
                'press_wide': cls.press_wide,
            },
        )

        return plugin, created

    @classmethod
    def get_self(cls):
        try:
            plugin = models.Plugin.objects.get(
                name=cls.short_name,
            )
        except models.Plugin.MultipleObjectsReturned:
            plugin = models.Plugin.objects.filter(
                name=cls.short_name,
            ).order_by(
                '-version'
            ).first()
        except models.Plugin.DoesNotExist:
            return None

        return plugin
import csv

ppelm_s3_key = ''


def process_from_dump(fname=None, delimiter='\t'):
    ppelm_json = []
    if fname is None:
        # ToDo Get from S3
        pass
    else:
        with open(fname, 'r') as f:
            csv_reader = csv.reader(f.readlines(), delimiter=delimiter)
            columns = next(csv_reader)
            for entry in csv_reader:
                row_dict = {columns[n]: entry[n] for n in range(len(columns))}
                ppelm_json.append(row_dict)
    return ppelm_json
Move iterator to own function
import csv

ppelm_s3_key = ''


def process_from_dump(fname=None, delimiter='\t'):
    if fname is None:
        # ToDo Get from S3
        return []
    else:
        with open(fname, 'r') as f:
            csv_reader = csv.reader(f.readlines(), delimiter=delimiter)
            ppelm_json = _get_json_from_entry_rows(csv_reader)
    return ppelm_json


def _get_json_from_entry_rows(row_iter):
    ppelm_json = []
    columns = next(row_iter)
    for entry in row_iter:
        row_dict = {columns[n]: entry[n] for n in range(len(columns))}
        ppelm_json.append(row_dict)
    return ppelm_json
class Neuron:
    pass


class NeuronNetwork:
    neurons = []
Create 2D list of Neurons in NeuronNetwork's init
class Neuron:
    pass


class NeuronNetwork:
    neurons = []

    def __init__(self, rows, columns):
        self.neurons = []
        for row in xrange(rows):
            self.neurons.append([])
            for column in xrange(columns):
                self.neurons[row].append(Neuron())
from distutils.core import setup
from setuptools import find_packages

setup(name='geventconnpool',
      version = "0.1",
      description = 'TCP connection pool for gevent',
      url="https://github.com/rasky/geventconnpool",
      author="Giovanni Bajo",
      author_email="rasky@develer.com",
      packages=find_packages('src'),
      package_dir={'': 'src'},
      include_package_data=True,
      install_requires=[
          'gevent >= 0.13'
      ],
      classifiers=[
          "Development Status :: 4 - Beta",
          "Environment :: No Input/Output (Daemon)",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Natural Language :: English",
          "Operating System :: OS Independent",
          "Programming Language :: Python :: 2",
          "Topic :: Software Development",
      ])
Add long description to the package.
from distutils.core import setup
from setuptools import find_packages

with open('README.rst') as file:
    long_description = file.read()

setup(name='geventconnpool',
      version = "0.1a",
      description = 'TCP connection pool for gevent',
      long_description = long_description,
      url="https://github.com/rasky/geventconnpool",
      author="Giovanni Bajo",
      author_email="rasky@develer.com",
      packages=find_packages('src'),
      package_dir={'': 'src'},
      include_package_data=True,
      install_requires=[
          'gevent >= 0.13'
      ],
      classifiers=[
          "Development Status :: 4 - Beta",
          "Environment :: No Input/Output (Daemon)",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Natural Language :: English",
          "Operating System :: OS Independent",
          "Programming Language :: Python :: 2",
          "Topic :: Software Development",
      ])
""" Py-Tree-sitter """ import platform from setuptools import setup, Extension setup( name = "tree_sitter", version = "0.0.8", maintainer = "Max Brunsfeld", maintainer_email = "maxbrunsfeld@gmail.com", author = "Max Brunsfeld", author_email = "maxbrunsfeld@gmail.com", url = "https://github.com/tree-sitter/py-tree-sitter", license = "MIT", platforms = ["any"], python_requires = ">=3.3", description = "Python bindings to the Tree-sitter parsing library", classifiers = [ "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Compilers", "Topic :: Text Processing :: Linguistic", ], packages = ['tree_sitter'], ext_modules = [ Extension( "tree_sitter_binding", [ "tree_sitter/core/lib/src/lib.c", "tree_sitter/binding.c", ], include_dirs = [ "tree_sitter/core/lib/include", "tree_sitter/core/lib/utf8proc", ], extra_compile_args = ( ['-std=c99'] if platform.system() != 'Windows' else None ) ) ], project_urls = { 'Source': 'https://github.com/tree-sitter/py-tree-sitter', 'Documentation': 'http://initd.org/psycopg/docs/', } )
Remove an incorrect documentation URL

Fixes #9.
""" Py-Tree-sitter """ import platform from setuptools import setup, Extension setup( name = "tree_sitter", version = "0.0.8", maintainer = "Max Brunsfeld", maintainer_email = "maxbrunsfeld@gmail.com", author = "Max Brunsfeld", author_email = "maxbrunsfeld@gmail.com", url = "https://github.com/tree-sitter/py-tree-sitter", license = "MIT", platforms = ["any"], python_requires = ">=3.3", description = "Python bindings to the Tree-sitter parsing library", classifiers = [ "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Compilers", "Topic :: Text Processing :: Linguistic", ], packages = ['tree_sitter'], ext_modules = [ Extension( "tree_sitter_binding", [ "tree_sitter/core/lib/src/lib.c", "tree_sitter/binding.c", ], include_dirs = [ "tree_sitter/core/lib/include", "tree_sitter/core/lib/utf8proc", ], extra_compile_args = ( ['-std=c99'] if platform.system() != 'Windows' else None ) ) ], project_urls = { 'Source': 'https://github.com/tree-sitter/py-tree-sitter', } )
from setuptools import setup, find_packages

setup(
    name='bfg9000',
    version='0.1.0pre',
    license='BSD',
    author='Jim Porter',
    author_email='porterj@alum.rit.edu',
    packages=find_packages(exclude=['test']),
    entry_points={
        'console_scripts': ['bfg9000=bfg9000.driver:main'],
    },
    test_suite='test',
)
Fix version number to comply with PEP 440
from setuptools import setup, find_packages

setup(
    name='bfg9000',
    version='0.1.0-dev',
    license='BSD',
    author='Jim Porter',
    author_email='porterj@alum.rit.edu',
    packages=find_packages(exclude=['test']),
    entry_points={
        'console_scripts': ['bfg9000=bfg9000.driver:main'],
    },
    test_suite='test',
)
from setuptools import find_packages, setup

setup(
    name='txkazoo',
    version='0.0.4',
    description='Twisted binding for Kazoo',
    maintainer='Manish Tomar',
    maintainer_email='manish.tomar@rackspace.com',
    license='Apache 2.0',
    packages=find_packages(),
    install_requires=['twisted==13.2.0', 'kazoo==2.0b1']
)
Add long_description from README + URL
from setuptools import find_packages, setup

setup(
    name='txkazoo',
    version='0.0.4',
    description='Twisted binding for Kazoo',
    long_description=open("README.md").read(),
    url="https://github.com/rackerlabs/txkazoo",
    maintainer='Manish Tomar',
    maintainer_email='manish.tomar@rackspace.com',
    license='Apache 2.0',
    packages=find_packages(),
    install_requires=['twisted==13.2.0', 'kazoo==2.0b1']
)
import importlib

from cx_Freeze import setup, Executable

backend_path = importlib.import_module("bcrypt").__path__[0]
backend_path = backend_path.replace("bcrypt", ".libs_cffi_backend")

# Dependencies are automatically detected, but it might need
# fine tuning.
build_exe_options = {
    "include_files": [
        ("client/dist", "client"),
        "LICENSE",
        "templates",
        "readme.md",
        (backend_path, "lib/.libs_cffi_backend")
    ],
    "includes": [
        "cffi",
        "numpy",
        "numpy.core._methods",
        "numpy.lib",
        "numpy.lib.format"
    ],
    "namespace_packages": [
        "virtool"
    ],
    "packages": [
        "_cffi_backend",
        "appdirs",
        "asyncio",
        "bcrypt",
        "cffi",
        "idna",
        "motor",
        "packaging",
        "uvloop"
    ]
}

options = {
    "build_exe": build_exe_options
}

executables = [
    Executable('run.py', base="Console")
]

importlib.import_module("virtool")

setup(name='virtool', executables=executables, options=options)
Fix missing raven.processors in build
import importlib

from cx_Freeze import setup, Executable

backend_path = importlib.import_module("bcrypt").__path__[0]
backend_path = backend_path.replace("bcrypt", ".libs_cffi_backend")

# Dependencies are automatically detected, but it might need
# fine tuning.
build_exe_options = {
    "include_files": [
        ("client/dist", "client"),
        "LICENSE",
        "templates",
        "readme.md",
        (backend_path, "lib/.libs_cffi_backend")
    ],
    "includes": [
        "cffi",
        "numpy",
        "numpy.core._methods",
        "numpy.lib",
        "numpy.lib.format",
        "raven.processors"
    ],
    "namespace_packages": [
        "virtool"
    ],
    "packages": [
        "_cffi_backend",
        "appdirs",
        "asyncio",
        "bcrypt",
        "cffi",
        "idna",
        "motor",
        "packaging",
        "uvloop"
    ]
}

options = {
    "build_exe": build_exe_options
}

executables = [
    Executable('run.py', base="Console")
]

importlib.import_module("virtool")

setup(name='virtool', executables=executables, options=options)
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys


class PyTest(TestCommand):

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import pytest
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)

setup(
    name='msumastro',
    version='FIXME',
    description='Process FITS files painlessly',
    long_description=(open('README.rst').read()),
    license='BSD 3-clause',
    author='Matt Craig',
    author_email='mcraig@mnstate.edu',
    packages=['msumastro'],
    include_package_data=True,
    install_requires=['astropysics>=0.0.dev0',
                      'astropy',
                      'numpy'],
    extras_require={
        'testing': ['pytest', 'pytest-capturelog']
    },
    classifiers=['Development Status :: 4 - Beta',
                 'License :: OSI Approved :: BSD License',
                 'Programming Language :: Python :: 2 :: Only']
)
Add entry point for quick_add_keys_to_file
from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand import sys class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import pytest errcode = pytest.main(self.test_args) sys.exit(errcode) setup( name='msumastro', version='FIXME', description='Process FITS files painlessly', url='http://github.com/mwcraig/msumastro', long_description=(open('README.rst').read()), license='BSD 3-clause', author='Matt Craig', author_email='mcraig@mnstate.edu', packages=find_packages(exclude=['tests*']), include_package_data=True, install_requires=['astropysics>=0.0.dev0', 'astropy', 'numpy'], extras_require={ 'testing': ['pytest>1.4', 'pytest-capturelog'], 'docs': ['numpydoc', 'sphinx-argparse'] }, entry_points={ 'console_scripts': [ ('quick_add_keys_to_file = ' 'msumastro.scripts.quick_add_keys_to_file:main') ] }, classifiers=['Development Status :: 4 - Beta', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2 :: Only'] )
#!/usr/bin/env python import os from setuptools import find_packages, setup SCRIPT_DIR = os.path.dirname(__file__) if not SCRIPT_DIR: SCRIPT_DIR = os.getcwd() SRC_PREFIX = 'src' packages = find_packages(SRC_PREFIX) setup( name='cmdline', version='0.0.0', description='Utilities for consistent command line tools', author='Roberto Aguilar', author_email='r@rreboto.com', package_dir={'': SRC_PREFIX}, packages=packages, long_description=open('README.md').read(), url='http://github.com/rca/cmdline', license='LICENSE', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Natural Language :: English', 'Topic :: Utilities' ], install_requires=[ 'PyYAML>=3', ], )
Add markdown content type for README
#!/usr/bin/env python import os from setuptools import find_packages, setup SCRIPT_DIR = os.path.dirname(__file__) if not SCRIPT_DIR: SCRIPT_DIR = os.getcwd() SRC_PREFIX = 'src' def readme(): with open('README.md') as f: return f.read() packages = find_packages(SRC_PREFIX) setup( name='cmdline', version='0.0.0', description='Utilities for consistent command line tools', author='Roberto Aguilar', author_email='r@rreboto.com', package_dir={'': SRC_PREFIX}, packages=packages, long_description=readme(), long_description_content_type='text/markdown', url='http://github.com/rca/cmdline', license='LICENSE', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Natural Language :: English', 'Topic :: Utilities' ], install_requires=[ 'PyYAML>=3', ], )
import json import os import webapp2 from webapp2_extras import jinja2 class BaseHandler(webapp2.RequestHandler): @webapp2.cached_property def jinja2(self): return jinja2.get_jinja2(app=self.app) def render_template(self, filename, **template_args): self.response.write(self.jinja2.render_template(filename, **template_args)) class IndexHandler(BaseHandler): def get(self): self.render_template('index.html', name=self.request.get('name')) class RegistrationHandler(webapp2.RequestHandler): def post(self): json_object = json.loads(self.request.body) self.response.write('Registration Received {}'.format(json_object)) class GamesHandler(webapp2.RequestHandler): def post(self): self.response.write('Game Received') app = webapp2.WSGIApplication([ webapp2.Route('/', handler=IndexHandler, name='home', methods=['GET']), webapp2.Route('/register', handler=RegistrationHandler, name='registration', methods=['POST']), webapp2.Route('/games', handler=GamesHandler, name='games', methods=['POST']), ], debug=True)
Return Status 422 on bad JSON content
import json import os import webapp2 from webapp2_extras import jinja2 class BaseHandler(webapp2.RequestHandler): @webapp2.cached_property def jinja2(self): return jinja2.get_jinja2(app=self.app) def render_template(self, filename, **template_args): self.response.write(self.jinja2.render_template(filename, **template_args)) class IndexHandler(BaseHandler): def get(self): self.render_template('index.html', name=self.request.get('name')) class RegistrationHandler(webapp2.RequestHandler): def post(self): json_object = json.loads(self.request.body) if not 'username' in json_object: webapp2.abort(422, detail='Field "username" is required') else: self.response.write('Registration Received {}'.format(json_object)) class GamesHandler(webapp2.RequestHandler): def post(self): self.response.write('Game Received') app = webapp2.WSGIApplication([ webapp2.Route('/', handler=IndexHandler, name='home', methods=['GET']), webapp2.Route('/register', handler=RegistrationHandler, name='registration', methods=['POST']), webapp2.Route('/games', handler=GamesHandler, name='games', methods=['POST']), ], debug=True)
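A note on the fix above: json.loads still raises on malformed bodies, which would surface as a 500 rather than the 422 the commit message promises. A minimal hedged sketch of a stricter variant (the handler name and error text are illustrative, not part of the original commit):

class StrictRegistrationHandler(webapp2.RequestHandler):
    def post(self):
        try:
            json_object = json.loads(self.request.body)
        except ValueError:
            # malformed JSON also yields 422, matching the stated intent
            webapp2.abort(422, detail='Request body is not valid JSON')
        if 'username' not in json_object:
            webapp2.abort(422, detail='Field "username" is required')
        self.response.write('Registration Received {}'.format(json_object))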
""" WSGI config for classicalguitar project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "classicalguitar.settings") from django.core.wsgi import get_wsgi_application from dj_static import Cling application = Cling(get_wsgi_application())
Correct some remaining classical guitar refs
""" WSGI config for website project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website.settings") from django.core.wsgi import get_wsgi_application from dj_static import Cling application = Cling(get_wsgi_application())
""" ydf/yaml_ext ~~~~~~~~~~~~ Contains extensions to existing YAML functionality. """ import collections from ruamel import yaml from ruamel.yaml import resolver __all__ = ['load_all', 'load_all_gen'] class OrderedRoundTripLoader(yaml.RoundTripLoader): """ Extends the default round trip YAML loader to use :class:`~collections.OrderedDict` for mapping types. """ def __init__(self, *args, **kwargs): super(OrderedRoundTripLoader, self).__init__(*args, **kwargs) self.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, self.construct_ordered_mapping) @staticmethod def construct_ordered_mapping(loader, node): loader.flatten_mapping(node) return collections.OrderedDict(loader.construct_pairs(node)) def load_all(stream): """ Load all documents within the given YAML string. :param stream: A valid YAML stream. :return: List that contains all documents found in the YAML stream. """ return list(load_all_gen(stream)) def load_all_gen(stream): """ Load all documents within the given YAML string. :param stream: A valid YAML stream. :return: Generator that yields each document found in the YAML stream. """ return yaml.load_all(stream, OrderedRoundTripLoader)
Add YAML load for single document.
""" ydf/yaml_ext ~~~~~~~~~~~~ Contains extensions to existing YAML functionality. """ import collections from ruamel import yaml from ruamel.yaml import resolver __all__ = ['load', 'load_all', 'load_all_gen'] class OrderedRoundTripLoader(yaml.RoundTripLoader): """ Extends the default round trip YAML loader to use :class:`~collections.OrderedDict` for mapping types. """ def __init__(self, *args, **kwargs): super(OrderedRoundTripLoader, self).__init__(*args, **kwargs) self.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, self.construct_ordered_mapping) @staticmethod def construct_ordered_mapping(loader, node): loader.flatten_mapping(node) return collections.OrderedDict(loader.construct_pairs(node)) def load(stream): """ Load a single document within the given YAML string. :param stream: A valid YAML stream. :return: An :class:`~collections.OrderedDict` representation of the YAML stream. """ return yaml.load(stream, OrderedRoundTripLoader) def load_all(stream): """ Load all documents within the given YAML string. :param stream: A valid YAML stream. :return: List that contains all documents found in the YAML stream. """ return list(load_all_gen(stream)) def load_all_gen(stream): """ Load all documents within the given YAML string. :param stream: A valid YAML stream. :return: Generator that yields each document found in the YAML stream. """ return yaml.load_all(stream, OrderedRoundTripLoader)
#!/usr/bin/env python ''' Copyright (c) 2016 anti-XSS developers ''' import sys from lib.core.link import Link from optparse import OptionParser from lib.core.engine import getPage from lib.core.engine import getScript from lib.core.engine import xssScanner from lib.generator.report import gnrReport def main(): parser = OptionParser() parser.add_option('-u', '--url', dest='startUrl', help='Target URL (e.g. \'http://www.site.com/\')') parser.add_option('-d', '--depth', dest='depth', help='The depth you want to scan (default: 2)') (options, args) = parser.parse_args() if options.startUrl: rootLink = Link(options.startUrl, options.startUrl) if options.depth: getPage(rootLink, int(options.depth)) else: getPage(rootLink, 2) getScript() xssScanner() pass if __name__ == '__main__': main()
Add initialization before get url
#!/usr/bin/env python ''' Copyright (c) 2016 anti-XSS developers ''' import sys from lib.core.urlfun import * from lib.core.link import Link from optparse import OptionParser from lib.core.engine import getPage from lib.core.engine import getScript from lib.core.engine import xssScanner from lib.generator.report import gnrReport def main(): parser = OptionParser() parser.add_option('-u', '--url', dest='startUrl', help='Target URL (e.g. \'http://www.site.com/\')') parser.add_option('-d', '--depth', dest='depth', help='The depth you want to scan (default: 2)') (options, args) = parser.parse_args() if options.startUrl: url = initialize(options.startUrl) rootLink = Link(url, url) if options.depth: getPage(rootLink, int(options.depth)) else: getPage(rootLink, 2) getScript() xssScanner() pass if __name__ == '__main__': main()
def map_range(x, in_min, in_max, out_min, out_max): out_delta = out_max - out_min in_delta = in_max - in_min return (x - in_min) * out_delta / in_delta + out_min
Add function for linear interpolation (lerp)
def lerp(a, b, t): return (1.0 - t) * a + t * b def map_range(x, in_min, in_max, out_min, out_max): out_delta = out_max - out_min in_delta = in_max - in_min return (x - in_min) * out_delta / in_delta + out_min
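A quick sanity check tying the two helpers together: map_range is lerp with t derived from the input range (values chosen for illustration):

t = 0.25
assert lerp(0.0, 10.0, t) == 2.5
assert map_range(2.5, 0.0, 10.0, 0.0, 1.0) == t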
# -*- coding: utf-8 -*- import sys import os from glob import glob # ------------------------------------------------------------------------- # Configure extensions extensions = [ 'sphinx.ext.autodoc', ] # ------------------------------------------------------------------------- # General configuration project = u'sphinxcontrib.traceables' copyright = u'2015, Christo' version = '0.1' # The short X.Y version. release = '0.1' # The full version, incl alpha/beta/rc. templates_path = ['_templates'] source_suffix = '.txt' # The suffix of source filenames. master_doc = 'index' # The master toctree document. today_fmt = '%Y-%m-%d' exclude_patterns = ['_build'] pygments_style = 'sphinx' keep_warnings = True # Keep warnings in output documents. # ------------------------------------------------------------------------- # Configure HTML output html_theme = 'sphinx_rtd_theme' html_show_sourcelink = True # Link to source from pages.
Add retrieval of docs version from VERSION.txt
# -*- coding: utf-8 -*- import sys import os from glob import glob # ------------------------------------------------------------------------- # Configure extensions extensions = [ 'sphinx.ext.autodoc', ] # ------------------------------------------------------------------------- # Helper function for retrieving info from files def read(*names): root_dir = os.path.dirname(__file__) path = os.path.join(root_dir, *names) with open(path) as f: return f.read() # ------------------------------------------------------------------------- # General configuration project = u'sphinxcontrib.traceables' copyright = u'2015, Christo' release = read('..', 'VERSION.txt') # The full version, incl alpha/beta/rc. version = '.'.join(release.split('.')[0:2]) # The short X.Y version. templates_path = ['_templates'] source_suffix = '.txt' # The suffix of source filenames. master_doc = 'index' # The master toctree document. today_fmt = '%Y-%m-%d' exclude_patterns = ['_build'] pygments_style = 'sphinx' keep_warnings = True # Keep warnings in output documents. # ------------------------------------------------------------------------- # Configure HTML output html_theme = 'sphinx_rtd_theme' html_show_sourcelink = True # Link to source from pages.
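For illustration, a VERSION.txt containing '0.1.2' makes the derivation above produce the short X.Y version (a trailing newline in the file does not affect the first two components):

release = '0.1.2'
version = '.'.join(release.split('.')[0:2])
assert version == '0.1'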
from fabric.api import env env.client = 'zsoobhan' env.project_code = 'prometheus' env.web_dir = 'www' # Environment-agnostic folders env.project_dir = '/var/www/%(client)s/%(project_code)s' % env env.static_dir = '/mnt/static/%(client)s/%(project_code)s' % env env.builds_dir = '%(project_dir)s/builds' % env def _configure(build_name): env.build = build_name env.virtualenv = '%(project_dir)s/virtualenvs/%(build)s/' % env env.data_dir = '%(project_dir)s/data/%(build)s/' % env env.nginx_conf = 'www/deploy/nginx/%(build)s.conf' % env env.supervisord_conf = 'www/deploy/supervisord/%(build)s.conf' % env env.wsgi = 'deploy/wsgi/%(build)s.wsgi' % env env.webserver_user = 'www-data' def prod(): _configure('prod') env.hosts = ['ec2-54-77-186-157.eu-west-1.compute.amazonaws.com'] env.remote_user = 'ubuntu' def test(): _configure('test') env.remote_user = 'ubuntu'
Switch to new ec2 instance
from fabric.api import env env.client = 'zsoobhan' env.project_code = 'prometheus' env.web_dir = 'www' # Environment-agnostic folders env.project_dir = '/var/www/%(client)s/%(project_code)s' % env env.static_dir = '/mnt/static/%(client)s/%(project_code)s' % env env.builds_dir = '%(project_dir)s/builds' % env def _configure(build_name): env.build = build_name env.virtualenv = '%(project_dir)s/virtualenvs/%(build)s/' % env env.data_dir = '%(project_dir)s/data/%(build)s/' % env env.nginx_conf = 'www/deploy/nginx/%(build)s.conf' % env env.supervisord_conf = 'www/deploy/supervisord/%(build)s.conf' % env env.wsgi = 'deploy/wsgi/%(build)s.wsgi' % env env.webserver_user = 'www-data' def prod(): _configure('prod') env.hosts = ['ec2-54-154-143-128.eu-west-1.compute.amazonaws.com'] env.remote_user = 'ubuntu' def test(): _configure('test') env.remote_user = 'ubuntu'
from __future__ import print_function from timeit import default_timer as timer import json import datetime print('Loading function') def eratosthenes(n): sieve = [ True for i in range(n+1) ] def markOff(pv): for i in range(pv+pv, n+1, pv): sieve[i] = False markOff(2) for i in range(3, n+1): if sieve[i]: markOff(i) return [ i for i in range(1, n+1) if sieve[i] ] def lambda_handler(event, context): start = timer() #print("Received event: " + json.dumps(event, indent=2)) maxPrime = int(event['queryStringParameters']['max']) numLoops = int(event['queryStringParameters']['loops']) print("looping " + str(numLoops) + " time(s)") for loop in range (0, numLoops): primes = eratosthenes(maxPrime) print("Highest 3 primes: " + str(primes.pop()) + ", " + str(primes.pop()) + ", " + str(primes.pop())) durationSeconds = timer() - start return {"statusCode": 200, \ "headers": {"Content-Type": "application/json"}, \ "body": "{\"durationSeconds\": " + str(durationSeconds) + \ ", \"max\": " + str(maxPrime) + ", \"loops\": " + str(numLoops) + "}"}
Convert tabs to spaces per PEP 8.
from __future__ import print_function from timeit import default_timer as timer import json import datetime print('Loading function') def eratosthenes(n): sieve = [ True for i in range(n+1) ] def markOff(pv): for i in range(pv+pv, n+1, pv): sieve[i] = False markOff(2) for i in range(3, n+1): if sieve[i]: markOff(i) return [ i for i in range(1, n+1) if sieve[i] ] def lambda_handler(event, context): start = timer() #print("Received event: " + json.dumps(event, indent=2)) maxPrime = int(event['queryStringParameters']['max']) numLoops = int(event['queryStringParameters']['loops']) print("looping " + str(numLoops) + " time(s)") for loop in range (0, numLoops): primes = eratosthenes(maxPrime) print("Highest 3 primes: " + str(primes.pop()) + ", " + str(primes.pop()) + ", " + str(primes.pop())) durationSeconds = timer() - start return {"statusCode": 200, \ "headers": {"Content-Type": "application/json"}, \ "body": "{\"durationSeconds\": " + str(durationSeconds) + \ ", \"max\": " + str(maxPrime) + ", \"loops\": " + str(numLoops) + "}"}
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2014-2015 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: Emanuel Cino <ecino@compassion.ch> # # The licence is in the file __openerp__.py # ############################################################################## from openerp.osv import orm from . import gp_connector class project_compassion(orm.Model): _inherit = 'compassion.project' def write(self, cr, uid, ids, vals, context=None): """Update Project in GP.""" res = super(project_compassion, self).write(cr, uid, ids, vals, context) gp_connect = gp_connector.GPConnect() for project in self.browse(cr, uid, ids, context): gp_connect.upsert_project(uid, project) return res
Fix bug in write project.
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2014-2015 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: Emanuel Cino <ecino@compassion.ch> # # The licence is in the file __openerp__.py # ############################################################################## from openerp.osv import orm from . import gp_connector class project_compassion(orm.Model): _inherit = 'compassion.project' def write(self, cr, uid, ids, vals, context=None): """Update Project in GP.""" res = super(project_compassion, self).write(cr, uid, ids, vals, context) if not isinstance(ids, list): ids = [ids] gp_connect = gp_connector.GPConnect() for project in self.browse(cr, uid, ids, context): gp_connect.upsert_project(uid, project) return res
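The added isinstance guard defends against callers passing a single id where browse() iterates over a list; the same normalisation in isolation, as a self-contained sketch:

def normalise_ids(ids):
    # mirror the guard added in write(): accept an int or a list of ints
    return ids if isinstance(ids, list) else [ids]

assert normalise_ids(42) == [42]
assert normalise_ids([42, 43]) == [42, 43]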
import os import zipfile import tempfile from datapackage_pipelines.wrapper import ingest, spew import gobble params, datapackage, res_iter = ingest() spew(datapackage, res_iter) user = gobble.user.User() in_filename = open(params['in-file'], 'rb') in_file = zipfile.ZipFile(in_filename) temp_dir = tempfile.mkdtemp() for name in in_file.namelist(): in_file.extract(name, temp_dir) in_file.close() datapackage_json = os.path.join(temp_dir, 'datapackage.json') package = gobble.fiscal.FiscalDataPackage(datapackage_json, user=user) package.upload(skip_validation=True, publish=False)
Set the publication with a parameter.
import os import zipfile import tempfile from datapackage_pipelines.wrapper import ingest, spew import gobble params, datapackage, res_iter = ingest() spew(datapackage, res_iter) user = gobble.user.User() in_filename = open(params['in-file'], 'rb') in_file = zipfile.ZipFile(in_filename) temp_dir = tempfile.mkdtemp() for name in in_file.namelist(): in_file.extract(name, temp_dir) in_file.close() datapackage_json = os.path.join(temp_dir, 'datapackage.json') package = gobble.fiscal.FiscalDataPackage(datapackage_json, user=user) package.upload(skip_validation=True, publish=params.get('publish', False))
from flask import Blueprint, render_template, abort, request, redirect, session, url_for from flask.ext.login import current_user, login_user from sqlalchemy import desc from packages.objects import * from packages.common import * from packages.config import _cfg import os import zipfile import urllib api = Blueprint('api', __name__) @api.route("/test") @json_output def test(): return { 'value': 'Hello world!' }
Add API endpoint for logging in
from flask import Blueprint, render_template, abort, request, redirect, session, url_for from flask.ext.login import current_user, login_user from sqlalchemy import desc from packages.objects import * from packages.common import * from packages.config import _cfg import os import zipfile import urllib api = Blueprint('api', __name__) @api.route("/api/v1/login", methods=['POST']) @json_output def login(): username = request.form['username'] password = request.form['password'] user = User.query.filter(User.username.ilike(username)).first() if not user: return { 'success': False, 'error': 'Your username or password is incorrect.' } if user.confirmation != '' and user.confirmation != None: return { 'success': False, 'error': 'Your account is pending. Check your email or contact support@knightos.org' } if not bcrypt.checkpw(password, user.password): return { 'success': False, 'error': 'Your username or password is incorrect.' } login_user(user) return { 'success': True }
from functools import wraps import os from functools import wraps def restoring_chdir(fn): @wraps(fn) def decorator(*args, **kw): try: path = os.getcwd() return fn(*args, **kw) finally: os.chdir(path) return decorator class BaseBuilder(object): """ The Base for all Builders. Defines the API for subclasses. """ _changed = True @restoring_chdir def force(self, version): """ An optional step to force a build even when nothing has changed. """ print "Forcing a build by touching files" os.chdir(version.project.conf_dir(version.slug)) os.system('touch * && touch */*') def clean(self, version): """ Clean up the version so it's ready for usage. This is used to add RTD specific stuff to Sphinx, and to implement whitelists on projects as well. It is guaranteed to be called before your project is built. """ raise NotImplementedError def build(self, version): """ Do the actual building of the documentation. """ raise NotImplementedError def move(self, version): """ Move the documentation from it's generated place to its final home. This needs to understand both a single server dev environment, as well as a multi-server environment. """ raise NotImplementedError @property def changed(self): """ Says whether the documentation has changed, and requires further action. This is mainly used to short-circuit more expensive builds of other output formats if the project docs didn't change on an update. Defaults to `True` """ return self._changed
Kill _changed from the Base so subclassing makes more sense.
from functools import wraps import os from functools import wraps def restoring_chdir(fn): @wraps(fn) def decorator(*args, **kw): try: path = os.getcwd() return fn(*args, **kw) finally: os.chdir(path) return decorator class BaseBuilder(object): """ The Base for all Builders. Defines the API for subclasses. """ @restoring_chdir def force(self, version): """ An optional step to force a build even when nothing has changed. """ print "Forcing a build by touching files" os.chdir(version.project.conf_dir(version.slug)) os.system('touch * && touch */*') def clean(self, version): """ Clean up the version so it's ready for usage. This is used to add RTD specific stuff to Sphinx, and to implement whitelists on projects as well. It is guaranteed to be called before your project is built. """ raise NotImplementedError def build(self, version): """ Do the actual building of the documentation. """ raise NotImplementedError def move(self, version): """ Move the documentation from it's generated place to its final home. This needs to understand both a single server dev environment, as well as a multi-server environment. """ raise NotImplementedError @property def changed(self): """ Says whether the documentation has changed, and requires further action. This is mainly used to short-circuit more expensive builds of other output formats if the project docs didn't change on an update. Defaults to `True` """ return getattr(self, '_changed', True)
from sharepa.search import ShareSearch, basic_search # noqa from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa def source_counts(): return bucket_to_dataframe( 'total_source_counts', basic_search.execute().aggregations.sourceAgg.buckets )
Make total_source_counts always be a full query
from sharepa.search import ShareSearch, basic_search # noqa from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa def source_counts(): return bucket_to_dataframe( 'total_source_counts', ShareSearch().execute().aggregations.sourceAgg.buckets )
from collections import Counter class APMTracker(object): def handleInitGame(self, event, replay): for player in replay.players: player.apm = Counter() player.aps = Counter() player.seconds_played = replay.length.seconds def handlePlayerActionEvent(self, event, replay): event.player.aps[event.second] += 1 event.player.apm[event.second/60] += 1 def handlePlayerLeaveEvent(self, event, replay): event.player.seconds_played = event.second def handleEndGame(self, event, replay): print "Handling End Game" for player in replay.players: if len(player.apm.keys()) > 0: player.avg_apm = sum(player.apm.values())/float(player.seconds_played)*60 else: player.avg_apm = 0
Fix the engine's APM plugin and add some documentation.
from collections import Counter class APMTracker(object): """ Builds ``player.aps`` and ``player.apm`` dictionaries where an action is any Selection, Hotkey, or Ability event. Also provides ``player.avg_apm`` which is defined as the sum of all the above actions divided by the number of seconds played by the player (not necessarily the whole game) multiplied by 60. APM is 0 for games under 1 minute in length. """ def handleInitGame(self, event, replay): for player in replay.players: player.apm = Counter() player.aps = Counter() player.seconds_played = replay.length.seconds def handlePlayerActionEvent(self, event, replay): event.player.aps[event.second] += 1 event.player.apm[event.second/60] += 1 def handlePlayerLeaveEvent(self, event, replay): event.player.seconds_played = event.second def handleEndGame(self, event, replay): print "Handling End Game" for player in replay.players: if len(player.apm.keys()) > 0: player.avg_apm = sum(player.aps.values())/float(player.seconds_played)*60 else: player.avg_apm = 0
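A worked example of the corrected avg_apm formula on a hand-made counter (the numbers are illustrative):

from collections import Counter

aps = Counter({0: 3, 1: 2, 5: 1})   # 6 actions over 6 seconds played
seconds_played = 6
avg_apm = sum(aps.values()) / float(seconds_played) * 60
assert avg_apm == 60.0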
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ import horizon class Instances(horizon.Panel): name = _("Instances") slug = 'instances' permissions = ('openstack.services.compute',) policy_rules = ((("compute", "context_is_admin"), ("compute", "compute:get_all")),)
Fix an incorrect policy rule in Admin > Instances

Change-Id: I765ae0c36d19c88138fbea9545a2ca4791377ffb
Closes-Bug: #1703066
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ import horizon class Instances(horizon.Panel): name = _("Instances") slug = 'instances' permissions = ('openstack.services.compute',) policy_rules = ((("compute", "context_is_admin"), ("compute", "os_compute_api:servers:detail")),)
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2018-02-28 16:47 from __future__ import unicode_literals from django.db import migrations, models def instance_types_to_list(apps, schema_editor): PoolConfiguration = apps.get_model("ec2spotmanager", "PoolConfiguration") for pool in PoolConfiguration.objects.all(): pool.ec2_instance_types_list = [pool.ec2_instance_types] pool.save() class Migration(migrations.Migration): dependencies = [ ('ec2spotmanager', '0006_auto_20150625_2050'), ] operations = [ migrations.AlterField( model_name='poolconfiguration', name='ec2_instance_type', field=models.CharField(blank=True, max_length=1023, null=True), ), migrations.RenameField( model_name='poolconfiguration', old_name='ec2_instance_type', new_name='ec2_instance_types', ), migrations.RunPython(instance_types_to_list), ]
Fix migration. Custom triggers are not run in data migrations.
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2018-02-28 16:47 from __future__ import print_function, unicode_literals import json import sys from django.db import migrations, models def instance_type_to_list(apps, schema_editor): PoolConfiguration = apps.get_model("ec2spotmanager", "PoolConfiguration") for pool in PoolConfiguration.objects.all(): if pool.ec2_instance_type: pool.ec2_instance_type = json.dumps([pool.ec2_instance_type]) pool.save() def instance_type_from_list(apps, schema_editor): PoolConfiguration = apps.get_model("ec2spotmanager", "PoolConfiguration") for pool in PoolConfiguration.objects.all(): if pool.ec2_instance_types: types = json.loads(pool.ec2_instance_types) if len(types) > 1: print("pool %d had instance types %r, choosing %s for reverse migration" % (pool.id, types, types[0]), file=sys.stderr) pool.ec2_instance_types = types[0] pool.save() class Migration(migrations.Migration): dependencies = [ ('ec2spotmanager', '0006_auto_20150625_2050'), ] operations = [ migrations.AlterField( model_name='poolconfiguration', name='ec2_instance_type', field=models.CharField(blank=True, max_length=1023, null=True), ), migrations.RunPython( code=instance_type_to_list, reverse_code=instance_type_from_list, ), migrations.RenameField( model_name='poolconfiguration', old_name='ec2_instance_type', new_name='ec2_instance_types', ), ]
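The forward/reverse pair serialises through JSON rather than model helpers, since custom model methods are unavailable in data migrations; the core transformation in isolation (the instance type is illustrative):

import json

forward = json.dumps(["m3.large"])            # single type becomes a one-element list
assert json.loads(forward) == ["m3.large"]
assert json.loads(forward)[0] == "m3.large"   # reverse migration keeps the first entry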
import re from django_webtest import WebTest class TestConstituencyDetailView(WebTest): def test_constituencies_page(self): # Just a smoke test to check that the page loads: response = self.app.get('/constituencies') aberdeen_north = response.html.find( 'a', text=re.compile(r'York Outer') ) self.assertTrue(aberdeen_north)
Make test_constituencies_page work without PopIt
import re from mock import patch from django_webtest import WebTest class TestConstituencyDetailView(WebTest): @patch('candidates.popit.PopIt') def test_constituencies_page(self, mock_popit): # Just a smoke test to check that the page loads: response = self.app.get('/constituencies') aberdeen_north = response.html.find( 'a', text=re.compile(r'York Outer') ) self.assertTrue(aberdeen_north)
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ActiveMaster definition.""" from config_bootstrap import Master class ChromiumFYI(Master.Master1): project_name = 'Chromium FYI' master_port = 8011 slave_port = 8111 master_port_alt = 8211 buildbot_url = 'http://build.chromium.org/p/chromium.fyi/' reboot_on_step_timeout = True pubsub_service_account_file = 'service-account-pubsub.json' pubsub_topic_url = 'projects/luci-milo/topics/public-buildbot' name = 'chromium.fyi'
Revert pubsub roll on FYI

BUG=
TBR=estaab

Review URL: https://codereview.chromium.org/1688503002

git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@298680 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ActiveMaster definition.""" from config_bootstrap import Master class ChromiumFYI(Master.Master1): project_name = 'Chromium FYI' master_port = 8011 slave_port = 8111 master_port_alt = 8211 buildbot_url = 'http://build.chromium.org/p/chromium.fyi/' reboot_on_step_timeout = True
from django.db import models class Dependency(models.Model): source = models.CharField(max_length=255, db_index=True) depends_on = models.CharField(max_length=255, db_index=True) class Meta: unique_together = ("source", "depends_on")
Add __unicode__ to Dependency model
from __future__ import unicode_literals from django.db import models class Dependency(models.Model): source = models.CharField(max_length=255, db_index=True) depends_on = models.CharField(max_length=255, db_index=True) class Meta: unique_together = ("source", "depends_on") def __unicode__(self): return "{0} depends on {1}".format(self.source, self.depends_on)
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import os sys.path.append(os.path.dirname(os.path.dirname(__file__)) + '/src') import DataVisualizing if len(sys.argv) != 2: print 'usage: create_heatmap.py <data file>' print ' expected infile is a datafile containing tracking data' print ' this is a csv file with the following columns:' print ' cartodb_id, date_time, day_of_year, distance_from_nest_in_meters' sys.exit(-1) def main(): dvis = DataVisualizing.TrackingVisualizer(infile=sys.argv[1]) print ('var day_month_heatdata = {0};'.format(dvis.as_heatmap_json(domain='month', agg_function='max'))) print ('var hour_month_heatdata = {0};'.format(dvis.as_heatmap_json(domain='month', subdomain='hour', agg_function='max'))) main()
Add raw line data to output
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import os sys.path.append(os.path.dirname(os.path.dirname(__file__)) + '/src') import DataVisualizing if len(sys.argv) != 2: print 'usage: create_heatmap.py <data file>' print ' expected infile is a datafile containing tracking data' print ' this is a csv file with the following columns:' print ' cartodb_id, date_time, day_of_year, distance_from_nest_in_meters' sys.exit(-1) def main(): dvis = DataVisualizing.TrackingVisualizer(infile=sys.argv[1]) print ('var day_month_heatdata = {0};'.format(dvis.as_heatmap_json(domain='month', agg_function='max'))) print ('var hour_month_heatdata = {0};'.format(dvis.as_heatmap_json(domain='month', subdomain='hour', agg_function='max'))) print ('var hour_month_linedata = [ {{ \'key\': \'Maximum distance\', \'color\': \'green\', \'values\': {0} }} ];'.format(dvis.as_raw_line_json(agg_function='max'))) main()
DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'tests.db', }, } INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.contenttypes', 'django.contrib.staticfiles', 'django_easyfilters', 'test_app', ] TEMPLATE_CONTEXT_PROCESSORS = [ "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.core.context_processors.tz", "django.contrib.messages.context_processors.messages", "django.core.context_processors.request", ] ROOT_URLCONF = 'test_app.urls' DEBUG = True SITE_ID = 1 STATIC_URL = '/static/' SECRET_KEY = 'x'
Remove tz context processor (not available in 1.3)
DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'tests.db', }, } INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.contenttypes', 'django.contrib.staticfiles', 'django_easyfilters', 'test_app', ] TEMPLATE_CONTEXT_PROCESSORS = [ "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.contrib.messages.context_processors.messages", "django.core.context_processors.request", ] ROOT_URLCONF = 'test_app.urls' DEBUG = True SITE_ID = 1 STATIC_URL = '/static/' SECRET_KEY = 'x'
Add a small test for renderer
import mfr import json from tests import utils from tornado import testing class TestRenderHandler(utils.HandlerTestCase): @testing.gen_test def test_options_skips_prepare(self): # Would crash b/c lack of mocks yield self.http_client.fetch( self.get_url('/render'), method='OPTIONS' )
#!/usr/bin/env python def createBlankGrid(row,column): blankgrid = [[0 for x in range(column)] for y in range(row)] return blankgrid def getHeight(grid): return len(grid) def getWidth(grid): return len(grid[0]) def printGrid(grid): numRow = len(grid) for i in range(0,numRow): row = grid[i] rowstr = '' for j in row: rowstr += str(j)+' ' print(rowstr) def serializeGrid(grid): numRow = len(grid) numCol = len(grid[0]) gridstr = '' for j in range(0,numCol): for i in range(0,numRow): gridstr += str(grid[i][j]) return gridstr
Add setGrid and resetGrid functions
#!/usr/bin/env python def createBlankGrid(row,column): blankgrid = [[0 for x in range(column)] for y in range(row)] return blankgrid def getHeight(grid): return len(grid) def getWidth(grid): return len(grid[0]) def printGrid(grid): numRow = len(grid) for i in range(0,numRow): row = grid[i] rowstr = '' for j in row: rowstr += str(j)+' ' print(rowstr) def serializeGrid(grid): numRow = len(grid) numCol = len(grid[0]) gridstr = '' for j in range(0,numCol): for i in range(0,numRow): gridstr += str(grid[i][j]) return gridstr def setGrid(grid, setlist, rowoffset, coloffset): for entry in setlist: grid[entry[0]+rowoffset][entry[1]+coloffset] = 1 return grid def resetGrid(grid, setlist, rowoffset, coloffset): for entry in setlist: grid[entry[0]+rowoffset][entry[1]+coloffset] = 0 return grid
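A short usage sketch combining the new setGrid/resetGrid with the existing helpers (cell coordinates are illustrative):

grid = createBlankGrid(3, 4)
cells = [(0, 1), (1, 2), (2, 0)]
setGrid(grid, cells, 0, 0)
assert serializeGrid(grid).count('1') == 3
resetGrid(grid, cells, 0, 0)
assert serializeGrid(grid) == '0' * 12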
# from jaraco.util.itertools def always_iterable(item): """ Given an object, always return an iterable. If the item is not already iterable, return a tuple containing only the item. >>> always_iterable([1,2,3]) [1, 2, 3] >>> always_iterable('foo') ('foo',) >>> always_iterable(None) (None,) >>> always_iterable(xrange(10)) xrange(10) """ if isinstance(item, basestring) or not hasattr(item, '__iter__'): item = item, return item def total_seconds(td): """ Python 2.7 adds a total_seconds method to timedelta objects. See http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds >>> import datetime >>> total_seconds(datetime.timedelta(hours=24)) 86400.0 """ try: result = td.total_seconds() except AttributeError: seconds = td.seconds + td.days * 24 * 3600 result = (td.microseconds + seconds * 10**6) / 10**6 return result
Use float so test passes on Python 2.6
# from jaraco.util.itertools def always_iterable(item): """ Given an object, always return an iterable. If the item is not already iterable, return a tuple containing only the item. >>> always_iterable([1,2,3]) [1, 2, 3] >>> always_iterable('foo') ('foo',) >>> always_iterable(None) (None,) >>> always_iterable(xrange(10)) xrange(10) """ if isinstance(item, basestring) or not hasattr(item, '__iter__'): item = item, return item def total_seconds(td): """ Python 2.7 adds a total_seconds method to timedelta objects. See http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds >>> import datetime >>> total_seconds(datetime.timedelta(hours=24)) 86400.0 """ try: result = td.total_seconds() except AttributeError: seconds = td.seconds + td.days * 24 * 3600 result = float((td.microseconds + seconds * 10**6) / 10**6) return result
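The float() wrapper only matters on the fallback path (Python 2.6, where timedelta has no total_seconds); a quick check that both paths now agree on the doctest's result type:

import datetime

result = total_seconds(datetime.timedelta(hours=24))
assert result == 86400.0
assert isinstance(result, float)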
import sys from CITests import CITests # Libs in Application Examples appExamples = { #"KundurSMIB":"/ApplicationExamples/KundurSMIB/package.mo", #"TwoAreas":"/ApplicationExamples/TwoAreas/package.mo", #"SevenBus":"/ApplicationExamples/SevenBus/package.mo", #"IEEE9":"/ApplicationExamples/IEEE9/package.mo", #"IEEE14":"/ApplicationExamples/IEEE14/package.mo", #"AKD":"/ApplicationExamples/AKD/package.mo", #"N44":"/ApplicationExamples/N44/package.mo", #"OpenCPSD5d3B":"/ApplicationExamples/OpenCPSD5d3B/package.mo", #"RaPIdExperiments":"/ApplicationExamples/RaPIdExperiments/package.mo" } # Instance of CITests ci = CITests("/OpenIPSL") # Run Check on OpenIPSL passLib = ci.runSyntaxCheck("OpenIPSL","/OpenIPSL/package.mo") if not passLib: # Error in OpenIPSL sys.exit(1) else: # Run Check on App Examples passAppEx = 1 for package in appExamples.keys(): passAppEx = passAppEx * ci.runSyntaxCheck(package,appExamples[package]) # The tests are failing if the number of failed check > 0 if passAppEx: # Everything is fine sys.exit(0) else: # Exit with error sys.exit(1)
Fix the location path of OpenIPSL
import sys from CITests import CITests # Libs in Application Examples appExamples = { #"KundurSMIB":"/ApplicationExamples/KundurSMIB/package.mo", #"TwoAreas":"/ApplicationExamples/TwoAreas/package.mo", #"SevenBus":"/ApplicationExamples/SevenBus/package.mo", #"IEEE9":"/ApplicationExamples/IEEE9/package.mo", #"IEEE14":"/ApplicationExamples/IEEE14/package.mo", #"AKD":"/ApplicationExamples/AKD/package.mo", #"N44":"/ApplicationExamples/N44/package.mo", #"OpenCPSD5d3B":"/ApplicationExamples/OpenCPSD5d3B/package.mo", #"RaPIdExperiments":"/ApplicationExamples/RaPIdExperiments/package.mo" } # Instance of CITests ci = CITests("/OpenIPSL") # Run Check on OpenIPSL passLib = ci.runSyntaxCheck("OpenIPSL","/OpenIPSL/OpenIPSL/package.mo") if not passLib: # Error in OpenIPSL sys.exit(1) else: # Run Check on App Examples passAppEx = 1 for package in appExamples.keys(): passAppEx = passAppEx * ci.runSyntaxCheck(package,appExamples[package]) # The tests are failing if the number of failed check > 0 if passAppEx: # Everything is fine sys.exit(0) else: # Exit with error sys.exit(1)
Add sane log output formatter
############################################################################### # # Copyright (c) 2012 Ruslan Spivak # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### __author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>' import time import logging _DEFAULT_FMT = '%(levelname)-8s [%(asctime)s] %(name)s: %(message)s' class Formatter(logging.Formatter): def __init__(self, fmt=None, datefmt=None): super(Formatter, self).__init__(fmt or _DEFAULT_FMT, datefmt) # A user-configurable function to convert the creation time to a tuple. # It's used by Format.formatTime method and default is time.localtime() # We set it to convert time to a struct_time in UTC self.converter = time.gmtime def formatException(self, exc_info): text = super(Formatter, self).formatException(exc_info) # Prepend ! mark to every line text = '\n'.join(('! %s' % line) for line in text.splitlines()) return text
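A minimal usage sketch (logger name and message are illustrative; timestamps render in UTC because the converter is time.gmtime):

import logging

handler = logging.StreamHandler()
handler.setFormatter(Formatter())
log = logging.getLogger('demo')
log.addHandler(handler)
log.warning('something happened')
# e.g. WARNING  [2012-06-01 12:00:00,000] demo: something happened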
import unittest from src.dojo import Dojo class TestCreateRoom (unittest.TestCase): def test_create_room_successfully(self): my_class_instance = Dojo() initial_room_count = len(my_class_instance.all_rooms) blue_office = my_class_instance.create_room("office", "Blue") self.assertTrue(blue_office) new_room_count = len(my_class_instance.all_rooms) self.assertEqual(new_room_count - initial_room_count, 1) def test_create_rooms_successfully(self): my_class_instance = Dojo() initial_room_count = len(my_class_instance.all_rooms) offices = my_class_instance.create_room("office", "Blue", "Black", "Brown") self.assertTrue(offices) new_room_count = len(my_class_instance.all_rooms) self.assertEqual(new_room_count - initial_room_count, 3) def test_person_added_to_system(self): initial_person_count = len(self.dojo.all_people) person = self.dojo.add_person("Neil", "Armstrong", "Staff") self.assertTrue(person) new_person_count = len(self.dojo.all_people) self.assertEqual(new_person_count - initial_person_count, 1)
Add test to check that person has been given office
import unittest from src.dojo import Dojo class TestCreateRoom (unittest.TestCase): def test_create_room_successfully(self): my_class_instance = Dojo() initial_room_count = len(my_class_instance.all_rooms) blue_office = my_class_instance.create_room("office", "Blue") self.assertTrue(blue_office) new_room_count = len(my_class_instance.all_rooms) self.assertEqual(new_room_count - initial_room_count, 1) def test_create_rooms_successfully(self): my_class_instance = Dojo() initial_room_count = len(my_class_instance.all_rooms) offices = my_class_instance.create_room("office", "Blue", "Black", "Brown") self.assertTrue(offices) new_room_count = len(my_class_instance.all_rooms) self.assertEqual(new_room_count - initial_room_count, 3) def test_person_added_to_system(self): initial_person_count = len(self.dojo.all_people) person = self.dojo.add_person("Neil", "Armstrong", "Staff") self.assertTrue(person) new_person_count = len(self.dojo.all_people) self.assertEqual(new_person_count - initial_person_count, 1) def test_person_has_been_assigned_office(self): person = self.dojo.add_person("Neil", "Armstrong", "Staff") self.assertTrue(person) self.assertTrue(self.dojo.all_people[-1].has_office)
from . import platform_specific, input from .graphics import screen from .run_loop import main_run_loop, every platform_specific.fixup_env() def run(): main_run_loop.add_wait_callback(input.check_for_quit_event) main_run_loop.add_after_action_callback(screen.after_loop) main_run_loop.run()
Allow run argument to avoid @every template
from . import platform_specific, input from .graphics import screen from .run_loop import main_run_loop, every platform_specific.fixup_env() def run(loop=None): if loop is not None: every(seconds=1.0/30)(loop) main_run_loop.add_wait_callback(input.check_for_quit_event) main_run_loop.add_after_action_callback(screen.after_loop) main_run_loop.run()
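A hedged usage sketch of the new optional argument (the body of loop is a placeholder):

def loop():
    pass  # per-frame logic; run() wraps it with every(seconds=1.0/30)

run(loop)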
import requests from flask import Flask, render_template app = Flask(__name__, instance_relative_config=True) app.config.from_pyfile("appconfig.py") BBC_id= "bbc-news" @app.route("/") def index(): r = requests.get( f"https://newsapi.org/v1/articles?source={BBC_id}&sortBy=top&apiKey={app.config['API_KEY']}" ) return render_template("index.html", articles=r.json().get("articles")) if __name__ == "__main__": app.run()
Create dynamic routing for supported sources.
import requests from flask import Flask, render_template app = Flask(__name__, instance_relative_config=True) app.config.from_pyfile("appconfig.py") sources = { "bbc": "bbc-news", "cnn": "cnn", "hackernews": "hacker-news" } def create_link(source): if source in sources.keys(): return f"https://newsapi.org/v1/articles?source={sources[source]}&sortBy=top&apiKey={app.config['API_KEY']}" @app.route("/") @app.route("/<source>") def index(source="bbc"): r = requests.get(create_link(source)) return render_template("index.html", articles=r.json().get("articles"), source=source) if __name__ == "__main__": app.run()
# -*- coding: utf-8 -*- __version__ = '$Id$' import family # The Wikia Search family # user-config.py: usernames['wikia']['wikia'] = 'User name' class Family(family.Family): def __init__(self): family.Family.__init__(self) self.name = u'wikia' self.langs = { u'wikia': None, } def hostname(self, code): return u'www.wikia.com' def version(self, code): return "1.15.1" def scriptpath(self, code): return '' def apipath(self, code): return '/api.php'
Update a version number from trunk r9016
# -*- coding: utf-8 -*- __version__ = '$Id$' import family # The Wikia Search family # user-config.py: usernames['wikia']['wikia'] = 'User name' class Family(family.Family): def __init__(self): family.Family.__init__(self) self.name = u'wikia' self.langs = { u'wikia': None, } def hostname(self, code): return u'www.wikia.com' def version(self, code): return "1.16.2" def scriptpath(self, code): return '' def apipath(self, code): return '/api.php'
#!/usr/bin/env python # -*- coding: utf-8 -*- '''Migration script for Search-enabled Models.''' from __future__ import absolute_import import logging from modularodm.query.querydialect import DefaultQueryDialect as Q from website.models import Node from framework.auth import User import website.search.search as search from website.app import init_app logger = logging.getLogger(__name__) app = init_app("website.settings", set_backends=True, routes=True) def migrate_nodes(): nodes = Node.find(Q('is_public', 'eq', True) & Q('is_deleted', 'eq', False)) for i, node in enumerate(nodes): node.update_search() return i + 1 # Started counting from 0 def migrate_users(): for i, user in enumerate(User.find()): if user.is_active: user.update_search() return i + 1 # Started counting from 0 def main(): ctx = app.test_request_context() ctx.push() search.delete_all() search.create_index() logger.info("Nodes migrated: {}".format(migrate_nodes())) logger.info("Users migrated: {}".format(migrate_users())) ctx.pop() if __name__ == '__main__': main()
Add additional logging for users
#!/usr/bin/env python # -*- coding: utf-8 -*- '''Migration script for Search-enabled Models.''' from __future__ import absolute_import import logging from modularodm.query.querydialect import DefaultQueryDialect as Q from website.models import Node from framework.auth import User import website.search.search as search from website.app import init_app logger = logging.getLogger(__name__) app = init_app("website.settings", set_backends=True, routes=True) def migrate_nodes(): nodes = Node.find(Q('is_public', 'eq', True) & Q('is_deleted', 'eq', False)) for i, node in enumerate(nodes): node.update_search() logger.info('Nodes migrated: {}'.format(i + 1)) def migrate_users(): n_iter = 0 for i, user in enumerate(User.find()): if user.is_active: user.update_search() n_iter += 1 logger.info('Users iterated: {0}\nUsers migrated: {1}'.format(i + 1, n_iter)) def main(): ctx = app.test_request_context() ctx.push() search.delete_all() search.create_index() migrate_nodes() migrate_users() ctx.pop() if __name__ == '__main__': main()
from django.conf.urls import url, include from rest_framework import routers from api import views router = routers.DefaultRouter() router.register(r'categories', views.CategoryViewSet) router.register(r'commodities', views.CommodityViewSet) router.register(r'economies', views.EconomyViewSet) router.register(r'factions', views.FactionViewSet) router.register(r'governments', views.GovernmentViewSet) router.register(r'allegiances', views.AllegianceViewSet) router.register(r'states', views.StateViewSet) router.register(r'securities', views.SecurityViewSet) router.register(r'systems', views.SystemViewSet) router.register(r'station_types', views.StationTypeViewSet) router.register(r'stations', views.StationViewSet) router.register(r'listings', views.ListingViewSet) # Wire up our API using automatic URL routing. # Additionally, we include login URLs for the browsable API. urlpatterns = [ url(r'^/', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) ]
Fix for the api at root url.
from django.conf.urls import url, include from rest_framework import routers from api import views router = routers.DefaultRouter() router.register(r'categories', views.CategoryViewSet) router.register(r'commodities', views.CommodityViewSet) router.register(r'economies', views.EconomyViewSet) router.register(r'factions', views.FactionViewSet) router.register(r'governments', views.GovernmentViewSet) router.register(r'allegiances', views.AllegianceViewSet) router.register(r'states', views.StateViewSet) router.register(r'securities', views.SecurityViewSet) router.register(r'systems', views.SystemViewSet) router.register(r'station_types', views.StationTypeViewSet) router.register(r'stations', views.StationViewSet) router.register(r'listings', views.ListingViewSet) # Wire up our API using automatic URL routing. # Additionally, we include login URLs for the browsable API. urlpatterns = [ url(r'^', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) ]
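The one-character fix works because Django matches URL patterns against the request path with the leading slash already stripped, so r'^/' can never match the site root; the regex behaviour in isolation:

import re

assert re.match(r'^/', '') is None      # old pattern: root URL never matches
assert re.match(r'^', '') is not None   # new pattern: root URL matches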
#!/usr/bin/env python3 # coding=utf-8 import argparse import json import jsonschema import awp.packager import awp.validator # Parse arguments given via command-line interface def parse_cli_args(): parser = argparse.ArgumentParser() parser.add_argument( '--force', '-f', action='store_true', help='forces the copying of all files and directories') parser.add_argument( '--export', '-e', nargs='?', const='', default=None, help='exports the installed workflow to the local project directory') parser.add_argument( '--version', '-v', help='the new version number to use for the workflow') return parser.parse_args() # Locate and parse the configuration for the utility def get_utility_config(): with open('packager.json', 'r') as config_file: return json.load(config_file) def main(): cli_args = parse_cli_args() config = get_utility_config() try: awp.validator.validate_config(config) awp.packager.package_workflow( config, version=cli_args.version, export_file=cli_args.export, force=cli_args.force) except jsonschema.exceptions.ValidationError as error: print(error.message) if __name__ == '__main__': main()
Clarify where packager.json validation error originates
#!/usr/bin/env python3 # coding=utf-8 import argparse import json import jsonschema import awp.packager import awp.validator # Parse arguments given via command-line interface def parse_cli_args(): parser = argparse.ArgumentParser() parser.add_argument( '--force', '-f', action='store_true', help='forces the copying of all files and directories') parser.add_argument( '--export', '-e', nargs='?', const='', default=None, help='exports the installed workflow to the local project directory') parser.add_argument( '--version', '-v', help='the new version number to use for the workflow') return parser.parse_args() # Locate and parse the configuration for the utility def get_utility_config(): with open('packager.json', 'r') as config_file: return json.load(config_file) def main(): cli_args = parse_cli_args() config = get_utility_config() try: awp.validator.validate_config(config) awp.packager.package_workflow( config, version=cli_args.version, export_file=cli_args.export, force=cli_args.force) except jsonschema.exceptions.ValidationError as error: print('awp (from packager.json): {}'.format(error.message)) if __name__ == '__main__': main()
def YilmIndexVector(i, l, m):
    """
    Compute the index of an 1D array of spherical harmonic coefficients
    corresponding to i, l, and m.

    Usage
    -----
    index = YilmIndexVector (i, l, m)

    Returns
    -------
    index : integer
        Index of an 1D array of spherical harmonic coefficients corresponding
        to i, l, and m.

    Parameters
    ----------
    i : integer
        1 corresponds to the cosine coefficient cilm[0,:,:], and 2 corresponds
        to the sine coefficient cilm[1,:,:].
    l : integer
        The spherical harmonic degree.
    m : integer
        The angular order.

    Notes
    -----
    YilmIndexVector will calculate the index of a 1D vector of spherical
    harmonic coefficients corresponding to degree l, angular order m and i
    (1 = cosine, 2 = sine). The index is given by l**2+(i-1)*l+m.
    """
    return l**2 + (i - 1) * l + m
Add error checks to YilmIndexVector (and update docs)
def YilmIndexVector(i, l, m):
    """
    Compute the index of a 1D array of spherical harmonic coefficients
    corresponding to i, l, and m.

    Usage
    -----
    index = YilmIndexVector (i, l, m)

    Returns
    -------
    index : integer
        Index of a 1D array of spherical harmonic coefficients corresponding
        to i, l, and m.

    Parameters
    ----------
    i : integer
        1 corresponds to the cosine coefficient Ylm = cilm[0,:,:], and 2
        corresponds to the sine coefficient Yl,-m = cilm[1,:,:].
    l : integer
        The spherical harmonic degree.
    m : integer
        The angular order, which must be greater or equal to zero.

    Notes
    -----
    YilmIndexVector will calculate the index of a 1D vector of spherical
    harmonic coefficients corresponding to degree l, (positive) angular order
    m and i (1 = cosine, 2 = sine). The index is given by l**2+(i-1)*l+m.
    """
    if l < 0:
        raise ValueError('The spherical harmonic degree must be positive. '
                         'Input value is {:s}'.format(repr(l)))
    if m < 0:
        raise ValueError('The angular order must be positive. '
                         'Input value is {:s}'.format(repr(m)))
    if m > l:
        raise ValueError('The angular order must be less than or equal to '
                         'the spherical harmonic degree. Input degree is {:s}.'
                         ' Input order is {:s}.'.format(repr(l), repr(m)))

    return l**2 + (i - 1) * l + m
""" Harvester of pubmed for the SHARE notification service """ from __future__ import unicode_literals from scrapi.base import schemas from scrapi.base import helpers from scrapi.base import OAIHarvester def oai_extract_url_pubmed(identifiers): identifiers = [identifiers] if not isinstance(identifiers, list) else identifiers for item in identifiers: try: found_url = helpers.URL_REGEX.search(item).group() if 'viewcontent' not in found_url and '/pubmed/' in found_url: return found_url.decode('utf-8') except AttributeError: continue class PubMedHarvester(OAIHarvester): short_name = 'pubmedcentral' long_name = 'PubMed Central' url = 'http://www.ncbi.nlm.nih.gov/pmc/' schema = helpers.updated_schema( schemas.OAISCHEMA, { "uris": { "canonicalUri": ('//dc:identifier/node()', oai_extract_url_pubmed) } } ) base_url = 'http://www.pubmedcentral.nih.gov/oai/oai.cgi' property_list = [ 'type', 'source', 'publisher', 'rights', 'format', 'setSpec', 'date', 'identifier' ]
Add API call to top docstring
""" Harvester of PubMed Central for the SHARE notification service Example API call: http://www.pubmedcentral.nih.gov/oai/oai.cgi?verb=ListRecords&metadataPrefix=oai_dc&from=2015-04-13&until=2015-04-14 """ from __future__ import unicode_literals from scrapi.base import schemas from scrapi.base import helpers from scrapi.base import OAIHarvester def oai_extract_url_pubmed(identifiers): identifiers = [identifiers] if not isinstance(identifiers, list) else identifiers for item in identifiers: try: found_url = helpers.URL_REGEX.search(item).group() if 'viewcontent' not in found_url and '/pubmed/' in found_url: return found_url.decode('utf-8') except AttributeError: continue class PubMedHarvester(OAIHarvester): short_name = 'pubmedcentral' long_name = 'PubMed Central' url = 'http://www.ncbi.nlm.nih.gov/pmc/' schema = helpers.updated_schema( schemas.OAISCHEMA, { "uris": { "canonicalUri": ('//dc:identifier/node()', oai_extract_url_pubmed) } } ) base_url = 'http://www.pubmedcentral.nih.gov/oai/oai.cgi' property_list = [ 'type', 'source', 'publisher', 'rights', 'format', 'setSpec', 'date', 'identifier' ]
from .KEYNOTFOUND import KEYNOTFOUNDIN1
from .dict_diff import dict_diff


def get_old_dict_values(old, new):
    # Returns the "old" value for two dicts.
    diff = dict_diff(old, new)
    return {key: diff[key][0] if diff[key][0] != KEYNOTFOUNDIN1 else None for key in diff}
Expand an object comprehension onto several lines
from .KEYNOTFOUND import KEYNOTFOUNDIN1
from .dict_diff import dict_diff


def get_old_dict_values(old, new):
    # Returns the "old" value for two dicts.
    diff = dict_diff(old, new)
    return {key: diff[key][0]
            if diff[key][0] != KEYNOTFOUNDIN1
            else None
            for key in diff}
import logging
import time


class PeriodicFilter:
    """
        Periodic Filter to help keep down clutter in the console.
        Simply add this filter to your logger and the logger will
        only print periodically.

        The logger will always print logging levels of WARNING or higher
    """

    def __init__(self, period, bypassLevel=logging.WARN):
        '''
        :param period: Wait period (in seconds) between logs
        :param bypassLevel: Lowest logging level that the filter should ignore
        '''

        self._period = period
        self._loggingLoop = True
        self._last_log = -period
        self._bypassLevel = bypassLevel

    def filter(self, record):
        """Performs filtering action for logger"""
        self._refresh_logger()
        return self._loggingLoop or record.levelno >= self._bypassLevel

    def _refresh_logger(self):
        """Determine if the log wait period has passed"""
        now = time.monotonic()
        self._loggingLoop = False
        if now - self._last_log > self._period:
            self._loggingLoop = True
            self._last_log = now
Create example usage. Rename bypass_level
import logging
import time


class PeriodicFilter:
    """
        Periodic Filter to help keep down clutter in the console.
        Simply add this filter to your logger and the logger will
        only print periodically.

        The logger will always print logging levels of WARNING or higher,
        unless given a different bypass level

        Example

        class Component1:

            def setup(self):
                # Set period to 3 seconds, set bypass_level to WARN
                self.logger.addFilter(PeriodicFilter(3, bypass_level=logging.WARN))

            def execute(self):
                # This message will be printed once every three seconds
                self.logger.info('Component1 Executing')

                # This message will be printed out every loop
                self.logger.warn("Uh oh, this shouldn't have happened...")
    """

    def __init__(self, period, bypass_level=logging.WARN):
        '''
        :param period: Wait period (in seconds) between logs
        :param bypass_level: Lowest logging level that the filter should ignore
        '''

        self._period = period
        self._loggingLoop = True
        self._last_log = -period
        self._bypass_level = bypass_level

    def filter(self, record):
        """Performs filtering action for logger"""
        self._refresh_logger()
        return self._loggingLoop or record.levelno >= self._bypass_level

    def _refresh_logger(self):
        """Determine if the log wait period has passed"""
        now = time.monotonic()
        self._loggingLoop = False
        if now - self._last_log > self._period:
            self._loggingLoop = True
            self._last_log = now
# Copyright (c) 2012-2020, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 18.6.0


from . import AWSObject
from troposphere import Tags


class Assignment(AWSObject):
    resource_type = "AWS::SSO::Assignment"

    props = {
        'InstanceArn': (basestring, True),
        'PermissionSetArn': (basestring, True),
        'PrincipalId': (basestring, True),
        'PrincipalType': (basestring, True),
        'TargetId': (basestring, True),
        'TargetType': (basestring, True),
    }


class PermissionSet(AWSObject):
    resource_type = "AWS::SSO::PermissionSet"

    props = {
        'Description': (basestring, False),
        'InlinePolicy': (basestring, False),
        'InstanceArn': (basestring, True),
        'ManagedPolicies': ([basestring], False),
        'Name': (basestring, True),
        'RelayStateType': (basestring, False),
        'SessionDuration': (basestring, False),
        'Tags': (Tags, False),
    }
Update SSO per 2020-12-18 changes
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 25.0.0


from . import AWSObject
from . import AWSProperty
from troposphere import Tags


class Assignment(AWSObject):
    resource_type = "AWS::SSO::Assignment"

    props = {
        'InstanceArn': (basestring, True),
        'PermissionSetArn': (basestring, True),
        'PrincipalId': (basestring, True),
        'PrincipalType': (basestring, True),
        'TargetId': (basestring, True),
        'TargetType': (basestring, True),
    }


class AccessControlAttributeValueSourceList(AWSProperty):
    props = {
        'AccessControlAttributeValueSourceList': ([basestring], False),
    }


class AccessControlAttributeValue(AWSProperty):
    props = {
        'Source': (AccessControlAttributeValueSourceList, True),
    }


class AccessControlAttribute(AWSProperty):
    props = {
        'Key': (basestring, True),
        'Value': (AccessControlAttributeValue, True),
    }


class InstanceAccessControlAttributeConfiguration(AWSObject):
    resource_type = "AWS::SSO::InstanceAccessControlAttributeConfiguration"

    props = {
        'AccessControlAttributes': ([AccessControlAttribute], False),
        'InstanceAccessControlAttributeConfiguration': (dict, False),
        'InstanceArn': (basestring, True),
    }


class PermissionSet(AWSObject):
    resource_type = "AWS::SSO::PermissionSet"

    props = {
        'Description': (basestring, False),
        'InlinePolicy': (dict, False),
        'InstanceArn': (basestring, True),
        'ManagedPolicies': ([basestring], False),
        'Name': (basestring, True),
        'RelayStateType': (basestring, False),
        'SessionDuration': (basestring, False),
        'Tags': (Tags, False),
    }
# third party
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String

# relative
from . import Base


class NodeRoute(Base):
    __tablename__ = "node_route"

    id = Column(Integer(), primary_key=True, autoincrement=True)
    node_id = Column(Integer, ForeignKey("node.id"))
    host_or_ip = Column(String(255))
    is_vpn = Column(Boolean(), default=False)
ADD vpn_endpoint and vpn_key columns
# third party
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String

# relative
from . import Base


class NodeRoute(Base):
    __tablename__ = "node_route"

    id = Column(Integer(), primary_key=True, autoincrement=True)
    node_id = Column(Integer, ForeignKey("node.id"))
    host_or_ip = Column(String(255), default="")
    is_vpn = Column(Boolean(), default=False)
    vpn_endpoint = Column(String(255), default="")
    vpn_key = Column(String(255), default="")
from setuptools import setup, find_packages

setup(
    name='acc_provision',
    version='1.9.6',
    description='Tool to provision ACI for ACI Containers Controller',
    author="Cisco Systems, Inc.",
    author_email="apicapi@noironetworks.com",
    url='http://github.com/noironetworks/aci-containers/',
    license="http://www.apache.org/licenses/LICENSE-2.0",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'acc-provision=acc_provision.acc_provision:main',
        ]
    },
    install_requires=[
        'requests',
        'pyyaml',
        'jinja2',
        'pyopenssl',
    ],
)
Update acc-provision version to 1.9.7
from setuptools import setup, find_packages

setup(
    name='acc_provision',
    version='1.9.7',
    description='Tool to provision ACI for ACI Containers Controller',
    author="Cisco Systems, Inc.",
    author_email="apicapi@noironetworks.com",
    url='http://github.com/noironetworks/aci-containers/',
    license="http://www.apache.org/licenses/LICENSE-2.0",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'acc-provision=acc_provision.acc_provision:main',
        ]
    },
    install_requires=[
        'requests',
        'pyyaml',
        'jinja2',
        'pyopenssl',
    ],
)
from django.contrib.auth.models import User


def user_new_str(self):
    return self.username if self.get_full_name() == "" else self.get_full_name()


# Replace the __str__ method in the User class with our new implementation
User.__str__ = user_new_str
Change titles for the site
from django.contrib import admin
from django.contrib.auth.models import User


def user_new_str(self):
    return self.username if self.get_full_name() == "" else self.get_full_name()


# Replace the __str__ method in the User class with our new implementation
User.__str__ = user_new_str

admin.site.site_header = 'SENKUMBA'
admin.site.site_title = 'SENKUMBA'
admin.site.index_title = 'SENKUMBA'
import sublime, sublime_plugin, webbrowser

QD_URL = "http://localhost:5000/"


class GoToQuantifiedDevDashboardCommand(sublime_plugin.TextCommand):

    def run(self, edit):
        SETTINGS = {}
        SETTINGS_FILE = "QuantifiedDev.sublime-settings"
        SETTINGS = sublime.load_settings(SETTINGS_FILE)
        stream_id = SETTINGS.get("streamId")
        read_token = SETTINGS.get("readToken")
        qd_url = QD_URL
        url = "%(qd_url)sdashboard?streamId=%(stream_id)s&readToken=%(read_token)s" % locals()
        webbrowser.open_new_tab(url)
Fix url to be consistent.
import sublime, sublime_plugin, webbrowser

QD_URL = "http://localhost:5000"


class GoToQuantifiedDevDashboardCommand(sublime_plugin.TextCommand):

    def run(self, edit):
        SETTINGS = {}
        SETTINGS_FILE = "QuantifiedDev.sublime-settings"
        SETTINGS = sublime.load_settings(SETTINGS_FILE)
        stream_id = SETTINGS.get("streamId")
        read_token = SETTINGS.get("readToken")
        qd_url = QD_URL
        url = "%(qd_url)s/dashboard?streamId=%(stream_id)s&readToken=%(read_token)s" % locals()
        webbrowser.open_new_tab(url)
from django.conf import settings


class XmlJsonImportModuleException(Exception):
    pass
Throw exception for missing XSLT_FILES_DIR setting
from django.conf import settings


class XmlJsonImportModuleException(Exception):
    pass


if not hasattr(settings, 'XSLT_FILES_DIR'):
    raise XmlJsonImportModuleException('Settings must contain XSLT_FILES_DIR parameter')
from django.conf.urls import url

from api.logs import views

urlpatterns = [
    url(r'^(?P<log_id>\w+)/$', views.NodeLogDetail.as_view(), name=views.NodeLogDetail.view_name),
    url(r'^(?P<log_id>\w+)/nodes/$', views.LogNodeList.as_view(), name=views.LogNodeList.view_name),
]
Add /v2/logs/log_id/added_contributors/ to list of URLs.
from django.conf.urls import url

from api.logs import views

urlpatterns = [
    url(r'^(?P<log_id>\w+)/$', views.NodeLogDetail.as_view(), name=views.NodeLogDetail.view_name),
    url(r'^(?P<log_id>\w+)/nodes/$', views.LogNodeList.as_view(), name=views.LogNodeList.view_name),
    url(r'^(?P<log_id>\w+)/added_contributors/$', views.NodeLogAddedContributors.as_view(), name=views.NodeLogAddedContributors.view_name),
]
from setuptools import setup, find_packages
from os import path

here = path.abspath(path.dirname(__file__))

description = 'The official Python3 Domo API SDK - Domo, Inc.'
long_description = 'See https://github.com/domoinc/domo-python-sdk for more details.'

setup(
    name='pydomo',
    version='0.3.0.01',
    description=description,
    long_description=long_description,
    author='Jeremy Morris',
    author_email='jeremy.morris@domo.com',
    url='https://github.com/domoinc/domo-python-sdk',
    download_url='https://github.com/domoinc/domo-python-sdk/tarball/0.2.2.1',
    keywords='domo api sdk',
    license='MIT',
    packages=find_packages(exclude=['examples']),
    install_requires=[
        'requests',
        'requests_toolbelt',
    ],
    python_requires='>=3',
)
Update to prep for v0.3.0.2
from setuptools import setup, find_packages
from os import path

here = path.abspath(path.dirname(__file__))

description = 'The official Python3 Domo API SDK - Domo, Inc.'
long_description = 'See https://github.com/domoinc/domo-python-sdk for more details.'

setup(
    name='pydomo',
    version='0.3.0.2',
    description=description,
    long_description=long_description,
    author='Jeremy Morris',
    author_email='jeremy.morris@domo.com',
    url='https://github.com/domoinc/domo-python-sdk',
    download_url='https://github.com/domoinc/domo-python-sdk/tarball/0.2.2.1',
    keywords='domo api sdk',
    license='MIT',
    packages=find_packages(exclude=['examples']),
    install_requires=[
        'requests',
        'requests_toolbelt',
    ],
    python_requires='>=3',
)
#!/usr/bin/env python

from flask import Flask, abort, make_response
from socket import inet_aton, error as socket_error

from .db import Database

app = Flask(__name__)
db = Database()


@app.route('/ip/<ip>')
def lookup(ip):
    try:
        k = inet_aton(ip)
    except socket_error:
        abort(400)

    info_as_json = db.lookup(k)
    if info_as_json is None:
        abort(404)

    response = make_response(info_as_json)
    response.headers['Content-type'] = 'application/json'
    return response


if __name__ == '__main__':
    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='localhost')
    parser.add_argument('--port', default=5555, type=int)
    parser.add_argument('--debug', default=False, action='store_true')
    args = parser.parse_args()

    try:
        app.run(**vars(args))
    except KeyboardInterrupt:
        sys.stderr.write("Aborting...\n")
Use WHIP_SETTINGS environment var for Flask app
#!/usr/bin/env python

from flask import Flask, abort, make_response
from socket import inet_aton, error as socket_error

from .db import Database

app = Flask(__name__)
app.config.from_envvar('WHIP_SETTINGS')
db = Database(app.config['DATABASE_DIR'])


@app.route('/ip/<ip>')
def lookup(ip):
    try:
        k = inet_aton(ip)
    except socket_error:
        abort(400)

    info_as_json = db.lookup(k)
    if info_as_json is None:
        abort(404)

    response = make_response(info_as_json)
    response.headers['Content-type'] = 'application/json'
    return response


if __name__ == '__main__':
    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='localhost')
    parser.add_argument('--port', default=5555, type=int)
    parser.add_argument('--debug', default=False, action='store_true')
    args = parser.parse_args()

    try:
        app.run(**vars(args))
    except KeyboardInterrupt:
        sys.stderr.write("Aborting...\n")
from setuptools import setup


setup(
    name='syslog2IRC',
    version='0.8',
    description='A proxy to forward syslog messages to IRC',
    url='http://homework.nwsnet.de/releases/c474/#syslog2irc',
    author='Jochen Kupperschmidt',
    author_email='homework@nwsnet.de',
    license='MIT',
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Communications :: Chat :: Internet Relay Chat',
        'Topic :: Internet',
        'Topic :: System :: Logging',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking :: Monitoring',
        'Topic :: System :: Systems Administration',
    ],
)
Include README content as long description.
# -*- coding: utf-8 -*-

import codecs

from setuptools import setup


with codecs.open('README.rst', encoding='utf-8') as f:
    long_description = f.read()


setup(
    name='syslog2IRC',
    version='0.8',
    description='A proxy to forward syslog messages to IRC',
    long_description=long_description,
    url='http://homework.nwsnet.de/releases/c474/#syslog2irc',
    author='Jochen Kupperschmidt',
    author_email='homework@nwsnet.de',
    license='MIT',
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Communications :: Chat :: Internet Relay Chat',
        'Topic :: Internet',
        'Topic :: System :: Logging',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking :: Monitoring',
        'Topic :: System :: Systems Administration',
    ],
)
#!/usr/bin/env python

from setuptools import setup

execfile('kronos/version.py')

setup(
    name = 'django-kronos',
    version = __version__,
    description = 'Kronos is a Django application that makes it easy to define and schedule tasks with cron.',
    long_description = open('README.rst').read(),
    author = 'Johannes Gorset',
    author_email = 'jgorset@gmail.com',
    url = 'http://github.com/jgorset/kronos',
    packages = ['kronos', 'kronos.management', 'kronos.management.commands']
)
Add history to long description
#!/usr/bin/env python

from setuptools import setup

execfile('kronos/version.py')

readme = open('README.rst').read()
history = open('HISTORY.rst').read()

setup(
    name = 'django-kronos',
    version = __version__,
    description = 'Kronos is a Django application that makes it easy to define and schedule tasks with cron.',
    long_description = readme + '\n\n' + history,
    author = 'Johannes Gorset',
    author_email = 'jgorset@gmail.com',
    url = 'http://github.com/jgorset/kronos',
    packages = ['kronos', 'kronos.management', 'kronos.management.commands']
)
__author__ = 'katharine'

import sys
from setuptools import setup, find_packages

requires = [
    'libpebble2==0.0.14',
    'httplib2==0.9.1',
    'oauth2client==1.4.12',
    'progressbar2==2.7.3',
    'pyasn1==0.1.8',
    'pyasn1-modules==0.0.6',
    'pypng==0.0.17',
    'pyqrcode==1.1',
    'requests==2.7.0',
    'rsa==3.1.4',
    'pyserial==2.7',
    'six==1.9.0',
    'websocket-client==0.32.0',
    'wheel==0.24.0',
    'colorama==0.3.3',
]

if sys.version_info < (3, 4, 0):
    requires.append('enum34==1.0.4')

setup(name='pebble-tool',
      version='3.6',
      description='Tool for interacting with pebbles.',
      url='https://github.com/pebble/pebble-tool',
      author='Pebble Technology Corporation',
      author_email='katharine@pebble.com',
      license='MIT',
      packages=find_packages(),
      install_requires=requires,
      entry_points={
          'console_scripts': ['pebble=pebble_tool:run_tool'],
      },
      zip_safe=True)
Make sure our python alias is included in packaged versions.
__author__ = 'katharine'

import sys
from setuptools import setup, find_packages

requires = [
    'libpebble2==0.0.14',
    'httplib2==0.9.1',
    'oauth2client==1.4.12',
    'progressbar2==2.7.3',
    'pyasn1==0.1.8',
    'pyasn1-modules==0.0.6',
    'pypng==0.0.17',
    'pyqrcode==1.1',
    'requests==2.7.0',
    'rsa==3.1.4',
    'pyserial==2.7',
    'six==1.9.0',
    'websocket-client==0.32.0',
    'wheel==0.24.0',
    'colorama==0.3.3',
]

if sys.version_info < (3, 4, 0):
    requires.append('enum34==1.0.4')

setup(name='pebble-tool',
      version='3.6',
      description='Tool for interacting with pebbles.',
      url='https://github.com/pebble/pebble-tool',
      author='Pebble Technology Corporation',
      author_email='katharine@pebble.com',
      license='MIT',
      packages=find_packages(),
      package_data={
          'pebble_tool.commands.sdk': ['python'],
      },
      install_requires=requires,
      entry_points={
          'console_scripts': ['pebble=pebble_tool:run_tool'],
      },
      zip_safe=False)
# Python imports
from setuptools import setup

# Project imports
from notable import app

# Attributes
AUTHOR = 'John McFarlane'
DESCRIPTION = 'A very simple note taking application'
EMAIL = 'john.mcfarlane@rockfloat.com'
NAME = 'Notable'
PYPI = 'http://pypi.python.org/packages/source/N/Notable'
URL = 'https://github.com/jmcfarlane/Notable'

CLASSIFIERS = """
Development Status :: 2 - Pre-Alpha
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python
Topic :: Internet :: WWW/HTTP
Intended Audience :: End Users/Desktop
Topic :: Office/Business :: News/Diary
Topic :: Security :: Cryptography
Topic :: Utilities
"""

setup(
    author = AUTHOR,
    author_email = EMAIL,
    classifiers = [c for c in CLASSIFIERS.split('\n') if c],
    description = DESCRIPTION,
    download_url = '%s/Notable-%s.tar.gz' % (PYPI, app.version),
    include_package_data = True,
    name = NAME,
    packages = ['notable'],
    scripts = ['scripts/notable'],
    url = URL,
    version = app.version
)
Set nose.collector as the test_suite
# Python imports
from setuptools import setup

# Project imports
from notable import app

# Attributes
AUTHOR = 'John McFarlane'
DESCRIPTION = 'A very simple note taking application'
EMAIL = 'john.mcfarlane@rockfloat.com'
NAME = 'Notable'
PYPI = 'http://pypi.python.org/packages/source/N/Notable'
URL = 'https://github.com/jmcfarlane/Notable'

CLASSIFIERS = """
Development Status :: 2 - Pre-Alpha
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python
Topic :: Internet :: WWW/HTTP
Intended Audience :: End Users/Desktop
Topic :: Office/Business :: News/Diary
Topic :: Security :: Cryptography
Topic :: Utilities
"""

setup(
    author = AUTHOR,
    author_email = EMAIL,
    classifiers = [c for c in CLASSIFIERS.split('\n') if c],
    description = DESCRIPTION,
    download_url = '%s/Notable-%s.tar.gz' % (PYPI, app.version),
    include_package_data = True,
    name = NAME,
    packages = ['notable'],
    scripts = ['scripts/notable'],
    test_suite='nose.collector',
    url = URL,
    version = app.version
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from setuptools import setup


def version():
    """Return version string."""
    with open('autopep8.py') as input_file:
        for line in input_file:
            if line.startswith('__version__'):
                import ast
                return ast.literal_eval(line.split('=')[1].strip())


with open('README.rst') as readme:
    setup(
        name='autopep8',
        version=version(),
        description='A tool that automatically formats Python code to conform '
                    'to the PEP 8 style guide',
        long_description=readme.read(),
        license='Expat License',
        author='Hideo Hattori',
        author_email='hhatto.jp@gmail.com',
        url='https://github.com/hhatto/autopep8',
        classifiers=[
            'Development Status :: 4 - Beta',
            'Environment :: Console',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 3',
            'Programming Language :: Unix Shell',
        ],
        keywords='automation, pep8, format',
        install_requires=['pep8'],
        test_suite='test.test_autopep8',
        py_modules=['autopep8'],
        zip_safe=False,
        entry_points={'console_scripts': ['autopep8 = autopep8:main']},
    )
Make pep8 dependency more explicit
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from setuptools import setup


def version():
    """Return version string."""
    with open('autopep8.py') as input_file:
        for line in input_file:
            if line.startswith('__version__'):
                import ast
                return ast.literal_eval(line.split('=')[1].strip())


with open('README.rst') as readme:
    setup(
        name='autopep8',
        version=version(),
        description='A tool that automatically formats Python code to conform '
                    'to the PEP 8 style guide',
        long_description=readme.read(),
        license='Expat License',
        author='Hideo Hattori',
        author_email='hhatto.jp@gmail.com',
        url='https://github.com/hhatto/autopep8',
        classifiers=[
            'Development Status :: 4 - Beta',
            'Environment :: Console',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 3',
            'Programming Language :: Unix Shell',
        ],
        keywords='automation, pep8, format',
        install_requires=['pep8 >= 1.3'],
        test_suite='test.test_autopep8',
        py_modules=['autopep8'],
        zip_safe=False,
        entry_points={'console_scripts': ['autopep8 = autopep8:main']},
    )
from better_zoom import BetterZoom
from better_selecting_zoom import BetterSelectingZoom
from broadcaster import BroadcasterTool
from dataprinter import DataPrinter
from data_label_tool import DataLabelTool
from drag_zoom import DragZoom
from enthought.enable.tools.drag_tool import DragTool
from draw_points_tool import DrawPointsTool
from highlight_tool import HighlightTool
from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay
from lasso_selection import LassoSelection
from legend_tool import LegendTool
from legend_highlighter import LegendHighlighter
from line_inspector import LineInspector
from line_segment_tool import LineSegmentTool
from move_tool import MoveTool
from pan_tool import PanTool
from point_marker import PointMarker
from range_selection import RangeSelection
from range_selection_2d import RangeSelection2D
from range_selection_overlay import RangeSelectionOverlay
from regression_lasso import RegressionLasso, RegressionOverlay
from save_tool import SaveTool
from scatter_inspector import ScatterInspector
from select_tool import SelectTool
from simple_inspector import SimpleInspectorTool
from tracking_pan_tool import TrackingPanTool
from tracking_zoom import TrackingZoom
from traits_tool import TraitsTool
from zoom_tool import ZoomTool

# EOF
[Chaco] Remove deprecated DragZoom from Chaco tools API to eliminate irrelevant BaseZoomTool deprecation warning. DragZoom is still used in 4 Chaco examples
from better_zoom import BetterZoom
from better_selecting_zoom import BetterSelectingZoom
from broadcaster import BroadcasterTool
from dataprinter import DataPrinter
from data_label_tool import DataLabelTool
from enthought.enable.tools.drag_tool import DragTool
from draw_points_tool import DrawPointsTool
from highlight_tool import HighlightTool
from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay
from lasso_selection import LassoSelection
from legend_tool import LegendTool
from legend_highlighter import LegendHighlighter
from line_inspector import LineInspector
from line_segment_tool import LineSegmentTool
from move_tool import MoveTool
from pan_tool import PanTool
from point_marker import PointMarker
from range_selection import RangeSelection
from range_selection_2d import RangeSelection2D
from range_selection_overlay import RangeSelectionOverlay
from regression_lasso import RegressionLasso, RegressionOverlay
from save_tool import SaveTool
from scatter_inspector import ScatterInspector
from select_tool import SelectTool
from simple_inspector import SimpleInspectorTool
from tracking_pan_tool import TrackingPanTool
from tracking_zoom import TrackingZoom
from traits_tool import TraitsTool
from zoom_tool import ZoomTool

# EOF
#
# Copyright (c) 2011 rPath, Inc.
#
# All Rights Reserved
#

BAD_REQUEST = 400
NOT_FOUND = 404
INTERNAL_SERVER_ERROR = 500


class RbuilderError(Exception):
    "An unknown error has occured."
    status = INTERNAL_SERVER_ERROR

    def __init__(self, **kwargs):
        cls = self.__class__
        self.msg = cls.__doc__
        self.kwargs = kwargs

    def __str__(self):
        try:
            return self.msg % self.kwargs
        except TypeError:
            return self.msg


class CollectionPageNotFound(RbuilderError):
    "The requested page of the collection was not found."
    status = NOT_FOUND


class UknownFilterOperator(RbuilderError):
    "%(filter)s is an invalid filter operator."
    status = BAD_REQUEST


class InvalidFilterValue(RbuilderError):
    "%(value)s in an invalid value for filter operator %(filter)s"
    status = BAD_REQUEST
Fix typo in exception name
#
# Copyright (c) 2011 rPath, Inc.
#
# All Rights Reserved
#

BAD_REQUEST = 400
NOT_FOUND = 404
INTERNAL_SERVER_ERROR = 500


class RbuilderError(Exception):
    "An unknown error has occured."
    status = INTERNAL_SERVER_ERROR

    def __init__(self, **kwargs):
        cls = self.__class__
        self.msg = cls.__doc__
        self.kwargs = kwargs

    def __str__(self):
        try:
            return self.msg % self.kwargs
        except TypeError:
            return self.msg


class CollectionPageNotFound(RbuilderError):
    "The requested page of the collection was not found."
    status = NOT_FOUND


class UnknownFilterOperator(RbuilderError):
    "%(filter)s is an invalid filter operator."
    status = BAD_REQUEST


class InvalidFilterValue(RbuilderError):
    "%(value)s in an invalid value for filter operator %(filter)s"
    status = BAD_REQUEST
try:
    from functools import wraps
except ImportError:
    from django.utils.functional import wraps

from feincms.models import Page


def add_page_to_extra_context(view_func):
    def inner(request, *args, **kwargs):
        kwargs.setdefault('extra_context', {})
        kwargs['extra_context']['feincms_page'] = Page.objects.best_match_from_request(request)

        return view_func(request, *args, **kwargs)
    return wraps(view_func)(inner)
Fix page import path in view decorator module
try:
    from functools import wraps
except ImportError:
    from django.utils.functional import wraps

from feincms.module.page.models import Page


def add_page_to_extra_context(view_func):
    def inner(request, *args, **kwargs):
        kwargs.setdefault('extra_context', {})
        kwargs['extra_context']['feincms_page'] = Page.objects.best_match_from_request(request)

        return view_func(request, *args, **kwargs)
    return wraps(view_func)(inner)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def make_set():
    pass


def link():
    pass


def find():
    pass


def union():
    pass


def kruskal():
    """Kruskal's algorithm for minimum spanning tree in weighted graph.

    Time complexity for graph G(V, E):
    O(|E|+|V|+|E|log(|V|)) = O(|E|log(|V|^2)) = O(|E|log(|V|)).
    """
    pass


def main():
    w_graph_d = {
        'a': {'b': 1, 'd': 4, 'e': 3},
        'b': {'a': 1, 'd': 4, 'e': 2},
        'c': {'e': 4, 'f': 5},
        'd': {'a': 4, 'b': 4, 'e': 4},
        'e': {'a': 3, 'b': 2, 'c': 4, 'd': 4, 'f': 7},
        'f': {'c': 5, 'e': 7}
    }
    print('w_graph_d:\n{}'.format(w_graph_d))
    print('Kruskal\'s minimum spanning tree:')
    pass


if __name__ == '__main__':
    main()
Revise doc string by adding "undirected"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def make_set():
    pass


def link():
    pass


def find():
    pass


def union():
    pass


def kruskal():
    """Kruskal's algorithm for minimum spanning tree
    in weighted undirected graph.

    Time complexity for graph G(V, E):
    O(|E|+|V|+|E|log(|V|)) = O(|E|log(|V|^2)) = O(|E|log(|V|)).
    """
    pass


def main():
    w_graph_d = {
        'a': {'b': 1, 'd': 4, 'e': 3},
        'b': {'a': 1, 'd': 4, 'e': 2},
        'c': {'e': 4, 'f': 5},
        'd': {'a': 4, 'b': 4, 'e': 4},
        'e': {'a': 3, 'b': 2, 'c': 4, 'd': 4, 'f': 7},
        'f': {'c': 5, 'e': 7}
    }
    print('w_graph_d:\n{}'.format(w_graph_d))
    print('Kruskal\'s minimum spanning tree:')
    pass


if __name__ == '__main__':
    main()
import random
import unittest

from aioes.pool import RandomSelector, RoundRobinSelector


class TestRandomSelector(unittest.TestCase):

    def setUp(self):
        random.seed(123456)

    def tearDown(self):
        random.seed(None)

    def test_select(self):
        s = RandomSelector()
        r = s.select([1, 2, 3])
        self.assertEqual(2, r)


class TestRoundRobinSelector(unittest.TestCase):

    def test_select(self):
        s = RoundRobinSelector()
        r = s.select([1, 2, 3])
        self.assertEqual(2, r)
        r = s.select([1, 2, 3])
        self.assertEqual(3, r)
        r = s.select([1, 2, 3])
        self.assertEqual(1, r)
        r = s.select([1, 2, 3])
        self.assertEqual(2, r)
Add more tests for pool
import asyncio
import random
import unittest

from aioes.pool import RandomSelector, RoundRobinSelector, ConnectionPool
from aioes.transport import Endpoint
from aioes.connection import Connection


class TestRandomSelector(unittest.TestCase):

    def setUp(self):
        random.seed(123456)

    def tearDown(self):
        random.seed(None)

    def test_select(self):
        s = RandomSelector()
        r = s.select([1, 2, 3])
        self.assertEqual(2, r)


class TestRoundRobinSelector(unittest.TestCase):

    def test_select(self):
        s = RoundRobinSelector()
        r = s.select([1, 2, 3])
        self.assertEqual(2, r)
        r = s.select([1, 2, 3])
        self.assertEqual(3, r)
        r = s.select([1, 2, 3])
        self.assertEqual(1, r)
        r = s.select([1, 2, 3])
        self.assertEqual(2, r)


class TestConnectionPool(unittest.TestCase):

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)

    def tearDown(self):
        self.loop.close()

    def make_pool(self):
        conn = Connection(Endpoint('localhost', 9200), loop=self.loop)
        pool = ConnectionPool([conn], loop=self.loop)
        self.addCleanup(pool.close)
        return pool

    def test_ctor(self):
        pool = self.make_pool()
        self.assertAlmostEqual(60, pool.dead_timeout)
        self.assertAlmostEqual(5, pool.timeout_cutoff)
#import spam
import filter_lta

#VALID_FILES = filter_lta.VALID_OBS().split('\n')

print filter_lta.VALID_OBS()

#def extract_basename():

def main():
    #Convert the LTA file to the UVFITS format
    #Generates UVFITS file with same basename as LTA file
    spam.convert_lta_to_uvfits('Name of the file')

    #Take generated UVFITS file as input and precalibrate targets
    #Generates files (RRLL with the name of the source (can be obtained using ltahdr)
    spam.precalibrate_targets('Name of UVFITS output file')

    #Take the generated RRLL UVFITS file and process to generate the image
    #Generates final image <source name>.SP2B.PBCOR.FITS
    #Also generates log file spam_<source name>_<start date>_start_time>.log in
    #datfil dir
    spam.process_target()
Add code for thread to filter valid files for processing
#import spam
import os
import filter_lta

#List of all directories containing valid observations
VALID_FILES = filter_lta.VALID_OBS()

#List of all directories for current threads to process
THREAD_FILES = VALID_FILES[0:len(VALID_FILES):5]

print THREAD_FILES

def main():
    for i in THREAD_FILES:
        LTA_FILES = os.chdir(i)
        """
        #Convert the LTA file to the UVFITS format
        #Generates UVFITS file with same basename as LTA file
        spam.convert_lta_to_uvfits('Name of the file')

        #Take generated UVFITS file as input and precalibrate targets
        #Generates files (RRLL with the name of the source (can be obtained using ltahdr)
        spam.precalibrate_targets('Name of UVFITS output file')

        #Take the generated RRLL UVFITS file and process to generate the image
        #Generates final image <source name>.SP2B.PBCOR.FITS
        #Also generates log file spam_<source name>_<start date>_start_time>.log in
        #datfil dir
        spam.process_target()
        """
import rospy
from time import time


def wait_for_param(param_name, timeout=None, poll_rate=0.1):
    '''Blocking wait for a parameter named $parameter_name to exist
    Poll at frequency $poll_rate
    Once the parameter exists, return get and return it.

    This function intentionally leaves failure logging duties to the developer
    '''
    start_time = time()
    rate = rospy.Rate(poll_rate)
    while not rospy.is_shutdown():
        # Check if the parameter now exists
        if rospy.has_param(param_name):
            return rospy.get_param(param_name)

        # If we exceed a defined timeout, return None
        if timeout is not None:
            if time() - start_time > timeout:
                return None

        # Continue to poll at poll_rate
        rate.sleep()
UTILS: Add init-helper 'wait for subscriber'

For integration-testing purposes it is often useful to wait until a particular node subscribes to you
import rospy
import rostest
import time


def wait_for_param(param_name, timeout=None, poll_rate=0.1):
    '''Blocking wait for a parameter named $parameter_name to exist
    Poll at frequency $poll_rate
    Once the parameter exists, return get and return it.

    This function intentionally leaves failure logging duties to the developer
    '''
    start_time = time.time()
    rate = rospy.Rate(poll_rate)
    while not rospy.is_shutdown():
        # Check if the parameter now exists
        if rospy.has_param(param_name):
            return rospy.get_param(param_name)

        # If we exceed a defined timeout, return None
        if timeout is not None:
            if time.time() - start_time > timeout:
                return None

        # Continue to poll at poll_rate
        rate.sleep()


def wait_for_subscriber(node_name, topic, timeout=5.0):
    '''Blocks until $node_name subscribes to $topic
    Useful mostly in integration tests --
    I would counsel against use elsewhere
    '''
    end_time = time.time() + timeout

    resolved_topic = rospy.resolve_name(topic)
    resolved_node = rospy.resolve_name(node_name)

    # Wait for time-out or ros-shutdown
    while (time.time() < end_time) and (not rospy.is_shutdown()):
        subscribed = rostest.is_subscriber(
            rospy.resolve_name(topic),
            rospy.resolve_name(node_name)
        )
        # Success scenario: node subscribes
        if subscribed:
            break
        time.sleep(0.1)

    # Could do this with a while/else
    # But chose to explicitly check
    success = rostest.is_subscriber(
        rospy.resolve_name(topic),
        rospy.resolve_name(node_name)
    )
    return success