Dataset preview — each record has three string columns:

input        the source file before the change (0 to 2.17k chars)
instruction  the commit message describing the change (18 to 2.94k chars)
output       the source file after the change (47 to 3.36k chars)

Records follow, separated by "---".
---
input:

# pylint: disable=unused-import, unused-variable, missing-docstring

def _readline():
    try:
        import readline
    except ImportError:
        print("Module readline not available.")
    else:
        import rlcompleter
        readline.parse_and_bind("tab: complete")

        import os
        histfile = os.path.join(os.environ["HOME"], '.python_history')
        try:
            readline.read_history_file(histfile)
        except IOError:
            pass
        import atexit
        atexit.register(readline.write_history_file, histfile)
        del os, histfile

_readline()
del _readline

import sys
sys.ps1 = "\001\033[01;33m\002>>>\001\033[00m\002 "
sys.ps2 = "\001\033[01;33m\002...\001\033[00m\002 "

instruction:

Move python rl history file just to help clean up ~/

output:

# pylint: disable=unused-import, unused-variable, missing-docstring

def _readline():
    try:
        import readline
    except ImportError:
        print("Module readline not available.")
    else:
        import rlcompleter
        readline.parse_and_bind("tab: complete")

        import os
        histfile = os.path.join(os.environ["HOME"], 'python', '.history')
        try:
            readline.read_history_file(histfile)
        except IOError:
            pass
        import atexit
        atexit.register(readline.write_history_file, histfile)
        del os, histfile

_readline()
del _readline

import sys
sys.ps1 = "\001\033[01;33m\002>>>\001\033[00m\002 "
sys.ps2 = "\001\033[01;33m\002...\001\033[00m\002 "
---
input:

import os
import string
import importlib
import traceback

from .. import irc

def execute(**kwargs):
    module_string = string.join([__name__, kwargs['command']], '.')
    module = None
    try:
        module = importlib.import_module(module_string)
    except ImportError as e:
        traceback.print_exc()
        irc.send_to_channel(kwargs['channel'], "No such command.")
    if not module == None:
        module.execute(**kwargs)

instruction:

Add support for hyphens, and list of commands

Adds a function to retrieve all commands, and converts incoming
commands from hyphenated to underscored form.

output:

import os
import fnmatch
import string
import importlib
import traceback

from .. import irc

def get_all():
    files = os.listdir('./nickenbot/command')
    files.remove('__init__.py')
    commands = [os.path.splitext(f)[0] for f in files if fnmatch.fnmatch(f, '*.py')]
    commands = [string.replace(c, '_', '-') for c in commands]
    return commands

def execute(**kwargs):
    print(kwargs['command'])
    command = string.replace(kwargs['command'], '-', '_')
    print(command)
    module_string = string.join([__name__, command], '.')
    module = None
    try:
        module = importlib.import_module(module_string)
    except ImportError as e:
        traceback.print_exc()
        irc.send_to_channel(kwargs['channel'], "No such command.")
    if not module == None:
        module.execute(**kwargs)
---
input:

import javabridge as jv
import bioformats as bf

def start(max_heap_size='8G'):
    """Start the Java Virtual Machine, enabling bioformats IO.

    Parameters
    ----------
    max_heap_size : string, optional
        The maximum memory usage by the virtual machine. Valid strings
        include '256M', '64k', and '2G'. Expect to need a lot.
    """
    jv.start_vm(class_path=bf.JARS, max_heap_size=max_heap_size)

def done():
    """Kill the JVM. Once killed, it cannot be restarted.

    Notes
    -----
    See the python-javabridge documentation for more information.
    """
    jv.kill_vm()

instruction:

Add function to determine metadata length

output:

import numpy as np

import javabridge as jv
import bioformats as bf

def start(max_heap_size='8G'):
    """Start the Java Virtual Machine, enabling bioformats IO.

    Parameters
    ----------
    max_heap_size : string, optional
        The maximum memory usage by the virtual machine. Valid strings
        include '256M', '64k', and '2G'. Expect to need a lot.
    """
    jv.start_vm(class_path=bf.JARS, max_heap_size=max_heap_size)

def done():
    """Kill the JVM. Once killed, it cannot be restarted.

    Notes
    -----
    See the python-javabridge documentation for more information.
    """
    jv.kill_vm()

def lif_metadata_string_size(filename):
    """Get the length in bytes of the metadata string of a LIF file.

    Parameters
    ----------
    filename : string
        Path to the LIF file.

    Returns
    -------
    length : int
        The length in bytes of the metadata string.

    Notes
    -----
    This is based on code by Lee Kamentsky. [1]

    References
    ----------
    [1] https://github.com/CellProfiler/python-bioformats/issues/8
    """
    with open(filename, 'rb') as fd:
        fd.read(9)
        length = np.frombuffer(fd.read(4), "<i4")[0]
    return length
---
input:

# -#- coding: utf-8 -#-

from django.db import models
from django.utils.translation import ugettext_lazy as _

from leonardo.module.web.models import Widget

LOGIN_TYPE_CHOICES = (
    (1, _("Admin")),
    (2, _("Public")),
)

class UserLoginWidget(Widget):

    type = models.PositiveIntegerField(verbose_name=_(
        "type"), choices=LOGIN_TYPE_CHOICES, default=2)

    def get_context_data(self, request):
        context = super(UserLoginWidget, self).get_context_data(request)
        if 'next' in request.GET:
            context['next'] = request.GET['next']
        return context

    class Meta:
        abstract = True
        verbose_name = _("user login")
        verbose_name_plural = _("user logins")

instruction:

Fix missing next in context.

output:

# -#- coding: utf-8 -#-

from django.db import models
from django.utils.translation import ugettext_lazy as _

from leonardo.module.web.models import Widget

LOGIN_TYPE_CHOICES = (
    (1, _("Admin")),
    (2, _("Public")),
)

class UserLoginWidget(Widget):

    type = models.PositiveIntegerField(verbose_name=_(
        "type"), choices=LOGIN_TYPE_CHOICES, default=2)

    def get_context_data(self, request):
        context = super(UserLoginWidget, self).get_context_data(request)
        if 'next' in request.GET:
            context['next'] = request.GET['next']
        else:
            context['next'] = request.path
        return context

    class Meta:
        abstract = True
        verbose_name = _("user login")
        verbose_name_plural = _("user logins")
---
input:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from django.contrib import admin

from .models import Logging

class LoggingAdmin(admin.ModelAdmin):
    model = Logging
    raw_id_fields = ('user',)
    exclude = ('site_iid', 'site_domain')

admin.site.register(Logging, LoggingAdmin)

instruction:

Add field mirror_site at exclude on LoggingAdmin

output:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from django.contrib import admin

from .models import Logging

class LoggingAdmin(admin.ModelAdmin):
    model = Logging
    raw_id_fields = ('user',)
    exclude = ('site_iid', 'site_domain', 'mirror_site')

admin.site.register(Logging, LoggingAdmin)
---
input:

#! /usr/bin/env python

import sys
import json

for filepath in sys.argv[1:]:
    with open(filepath) as f:
        try:
            oyster = json.load(f)
        except ValueError:
            sys.stderr.write("In file: {}\n".format(filepath))
            raise
    with open(filepath, 'w') as f:
        json.dump(oyster, f, indent=4, separators=(',', ': '), sort_keys=True)
        f.write('\n')  # add a trailing newline.

instruction:

Make this work for non-ASCII chars as well.

output:

#! /usr/bin/env python3

import sys
import json

for filepath in sys.argv[1:]:
    with open(filepath) as f:
        try:
            oyster = json.load(f)
        except ValueError:
            sys.stderr.write("In file: {}\n".format(filepath))
            raise
    with open(filepath, 'w') as f:
        json.dump(oyster, f, ensure_ascii=False, indent=4,
                  separators=(',', ': '), sort_keys=True)
        f.write('\n')  # add a trailing newline.
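A quick aside on the record above: `ensure_ascii` controls whether the standard-library `json` module escapes non-ASCII characters. A minimal sketch of the difference:

import json

# Default behaviour escapes non-ASCII characters into \uXXXX sequences.
print(json.dumps({"name": "café"}))                      # {"name": "caf\u00e9"}

# ensure_ascii=False writes the characters through as real UTF-8 text.
print(json.dumps({"name": "café"}, ensure_ascii=False))  # {"name": "café"}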
"""set suppliers active flag NOT NULLABLE Ensure that all suppliers are either active or inactive. Revision ID: 1340 Revises: 1330 Create Date: 2019-06-26 11:53:56.085586 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '1340' down_revision = '1330' def upgrade(): # We want this column to be NOT NULLABLE, so we need to set any NULL # values. NULLs are active suppliers (i.e. they have not been made # inactive). op.execute("UPDATE suppliers SET active = true WHERE active = NULL") op.alter_column('suppliers', 'active', nullable=False) def downgrade(): op.alter_column('suppliers', 'active', nullable=True)
Fix comparison with NULL bug
"""set suppliers active flag NOT NULLABLE Ensure that all suppliers are either active or inactive. Revision ID: 1340 Revises: 1330 Create Date: 2019-06-26 11:53:56.085586 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '1340' down_revision = '1330' def upgrade(): # We want this column to be NOT NULLABLE, so we need to set any NULL # values. NULLs are active suppliers (i.e. they have not been made # inactive). op.execute("UPDATE suppliers SET active = true WHERE active is NULL") op.alter_column('suppliers', 'active', nullable=False) def downgrade(): op.alter_column('suppliers', 'active', nullable=True)
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". Replace this with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.assertEqual(1 + 1, 2)
Test public body showing, json view and csv export
from django.test import TestCase from django.core.urlresolvers import reverse from publicbody.models import PublicBody class PublicBodyTest(TestCase): fixtures = ['auth.json', 'publicbodies.json', 'foirequest.json'] def test_web_page(self): response = self.client.get(reverse('publicbody-list')) self.assertEqual(response.status_code, 200) pb = PublicBody.objects.all()[0] response = self.client.get(reverse('publicbody-show', kwargs={"slug": pb.slug})) self.assertEqual(response.status_code, 200) response = self.client.get(reverse('publicbody-show_json', kwargs={"pk": pb.pk, "format": "json"})) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertIn('"name":', response.content) self.assertIn('"laws": [{', response.content) response = self.client.get(reverse('publicbody-show_json', kwargs={"slug": pb.slug, "format": "json"})) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') def test_csv(self): csv = PublicBody.export_csv() self.assertTrue(csv) def test_search(self): response = self.client.get(reverse('publicbody-search')+"?q=umwelt") self.assertIn("Umweltbundesamt", response.content) self.assertEqual(response['Content-Type'], 'application/json')
---
input:

import argparse
import json
import sys

import gcl
from gcl import query
from gcl import util

def main(argv=None, stdin=None):
    parser = argparse.ArgumentParser(description='Convert (parts of) a GCL model file to JSON.')
    parser.add_argument('file', metavar='FILE', type=str, nargs='?',
                        help='File to parse')
    parser.add_argument('selectors', metavar='SELECTOR', type=str, nargs='*',
                        help='Subnodes to convert. The first selector will be treated as the root of the printed output.')

    args = parser.parse_args(argv or sys.argv[1:])

    try:
        if args.file and args.file != '-':
            model = gcl.load(args.file)
        else:
            model = gcl.loads((stdin or sys.stdin).read(), filename='<stdin>')

        sels = query.GPath(args.selectors)
        if not sels.everything():
            model = sels.select(model).deep()

        plain = util.to_python(model)
        sys.stdout.write(json.dumps(plain))
    except (gcl.ParseError, RuntimeError) as e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)

instruction:

Add proper root selector to gcl2json

output:

import argparse
import json
import sys

import gcl
from gcl import query
from gcl import util

def select(dct, path):
    for part in path:
        if not hasattr(dct, 'keys'):
            raise RuntimeError('Value %r cannot be indexed with %r' % (dct, part))
        if part not in dct:
            raise RuntimeError('Value %r has no key %r' % (dct, part))
        dct = dct[part]
    return dct

def main(argv=None, stdin=None):
    parser = argparse.ArgumentParser(description='Convert (parts of) a GCL model file to JSON.')
    parser.add_argument('file', metavar='FILE', type=str, nargs='?',
                        help='File to parse')
    parser.add_argument('selectors', metavar='SELECTOR', type=str, nargs='*',
                        help='Select nodes to include in the JSON.')
    parser.add_argument('--root', '-r', metavar='PATH', type=str, default='',
                        help='Use the indicated root path as the root of the output JSON object (like a.b.c but without wildcards)')

    args = parser.parse_args(argv or sys.argv[1:])

    try:
        if args.file and args.file != '-':
            model = gcl.load(args.file)
        else:
            model = gcl.loads((stdin or sys.stdin).read(), filename='<stdin>')

        sels = query.GPath(args.selectors)
        if not sels.everything():
            model = sels.select(model).deep()

        plain = util.to_python(model)

        selectors = args.root.split('.') if args.root else []
        selected = select(plain, selectors)

        sys.stdout.write(json.dumps(selected))
    except (gcl.ParseError, RuntimeError) as e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)
---
input:

import os
import shutil

import pytest

from LiSE.engine import Engine
from LiSE.examples.kobold import inittest

def test_keyframe_load_init(tempdir):
    """Can load a keyframe at start of branch, including locations"""
    eng = Engine(tempdir)
    inittest(eng)
    eng.branch = 'new'
    eng.snap_keyframe()
    eng.close()
    eng = Engine(tempdir)
    assert 'kobold' in eng.character['physical'].thing
    assert (0, 0) in eng.character['physical'].place
    assert (0, 1) in eng.character['physical'].portal[0, 0]
    eng.close()

def test_multi_keyframe(tempdir):
    eng = Engine(tempdir)
    inittest(eng, kobold_pos=(9, 9))
    eng.snap_keyframe()
    tick0 = eng.tick
    eng.turn = 1
    eng.character['physical'].thing['kobold']['location'] = (3, 3)
    eng.snap_keyframe()
    tick1 = eng.tick
    eng.close()
    eng = Engine(tempdir)
    eng._load_at('trunk', 0, tick0+1)
    assert eng._things_cache.keyframe['physical']['trunk'][0][tick0]\
        != eng._things_cache.keyframe['physical']['trunk'][1][tick1]

instruction:

Make test_multi_keyframe demonstrate what it's supposed to

I was testing a cache that wasn't behaving correctly for unrelated reasons.

output:

import os
import shutil

import pytest

from LiSE.engine import Engine
from LiSE.examples.kobold import inittest

def test_keyframe_load_init(tempdir):
    """Can load a keyframe at start of branch, including locations"""
    eng = Engine(tempdir)
    inittest(eng)
    eng.branch = 'new'
    eng.snap_keyframe()
    eng.close()
    eng = Engine(tempdir)
    assert 'kobold' in eng.character['physical'].thing
    assert (0, 0) in eng.character['physical'].place
    assert (0, 1) in eng.character['physical'].portal[0, 0]
    eng.close()

def test_multi_keyframe(tempdir):
    eng = Engine(tempdir)
    inittest(eng)
    eng.snap_keyframe()
    tick0 = eng.tick
    eng.turn = 1
    del eng.character['physical'].place[3, 3]
    eng.snap_keyframe()
    tick1 = eng.tick
    eng.close()
    eng = Engine(tempdir)
    eng._load_at('trunk', 0, tick0+1)
    assert eng._nodes_cache.keyframe['physical', ]['trunk'][0][tick0]\
        != eng._nodes_cache.keyframe['physical', ]['trunk'][1][tick1]
---
input:

from troposphere import Join, iam, logs

from .common import arn_prefix
from .template import template

container_log_group = logs.LogGroup(
    "ContainerLogs",
    template=template,
    RetentionInDays=365,
    DeletionPolicy="Retain",
)

logging_policy = iam.Policy(
    PolicyName="LoggingPolicy",
    PolicyDocument=dict(
        Statement=[dict(
            Effect="Allow",
            Action=[
                "logs:Create*",
                "logs:PutLogEvents",
            ],
            Resource=Join("", [
                arn_prefix,
                ":logs:*:*:*",  # allow logging to any log group
            ]),
        )],
    ),
)

instruction:

Add logging permissions needed by aws-for-fluent-bit

output:

from troposphere import Join, iam, logs

from .common import arn_prefix
from .template import template

container_log_group = logs.LogGroup(
    "ContainerLogs",
    template=template,
    RetentionInDays=365,
    DeletionPolicy="Retain",
)

logging_policy = iam.Policy(
    PolicyName="LoggingPolicy",
    PolicyDocument=dict(
        Statement=[dict(
            Effect="Allow",
            Action=[
                "logs:Create*",
                "logs:PutLogEvents",
                # Needed by aws-for-fluent-bit:
                "logs:DescribeLogGroups",
                "logs:DescribeLogStreams",
            ],
            Resource=Join("", [
                arn_prefix,
                ":logs:*:*:*",  # allow logging to any log group
            ]),
        )],
    ),
)
---
input:

from pluginbase import PluginBase

class PluginManager:

    def __init__(self, paths, provider):
        self.paths = [paths]
        self.provider = provider
        plugin_base = PluginBase(package='foremast.plugins')
        self.plugin_source = plugin_base.make_plugin_source(searchpath=self.paths)

    def plugins(self):
        for plugin in self.plugin_source.list_plugins():
            yield plugin

    def load(self):
        return self.plugin_source.load_plugin(self.provider)

instruction:

chore: Add docstring to plugin manager

output:

"""Manager to handle plugins"""
from pluginbase import PluginBase

class PluginManager:
    """Class to manage and create Spinnaker applications

    Args:
        paths (str): Path of plugin directory.
        provider (str): The name of the cloud provider.
    """

    def __init__(self, paths, provider):
        self.paths = [paths]
        self.provider = provider
        plugin_base = PluginBase(package='foremast.plugins')
        self.plugin_source = plugin_base.make_plugin_source(searchpath=self.paths)

    def plugins(self):
        """List of all plugins available."""
        for plugin in self.plugin_source.list_plugins():
            yield plugin

    def load(self):
        """Load the plugin object."""
        return self.plugin_source.load_plugin(self.provider)
---
input:

# -*- coding: utf-8 -*-
# Copyright (c) 2016 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import factory
from factory.faker import Faker

from pycroft.model.user import User, RoomHistoryEntry

from .base import BaseFactory
from .facilities import RoomFactory
from .finance import AccountFactory

class UserFactory(BaseFactory):
    class Meta:
        model = User

    login = Faker('user_name')
    name = Faker('name')
    registered_at = Faker('date_time')
    password = Faker('password')
    email = Faker('email')
    account = factory.SubFactory(AccountFactory, type="USER_ASSET")
    room = factory.SubFactory(RoomFactory)
    address = factory.SelfAttribute('room.address')

    @factory.post_generation
    def room_history_entries(self, create, extracted, **kwargs):
        if self.room is not None:
            # Set room history entry begin to registration date
            rhe = RoomHistoryEntry.q.filter_by(user=self, room=self.room).one()
            rhe.begins_at = self.registered_at

class UserWithHostFactory(UserFactory):
    host = factory.RelatedFactory('tests.factories.host.HostFactory', 'owner')

class UserWithMembershipFactory(UserFactory):
    membership = factory.RelatedFactory('tests.factories.property.MembershipFactory', 'user')

instruction:

Allow adjusting of RoomHistoryEntry attributes in UserFactory

output:

# -*- coding: utf-8 -*-
# Copyright (c) 2016 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import factory
from factory.faker import Faker

from pycroft.model.user import User, RoomHistoryEntry

from .base import BaseFactory
from .facilities import RoomFactory
from .finance import AccountFactory

class UserFactory(BaseFactory):
    class Meta:
        model = User

    login = Faker('user_name')
    name = Faker('name')
    registered_at = Faker('date_time')
    password = Faker('password')
    email = Faker('email')
    account = factory.SubFactory(AccountFactory, type="USER_ASSET")
    room = factory.SubFactory(RoomFactory)
    address = factory.SelfAttribute('room.address')

    @factory.post_generation
    def room_history_entries(self, create, extracted, **kwargs):
        if self.room is not None:
            # Set room history entry begin to registration date
            rhe = RoomHistoryEntry.q.filter_by(user=self, room=self.room).one()
            rhe.begins_at = self.registered_at
            for key, value in kwargs.items():
                setattr(rhe, key, value)

class UserWithHostFactory(UserFactory):
    host = factory.RelatedFactory('tests.factories.host.HostFactory', 'owner')

class UserWithMembershipFactory(UserFactory):
    membership = factory.RelatedFactory('tests.factories.property.MembershipFactory', 'user')
---
input:

from sentinels import NOTHING

class ErrorContainer(object):

    def add_error(self, exception, exception_type, traceback, timestamp=NOTHING):
        return self.client.api.call_function('add_error', {self._get_id_key(): self.id,
                                                           'exception': exception,
                                                           'exception_type': exception_type,
                                                           'traceback': traceback,
                                                           'timestamp': timestamp
                                                           })

    def _get_id_key(self):
        if type(self).__name__ == 'Test':
            return 'test_id'
        return 'session_id'

instruction:

Unify errors and failures in API

output:

from sentinels import NOTHING

class ErrorContainer(object):

    def add_error(self, message, exception_type=NOTHING, traceback=NOTHING, timestamp=NOTHING):
        return self.client.api.call_function('add_error', {self._get_id_key(): self.id,
                                                           'message': message,
                                                           'exception_type': exception_type,
                                                           'traceback': traceback,
                                                           'timestamp': timestamp
                                                           })

    def _get_id_key(self):
        if type(self).__name__ == 'Test':
            return 'test_id'
        return 'session_id'
---
input:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from abstract import Abstract
from json import Json
from msgpack import MsgPack

__all__ = ['Abstract', 'Json', 'MsgPack']

instruction:

Load resources by absolute path not relative

output:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pygrapes.serializer.abstract import Abstract
from pygrapes.serializer.json import Json
from pygrapes.serializer.msgpack import MsgPack

__all__ = ['Abstract', 'Json', 'MsgPack']
---
input:

from channels.generic.websockets import WebsocketConsumer, JsonWebsocketConsumer

from .jsonrpcwebsocketconsumer import JsonRpcWebsocketConsumer

class MyJsonRpcWebsocketConsumer(JsonRpcWebsocketConsumer):

    # Set to True if you want them, else leave out
    strict_ordering = False
    slight_ordering = False

    def connection_groups(self, **kwargs):
        """
        Called to return the list of groups to automatically add/remove
        this connection to/from.
        """
        return ["test"]

    def receive(self, content, **kwargs):
        """
        Called when a message is received with decoded JSON content
        """
        # Simple echo
        print "received: %s" % content
        print "kwargs %s" % kwargs
        self.send(content)

    def disconnect(self, message, **kwargs):
        """
        Perform things on connection close
        """
        print "disconnect"

@MyJsonRpcWebsocketConsumer.rpc_method()
def ping():
    return "pong"

instruction:

Print statements updated to be compatible with Python 3.

output:

from channels.generic.websockets import WebsocketConsumer, JsonWebsocketConsumer

from .jsonrpcwebsocketconsumer import JsonRpcWebsocketConsumer

class MyJsonRpcWebsocketConsumer(JsonRpcWebsocketConsumer):

    # Set to True if you want them, else leave out
    strict_ordering = False
    slight_ordering = False

    def connection_groups(self, **kwargs):
        """
        Called to return the list of groups to automatically add/remove
        this connection to/from.
        """
        return ["test"]

    def receive(self, content, **kwargs):
        """
        Called when a message is received with decoded JSON content
        """
        # Simple echo
        print("received: %s" % content)
        print("kwargs %s" % kwargs)
        self.send(content)

    def disconnect(self, message, **kwargs):
        """
        Perform things on connection close
        """
        print("disconnect")

@MyJsonRpcWebsocketConsumer.rpc_method()
def ping():
    return "pong"
---
input:

import TPunitA
import TPunitB

def __lldb_init_module(debugger,*args):
    debugger.HandleCommand("command script add -f thepackage.TPunitA.command TPcommandA")
    debugger.HandleCommand("command script add -f thepackage.TPunitB.command TPcommandB")

instruction:

Fix TestImport.py to work with Python 3.5.

Differential Revision: http://reviews.llvm.org/D16431

git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@258448 91177308-0d34-0410-b5e6-96231b3b80d8

output:

from __future__ import absolute_import

from . import TPunitA
from . import TPunitB

def __lldb_init_module(debugger,*args):
    debugger.HandleCommand("command script add -f thepackage.TPunitA.command TPcommandA")
    debugger.HandleCommand("command script add -f thepackage.TPunitB.command TPcommandB")
---
input:

from fabric.api import env, run, sudo, settings, hide

# Default system user
env.user = 'ubuntu'

# Default puppet environment
env.environment = 'prod'

# Default puppet module directory
env.puppet_module_dir = 'modules/'

# Default puppet version
# If loom_puppet_version is None, loom installs the latest version
env.loom_puppet_version = '3.1.1'

# Default librarian version
# If loom_librarian_version is None, loom installs the latest version
env.loom_librarian_version = '0.9.9'

def host_roles(host_string):
    """
    Returns the role of a given host string.
    """
    roles = set()
    for role, hosts in env.roledefs.items():
        if host_string in hosts:
            roles.add(role)
    return list(roles)

def current_roles():
    return host_roles(env.host_string)

def has_puppet_installed():
    with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):
        result = sudo('which puppet')
    return result.succeeded

def has_librarian_installed():
    with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):
        librarian = sudo('which librarian-puppet')
    return librarian.succeeded

instruction:

Revert "sudo is required to run which <gem-exec> on arch."

This reverts commit 15162c58c27bc84f1c7fc0326f782bd693ca4d7e.

output:

from fabric.api import env, run, settings, hide

# Default system user
env.user = 'ubuntu'

# Default puppet environment
env.environment = 'prod'

# Default puppet module directory
env.puppet_module_dir = 'modules/'

# Default puppet version
# If loom_puppet_version is None, loom installs the latest version
env.loom_puppet_version = '3.1.1'

# Default librarian version
# If loom_librarian_version is None, loom installs the latest version
env.loom_librarian_version = '0.9.9'

def host_roles(host_string):
    """
    Returns the role of a given host string.
    """
    roles = set()
    for role, hosts in env.roledefs.items():
        if host_string in hosts:
            roles.add(role)
    return list(roles)

def current_roles():
    return host_roles(env.host_string)

def has_puppet_installed():
    with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):
        result = run('which puppet')
    return result.succeeded

def has_librarian_installed():
    with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):
        librarian = run('which librarian-puppet')
    return librarian.succeeded
---
input:

import uuid

class BaseTransport(object):
    """Base transport class."""

    REQUEST_ID_KEY = 'requestId'
    REQUEST_ACTION_KEY = 'action'

    def __init__(self, data_format_class, data_format_options, handler_class,
                 handler_options, name):
        self._data_format = data_format_class(**data_format_options)
        self._data_type = self._data_format.data_type
        self._handler = handler_class(self, **handler_options)
        self._connected = False
        self._name = name

    @staticmethod
    def _uuid():
        return str(uuid.uuid1())

    def _assert_not_connected(self):
        assert not self._connected, 'transport connection already created'

    def _assert_connected(self):
        assert self._connected, 'transport connection has not created'

    def _encode(self, obj):
        return self._data_format.encode(obj)

    def _decode(self, data):
        return self._data_format.decode(data)

    def _call_handler_method(self, name, *args):
        getattr(self._handler, name)(*args)

    def name(self):
        return self._name

    def is_connected(self):
        return self._connected

    def connect(self, url, **options):
        raise NotImplementedError

    def send_request(self, action, request, **params):
        raise NotImplementedError

    def request(self, action, request, **params):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    def join(self, timeout=None):
        raise NotImplementedError

class BaseTransportException(Exception):
    """Base transport exception."""

    pass

instruction:

Rename is_connected method to connected

output:

import uuid

class BaseTransport(object):
    """Base transport class."""

    REQUEST_ID_KEY = 'requestId'
    REQUEST_ACTION_KEY = 'action'

    def __init__(self, data_format_class, data_format_options, handler_class,
                 handler_options, name):
        self._data_format = data_format_class(**data_format_options)
        self._data_type = self._data_format.data_type
        self._handler = handler_class(self, **handler_options)
        self._connected = False
        self._name = name

    @staticmethod
    def _uuid():
        return str(uuid.uuid1())

    def _assert_not_connected(self):
        assert not self._connected, 'transport connection already created'

    def _assert_connected(self):
        assert self._connected, 'transport connection has not created'

    def _encode(self, obj):
        return self._data_format.encode(obj)

    def _decode(self, data):
        return self._data_format.decode(data)

    def _call_handler_method(self, name, *args):
        getattr(self._handler, name)(*args)

    def name(self):
        return self._name

    def connected(self):
        return self._connected

    def connect(self, url, **options):
        raise NotImplementedError

    def send_request(self, action, request, **params):
        raise NotImplementedError

    def request(self, action, request, **params):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    def join(self, timeout=None):
        raise NotImplementedError

class BaseTransportException(Exception):
    """Base transport exception."""

    pass
---
input:

import json

from django.db import models
from model_utils.models import TimeStampedModel

class ParsedSOPN(TimeStampedModel):
    """
    A model for storing the parsed data out of a PDF
    """

    sopn = models.OneToOneField(
        "official_documents.OfficialDocument", on_delete=models.CASCADE
    )
    raw_data = models.TextField()
    raw_data_type = models.CharField(max_length=255, default="pandas")
    parsed_data = models.TextField(null=True)
    status = models.CharField(max_length=255, default="unparsed")

    @property
    def as_pandas(self):
        import pandas

        pandas.set_option("display.max_colwidth", -1)
        return pandas.DataFrame.from_dict(json.loads(self.raw_data))

    @property
    def data_as_html(self):
        if self.raw_data_type == "pandas":
            data = self.as_pandas
            header = data.iloc[0]
            data = data[1:]
            data.columns = header
            return data.to_html(index=False, escape=False).replace(
                "\\n", "<br>"
            )

instruction:

Use None rather than -1 for Pandas

output:

import json

from django.db import models
from model_utils.models import TimeStampedModel

class ParsedSOPN(TimeStampedModel):
    """
    A model for storing the parsed data out of a PDF
    """

    sopn = models.OneToOneField(
        "official_documents.OfficialDocument", on_delete=models.CASCADE
    )
    raw_data = models.TextField()
    raw_data_type = models.CharField(max_length=255, default="pandas")
    parsed_data = models.TextField(null=True)
    status = models.CharField(max_length=255, default="unparsed")

    @property
    def as_pandas(self):
        import pandas

        pandas.set_option("display.max_colwidth", None)
        return pandas.DataFrame.from_dict(json.loads(self.raw_data))

    @property
    def data_as_html(self):
        if self.raw_data_type == "pandas":
            data = self.as_pandas
            header = data.iloc[0]
            data = data[1:]
            data.columns = header
            return data.to_html(index=False, escape=False).replace(
                "\\n", "<br>"
            )
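For context on the change above: passing `-1` to `display.max_colwidth` was deprecated in pandas 1.0 in favour of `None`, which is the documented way to disable column-width truncation. A small sketch (the exact pandas version cutoff is an assumption):

import pandas as pd

pd.set_option("display.max_colwidth", None)  # disable truncation; -1 is deprecated
df = pd.DataFrame({"text": ["x" * 200]})
print(df.to_string())  # the full 200-character string is printed, not truncated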
"""Main Module of PDF Splitter""" import argparse import os from PyPDF2 import PdfFileWriter from Util import all_pdf_files_in_directory, split_on_condition, concat_pdf_pages parser = \ argparse.ArgumentParser( description='Split all the pages of multiple PDF files in a directory by document number' ) parser.add_argument( 'directory', metavar='PATH', type=str, help='path to a directory' ) def width_greater_than_height(page): box = page.mediaBox return box.getWidth() > box.getHeight() if __name__ == '__main__': args = parser.parse_args() directory = args.directory all_pdf_files = [os.path.join(directory, filename) for filename in all_pdf_files_in_directory(directory)] opened_files = map(lambda path: open(path, 'rb'), all_pdf_files) all_pages = concat_pdf_pages(opened_files) for idx, pages in enumerate(split_on_condition(all_pages, predicate=width_greater_than_height), start=1): pdf_writer = PdfFileWriter() map(pdf_writer.addPage, pages) output_filename = '{0:05}.pdf'.format(idx) with open(output_filename, 'wb') as output_file: pdf_writer.write(output_file) output_file.flush() os.fsync(output_file.fileno()) map(lambda f: f.close, opened_files)
Refactor main as a separate function
"""Main Module of PDF Splitter""" import argparse import os from PyPDF2 import PdfFileWriter from Util import all_pdf_files_in_directory, split_on_condition, concat_pdf_pages parser = \ argparse.ArgumentParser( description='Split all the pages of multiple PDF files in a directory by document number' ) parser.add_argument( 'directory', metavar='PATH', type=str, help='path to a directory' ) def main(): args = parser.parse_args() directory = args.directory all_pdf_files = [os.path.join(directory, filename) for filename in all_pdf_files_in_directory(directory)] opened_files = map(lambda path: open(path, 'rb'), all_pdf_files) all_pages = concat_pdf_pages(opened_files) for idx, pages in enumerate(split_on_condition(all_pages, predicate=width_greater_than_height), start=1): pdf_writer = PdfFileWriter() map(pdf_writer.addPage, pages) output_filename = '{0:05}.pdf'.format(idx) with open(output_filename, 'wb') as output_file: pdf_writer.write(output_file) output_file.flush() os.fsync(output_file.fileno()) map(lambda f: f.close, opened_files) def width_greater_than_height(page): box = page.mediaBox return box.getWidth() > box.getHeight() if __name__ == '__main__': main()
---
input:

#!/usr/bin/env python

import search
import tmap

if __name__ == "__main__":
    from pprint import pprint as pp
    import sys

    to_dict = lambda r: r.to_dict()

    h = search.HuluSearch()
    a = search.AmazonSearch()
    n = search.NetflixSearch()

    # get the query from the first argument or from user input
    if len(sys.argv) > 1:
        query = sys.argv[1]
    else:
        query = raw_input("search: ")

    # get a shorter query to use for autocomplete
    ac_query = query[0:3]

    ac_results = tmap.map(lambda s: s.autocomplete(ac_query), (a, h, n),
                          num_threads=3)
    autocomplete_results = {
        "amazon": ac_results[0],
        "hulu": ac_results[1],
        "netflix": ac_results[2],
    }

    print "autocomplete results for '" + ac_query + "':"
    pp(autocomplete_results)
    print

    results = tmap.map(lambda s: s.find(query), (a, h, n), num_threads=3)
    search_results = {
        "amazon": map(to_dict, results[0]),
        "hulu": map(to_dict, results[1]),
        "netflix": map(to_dict, results[2])
    }

    print "search results for '" + query + "':"
    pp(search_results)
    print

instruction:

Change CL to require a non-blank query

output:

#!/usr/bin/env python

import search
import tmap

if __name__ == "__main__":
    from pprint import pprint as pp
    import sys

    to_dict = lambda r: r.to_dict()

    h = search.HuluSearch()
    a = search.AmazonSearch()
    n = search.NetflixSearch()

    # get the query from the first argument or from user input
    if len(sys.argv) > 1:
        query = sys.argv[1]
        if query.strip() == "":
            raise ValueError("Non-blank query string is required!")
    else:
        query = ""
        # get a non-blank query string
        while query.strip() == "":
            query = raw_input("search: ")

    # get a shorter query to use for autocomplete
    ac_query = query[0:3]

    ac_results = tmap.map(lambda s: s.autocomplete(ac_query), (a, h, n),
                          num_threads=3)
    autocomplete_results = {
        "amazon": ac_results[0],
        "hulu": ac_results[1],
        "netflix": ac_results[2],
    }

    print "autocomplete results for '" + ac_query + "':"
    pp(autocomplete_results)
    print

    results = tmap.map(lambda s: s.find(query), (a, h, n), num_threads=3)
    search_results = {
        "amazon": map(to_dict, results[0]),
        "hulu": map(to_dict, results[1]),
        "netflix": map(to_dict, results[2])
    }

    print "search results for '" + query + "':"
    pp(search_results)
    print
---
input:

from eth_utils import (
    is_address,
    is_checksum_address,
    is_checksum_formatted_address,
    is_dict,
    is_list_like,
)

def validate_abi(abi):
    """
    Helper function for validating an ABI
    """
    if not is_list_like(abi):
        raise ValueError("'abi' is not a list")
    for e in abi:
        if not is_dict(e):
            raise ValueError("The elements of 'abi' are not all dictionaries")

def validate_address(value):
    """
    Helper function for validating an address
    """
    if not is_address(value):
        raise ValueError("'{0}' is not an address".format(value))
    validate_address_checksum(value)

def validate_address_checksum(value):
    """
    Helper function for validating an address EIP55 checksum
    """
    if is_checksum_formatted_address(value):
        if not is_checksum_address(value):
            raise ValueError("'{0}' has an invalid EIP55 checksum".format(value))

instruction:

Raise error specific to address checksum failure

Because is_address() also checks for a valid checksum, the old code
showed a generic "not an address" error if the checksum failed.

output:

from eth_utils import (
    is_address,
    is_checksum_address,
    is_checksum_formatted_address,
    is_dict,
    is_list_like,
)

def validate_abi(abi):
    """
    Helper function for validating an ABI
    """
    if not is_list_like(abi):
        raise ValueError("'abi' is not a list")
    for e in abi:
        if not is_dict(e):
            raise ValueError("The elements of 'abi' are not all dictionaries")

def validate_address(value):
    """
    Helper function for validating an address
    """
    validate_address_checksum(value)
    if not is_address(value):
        raise ValueError("'{0}' is not an address".format(value))

def validate_address_checksum(value):
    """
    Helper function for validating an address EIP55 checksum
    """
    if is_checksum_formatted_address(value):
        if not is_checksum_address(value):
            raise ValueError("'{0}' has an invalid EIP55 checksum".format(value))
---
input:

from firecares.settings.base import *  # noqa

INSTALLED_APPS += ('debug_toolbar', 'fixture_magic', 'django_extensions')  # noqa

MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )  # noqa

# The Django Debug Toolbar will only be shown to these client IPs.
INTERNAL_IPS = (
    '127.0.0.1',
)

DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TEMPLATE_CONTEXT': True,
    'HIDE_DJANGO_SQL': False,
}

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'magic'
    }
}

LOGGING['loggers'] = {  # noqa
    'django.request': {
        'handlers': ['mail_admins'],
        'level': 'ERROR',
        'propagate': True,
    },
    'osgeo_importer': {
        'handlers': ['console'],
        'level': 'DEBUG',
        'propagate': True,
    },
}

def show_toolbar(request):
    return False

DEBUG_TOOLBAR_CONFIG = {
    "SHOW_TOOLBAR_CALLBACK": show_toolbar,
}

# EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

CELERY_ALWAYS_EAGER = True

try:
    from local_settings import *  # noqa
except ImportError:
    pass

instruction:

Set registration open by default

output:

from firecares.settings.base import *  # noqa

INSTALLED_APPS += ('debug_toolbar', 'fixture_magic', 'django_extensions')  # noqa

MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )  # noqa

# The Django Debug Toolbar will only be shown to these client IPs.
INTERNAL_IPS = (
    '127.0.0.1',
)

DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TEMPLATE_CONTEXT': True,
    'HIDE_DJANGO_SQL': False,
}

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'magic'
    }
}

LOGGING['loggers'] = {  # noqa
    'django.request': {
        'handlers': ['mail_admins'],
        'level': 'ERROR',
        'propagate': True,
    },
    'osgeo_importer': {
        'handlers': ['console'],
        'level': 'DEBUG',
        'propagate': True,
    },
}

def show_toolbar(request):
    return False

DEBUG_TOOLBAR_CONFIG = {
    "SHOW_TOOLBAR_CALLBACK": show_toolbar,
}

# EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

CELERY_ALWAYS_EAGER = True

REGISTRATION_OPEN = True

try:
    from local_settings import *  # noqa
except ImportError:
    pass
---
input:

def execute():
    import webnotes
    gd = webnotes.model.code.get_obj('Global Defaults')
    gd.doc.maintain_same_rate = 1
    gd.doc.save()
    gd.on_update()

instruction:

Maintain same rate throughout pur cycle: in global defaults, by default set true

output:

def execute():
    import webnotes
    from webnotes.model.code import get_obj
    gd = get_obj('Global Defaults')
    gd.doc.maintain_same_rate = 1
    gd.doc.save()
    gd.on_update()
---
input:

# This app doesn't contain any models, but as its template tags need to
# be added to built-ins at start-up time, this is a good place to do it.

from django.template.loader import add_to_builtins

add_to_builtins("overextends.templatetags.overextends_tags")

instruction:

Fix import path of add_to_builtins

output:

# This app doesn't contain any models, but as its template tags need to
# be added to built-ins at start-up time, this is a good place to do it.

from django.template.base import add_to_builtins

add_to_builtins("overextends.templatetags.overextends_tags")
---
input:

# -*- coding: utf-8 -*-

import logging

from flask import g
from celery import group

from website import settings

logger = logging.getLogger(__name__)

def celery_before_request():
    g._celery_tasks = []

def celery_teardown_request(error=None):
    if error is not None:
        return
    try:
        tasks = g._celery_tasks
        if tasks:
            group(*tasks)()
    except AttributeError:
        if not settings.DEBUG_MODE:
            logger.error('Task queue not initialized')

def enqueue_task(signature):
    if signature not in g._celery_tasks:
        g._celery_tasks.append(signature)

handlers = {
    'before_request': celery_before_request,
    'teardown_request': celery_teardown_request,
}

instruction:

Handle queued tasks when working outside request context.

output:

# -*- coding: utf-8 -*-

import logging

from flask import g
from celery import group

from website import settings

logger = logging.getLogger(__name__)

def celery_before_request():
    g._celery_tasks = []

def celery_teardown_request(error=None):
    if error is not None:
        return
    try:
        tasks = g._celery_tasks
        if tasks:
            group(*tasks)()
    except AttributeError:
        if not settings.DEBUG_MODE:
            logger.error('Task queue not initialized')

def enqueue_task(signature):
    """If working in a request context, push task signature to ``g`` to run
    after request is complete; else run signature immediately.

    :param signature: Celery task signature
    """
    try:
        if signature not in g._celery_tasks:
            g._celery_tasks.append(signature)
    except RuntimeError:
        signature()

handlers = {
    'before_request': celery_before_request,
    'teardown_request': celery_teardown_request,
}
---
input:

# Copyright (c) 2015 "Hugo Herter http://hugoherter.com"
#
# This file is part of Billabong.
#
# Intercom is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Test CLI interface."""

import os

from .fixtures import record

assert record

def run(cmd):
    """Helper to test running a CLI command."""
    os.system('python -m billabong ' + cmd)

def test_cli(record):
    """Test main supported CLI commands."""
    ID = record['id']

    run('ls')
    run('blobs')
    run('info ' + ID)
    run('search txt')
    run('check')
    run('push')
    run('pull')
    run('echo ' + ID)
    run('status')
    run('version')

instruction:

Add test for cli 'add' command

output:

# Copyright (c) 2015 "Hugo Herter http://hugoherter.com"
#
# This file is part of Billabong.
#
# Intercom is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Test CLI interface."""

import os

from .fixtures import record

assert record

def run(cmd):
    """Helper to test running a CLI command."""
    os.system('python -m billabong ' + cmd)

def test_cli(record):
    """Test main supported CLI commands."""
    ID = record['id']

    run('ls')
    run('records')
    run('blobs')
    run('info ' + ID)
    run('info ' + ID + ' --no-color')
    run('search txt')
    run('check')
    run('push')
    run('pull')
    run('echo ' + ID)
    run('status')
    run('version')

    run('add hello.txt')
---
input:

from django.conf.urls import patterns, url
from django.views.generic import TemplateView

from core.auth import perm
import search.views

urlpatterns = patterns('',
    url(r'^document/$', perm('any', search.views.DocumentSearchTemplate), name='search'),
    url(r'^document/query/$',perm('any', search.views.DocumentSearchQuery), name='search_documents_query'),
    url(r'^image/$', perm('user', search.views.ImageSearchTemplate), name='search_images'),
    url(r'^image/query/$', perm('user', search.views.SearchImageQuery), name='search_images_query'),
    url(r'^social/$', perm('user', TemplateView, template_name='search/search_social.jinja'), name='search_social'),
    url(r'^social/query/$', perm('user', search.views.SearchSocialQuery), name='search_social_query'),
)

instruction:

Allow any logged-in user to perform image searches.

output:

from django.conf.urls import patterns, url
from django.views.generic import TemplateView

from core.auth import perm
import search.views

urlpatterns = patterns('',
    url(r'^document/$', perm('any', search.views.DocumentSearchTemplate), name='search'),
    url(r'^document/query/$',perm('any', search.views.DocumentSearchQuery), name='search_documents_query'),
    url(r'^image/$', perm('loggedin', search.views.ImageSearchTemplate), name='search_images'),
    url(r'^image/query/$', perm('loggedin', search.views.SearchImageQuery), name='search_images_query'),
    url(r'^social/$', perm('user', TemplateView, template_name='search/search_social.jinja'), name='search_social'),
    url(r'^social/query/$', perm('user', search.views.SearchSocialQuery), name='search_social_query'),
)
---
input:

# -*- coding: utf-8 -*-

from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

import re

from django import template
from django.conf import settings

register = template.Library()

_remove_slash_re = re.compile(r'/+')

def _urljoin(*args):
    """Joins relative URLs, collapsing consecutive '/'"""
    url = "/".join(args)
    return _remove_slash_re.sub('/', url)

@register.filter
def static_url(static_file):
    if settings.DEBUG:
        return _urljoin(settings.STATIC_URL, static_file)

    static_file_mapping = settings.STATIC_FILES_MAPPING
    if static_file not in static_file_mapping:
        raise Exception('Static file %s not found in rev-manifest.json, '
                        'did you forget to run "npm run build"?' % static_file)

    return _urljoin(settings.STATIC_URL, static_file_mapping[static_file])

instruction:

Add warning message to tricky template tag

output:

# -*- coding: utf-8 -*-

from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

import re

from django import template
from django.conf import settings

register = template.Library()

_remove_slash_re = re.compile(r'/+')

def _urljoin(*args):
    """Joins relative URLs, collapsing consecutive '/'"""
    url = "/".join(args)
    return _remove_slash_re.sub('/', url)

@register.filter
def static_url(static_file):
    if settings.DEBUG:
        return _urljoin(settings.STATIC_URL, static_file)

    static_file_mapping = settings.STATIC_FILES_MAPPING
    if static_file not in static_file_mapping:
        # !!! WARNING !!! this may cause your templates to silently fail
        # If template A includes template B and template B has uses this
        # templatetag and results in this exception, template B will be
        # rendered blank inside of template A, instead of crashing.
        raise Exception('Static file %s not found in rev-manifest.json, '
                        'did you forget to run "npm run build"?' % static_file)

    return _urljoin(settings.STATIC_URL, static_file_mapping[static_file])
---
input:

#!/usr/bin/env python
import os
import sys

import django
from django.conf import settings
from django.test.utils import get_runner

if __name__ == "__main__":
    os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
    django.setup()
    TestRunner = get_runner(settings)
    test_runner = TestRunner()
    failures = test_runner.run_tests(["tests"])
    sys.exit(bool(failures))

instruction:

Tests: Make it possible to run individual tests.

output:

#!/usr/bin/env python
import os
import sys

import django
from django.conf import settings
from django.test.utils import get_runner

if __name__ == "__main__":
    tests = "tests" if len(sys.argv) == 1 else sys.argv[1]
    os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
    django.setup()
    TestRunner = get_runner(settings)
    test_runner = TestRunner()
    failures = test_runner.run_tests([tests])
    sys.exit(bool(failures))
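Usage of the modified runner, assuming the script is saved as runtests.py (the dotted module path below is a hypothetical example):

# python runtests.py                      -> run the whole "tests" package
# python runtests.py tests.test_basics    -> run a single test module by dotted path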
""" Command function to schema-validate a HXL dataset. David Megginson November 2014 Can use a whitelist of HXL tags, a blacklist, or both. Usage: import sys from hxl.scripts.hxlvalidate import hxlvalidate hxlvalidate(sys.stdin, sys.stdout, open('MySchema.csv', 'r')) License: Public Domain Documentation: http://hxlstandard.org """ import sys import argparse from hxl.parser import HXLReader from hxl.schema import loadHXLSchema def hxlvalidate(input, output=sys.stdout, schema_input=None): parser = HXLReader(input) schema = loadHXLSchema(schema_input) schema.validate(parser) # end
Return result of validation from the command script.
""" Command function to schema-validate a HXL dataset. David Megginson November 2014 Can use a whitelist of HXL tags, a blacklist, or both. Usage: import sys from hxl.scripts.hxlvalidate import hxlvalidate hxlvalidate(sys.stdin, sys.stdout, open('MySchema.csv', 'r')) License: Public Domain Documentation: http://hxlstandard.org """ import sys from hxl.parser import HXLReader from hxl.schema import loadHXLSchema def hxlvalidate(input=sys.stdin, output=sys.stderr, schema_input=None): parser = HXLReader(input) schema = loadHXLSchema(schema_input) return schema.validate(parser) # end
---
input:

# -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

{
    "name": "Set Snippet's Anchor",
    "summary": "Allow to reach a concrete section in the page",
    "version": "8.0.1.0.0",
    "category": "Website",
    "website": "http://www.antiun.com",
    "author": "Antiun Ingeniería S.L., Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    "external_dependencies": {
        "python": [],
        "bin": [],
    },
    "depends": [
        "website",
    ],
    "data": [
        "views/assets.xml",
        "views/snippets.xml",
    ],
}

instruction:

Remove unused keys from manifest.

output:

# -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

{
    "name": "Set Snippet's Anchor",
    "summary": "Allow to reach a concrete section in the page",
    "version": "8.0.1.0.0",
    "category": "Website",
    "website": "http://www.antiun.com",
    "author": "Antiun Ingeniería S.L., Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    "depends": [
        "website",
    ],
    "data": [
        "views/assets.xml",
        "views/snippets.xml",
    ],
}
---
input:

#!/usr/bin/env python
import os

from apiclient.discovery import build
from apiclient import errors

PROJECT_NAME = os.getenv('PROJECT_NAME')
TASK_QUEUE_NAME = os.getenv('QUEUE_NAME')
TASK_LEASE_SECONDS = os.getenv('TASK_LEASE_SECONDS', 300)
TASK_BATCH_SIZE = os.getenv('TASK_BATCH_SIZE', 10)

assert PROJECT_NAME
assert TASK_QUEUE_NAME

def main():
    task_api = build('taskqueue', 'v1beta2')
    try:
        lease_request = task_api.tasks().lease(
            project=PROJECT_NAME,
            taskqueue=TASK_QUEUE_NAME,
            leaseSecs=TASK_LEASE_SECONDS,
            numTasks=TASK_BATCH_SIZE,
            # body={},
        )
        result = lease_request.execute()
        print '------------'
        print repr(result)
        return result
    except errors.HttpError, e:
        logger.error('Error during lease request: %s' % str(e))
        return None

if __name__ == '__main__':
    main()

instruction:

Fix up logging and env vars.

output:

#!/usr/bin/env python
import os
import logging

from apiclient.discovery import build
from apiclient import errors

PROJECT_NAME = os.getenv('PROJECT_NAME')
TASKQUEUE_NAME = os.getenv('TASKQUEUE_NAME', 'builds')
TASKQUEUE_LEASE_SECONDS = os.getenv('TASKQUEUE_LEASE_SECONDS', 300)
TASKQUEUE_BATCH_SIZE = os.getenv('TASKQUEUE_BATCH_SIZE', 10)

assert PROJECT_NAME
assert TASKQUEUE_NAME

def main():
    task_api = build('taskqueue', 'v1beta2')
    try:
        lease_request = task_api.tasks().lease(
            project=PROJECT_NAME,
            taskqueue=TASKQUEUE_NAME,
            leaseSecs=TASKQUEUE_LEASE_SECONDS,
            numTasks=TASKQUEUE_BATCH_SIZE,
            # body={},
        )
        result = lease_request.execute()
        print '------------'
        print repr(result)
        return result
    except errors.HttpError, e:
        logging.error('Error during lease request: %s' % str(e))
        return None

if __name__ == '__main__':
    main()
---
input:

import json
import base64
import urllib

from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2

class OXSessionDecryptor(object):

    def __init__(self, secret_key_base, salt="encrypted cookie", keylen=64, iterations=1000):
        self.secret = PBKDF2(secret_key_base, salt.encode(), keylen, iterations)

    def get_cookie_data(self, cookie):
        cookie = base64.b64decode(urllib.parse.unquote(cookie).split('--')[0])
        encrypted_data, iv = map(base64.b64decode, cookie.split('--'.encode()))
        cipher = AES.new(self.secret[:32], AES.MODE_CBC, iv)
        return json.loads(unpad(cipher.decrypt(encrypted_data)))

instruction:

Add unpad function for unpacking cookie

output:

import json
import base64
import urllib

from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2

unpad = lambda s: s[:-ord(s[len(s) - 1:])]

class OXSessionDecryptor(object):

    def __init__(self, secret_key_base, salt="encrypted cookie", keylen=64, iterations=1000):
        self.secret = PBKDF2(secret_key_base, salt.encode(), keylen, iterations)

    def get_cookie_data(self, cookie):
        cookie = base64.b64decode(urllib.parse.unquote(cookie).split('--')[0])
        encrypted_data, iv = map(base64.b64decode, cookie.split('--'.encode()))
        cipher = AES.new(self.secret[:32], AES.MODE_CBC, iv)
        return json.loads(unpad(cipher.decrypt(encrypted_data)))
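The `unpad` one-liner added above strips PKCS#7-style padding: CBC-mode plaintext is padded up to the cipher's block size, and each pad byte holds the pad length, so the last byte says how many bytes to drop. A minimal sketch of how it behaves:

# The last byte (0x0b == 11) says how many padding bytes to strip.
unpad = lambda s: s[:-ord(s[len(s) - 1:])]

padded = b"hello" + b"\x0b" * 11  # 5 data bytes padded to a 16-byte AES block
print(unpad(padded))              # b'hello'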
---
input:

#!/usr/bin/env python
# vim: set fileencoding=UTF-8 :

from __future__ import print_function

ADD_OP = '+'
MULTIPLY_OP = '*'

OPERATORS = [ADD_OP, MULTIPLY_OP]

def pprint_expr_trees(trees):
    from parser import ExprParser
    print('[')
    for t in trees:
        print(' ', ExprParser(t))
    print(']')

instruction:

Update pprint_expr_trees to adopt Expr

output:

#!/usr/bin/env python
# vim: set fileencoding=UTF-8 :

from __future__ import print_function

ADD_OP = '+'
MULTIPLY_OP = '*'

OPERATORS = [ADD_OP, MULTIPLY_OP]

def pprint_expr_trees(trees):
    print('[')
    for t in trees:
        print(' ', t)
    print(']')
---
input:

# Licensed under an MIT open source license - see LICENSE

'''
Test functions for PSpec
'''

from unittest import TestCase

import numpy as np
import numpy.testing as npt

from ..statistics import PowerSpectrum, PSpec_Distance
from ._testing_data import \
    dataset1, dataset2, computed_data, computed_distances

class testPSpec(TestCase):

    def setUp(self):
        self.dataset1 = dataset1
        self.dataset2 = dataset2

    def test_PSpec_method(self):
        self.tester = \
            PowerSpectrum(dataset1["moment0"],
                          weights=dataset1["moment0_error"][0] ** 2.)
        self.tester.run()
        npt.assert_allclose(self.tester.ps1D, computed_data['pspec_val'])

    def test_PSpec_distance(self):
        self.tester_dist = \
            PSpec_Distance(dataset1["moment0"],
                           dataset2["moment0"],
                           weights1=dataset1["moment0_error"][0] ** 2.,
                           weights2=dataset2["moment0_error"][0] ** 2.)
        self.tester_dist.distance_metric()

        npt.assert_almost_equal(self.tester_dist.distance,
                                computed_distances['pspec_distance'])

instruction:

Add test to ensure power spectrum slope is same w/ transposed array

output:

# Licensed under an MIT open source license - see LICENSE

'''
Test functions for PSpec
'''

from unittest import TestCase

import numpy as np
import numpy.testing as npt

from ..statistics import PowerSpectrum, PSpec_Distance
from ._testing_data import \
    dataset1, dataset2, computed_data, computed_distances

class testPSpec(TestCase):

    def setUp(self):
        self.dataset1 = dataset1
        self.dataset2 = dataset2

    def test_PSpec_method(self):
        self.tester = \
            PowerSpectrum(dataset1["moment0"],
                          weights=dataset1["moment0_error"][0] ** 2.)
        self.tester.run()
        npt.assert_allclose(self.tester.ps1D, computed_data['pspec_val'])

    def test_PSpec_distance(self):
        self.tester_dist = \
            PSpec_Distance(dataset1["moment0"],
                           dataset2["moment0"],
                           weights1=dataset1["moment0_error"][0] ** 2.,
                           weights2=dataset2["moment0_error"][0] ** 2.)
        self.tester_dist.distance_metric()

        npt.assert_almost_equal(self.tester_dist.distance,
                                computed_distances['pspec_distance'])

def test_pspec_nonequal_shape():
    mom0_sliced = dataset1["moment0"][0][:16, :]
    mom0_hdr = dataset1["moment0"][1]

    test = PowerSpectrum((mom0_sliced, mom0_hdr)).run()
    test_T = PowerSpectrum((mom0_sliced.T, mom0_hdr)).run()

    npt.assert_almost_equal(test.slope, test_T.slope, decimal=7)
""" Map puzzle URLs to views. Also maps the root URL to the latest puzzle. """ from django.conf.urls import include, url from django.contrib.auth import views as auth_views from puzzle import views from puzzle.feeds import PuzzleFeed urlpatterns = [ #pylint: disable=invalid-name url(r'^$', views.latest, name='latest'), url(r'^login/$', auth_views.login, {'template_name': 'puzzle/login.html'}, name='login'), url(r'^logout/$', auth_views.logout, {'next_page': 'latest'}, name='logout'), url(r'^create/$', views.create, name='create'), url(r'^save/$', views.save, name='save'), url(r'^rss/$', PuzzleFeed(), name='rss'), url(r'^archive/$', views.users, name='users'), url(r'^profile/$', views.profile, name='profile'), url(r'^puzzle/(?P<number>\d+)/$', views.puzzle_redirect), url(r'^setter/(?P<author>\w+)/(?P<number>\d+)/', include([ url(r'^$', views.puzzle, name='puzzle'), url(r'^solution/$', views.solution, name='solution'), url(r'^edit/$', views.edit, name='edit'), ])), ]
Replace deprecated login/logout function-based views
""" Map puzzle URLs to views. Also maps the root URL to the latest puzzle. """ from django.conf.urls import include, url from django.contrib.auth import views as auth_views from puzzle import views from puzzle.feeds import PuzzleFeed urlpatterns = [ #pylint: disable=invalid-name url(r'^$', views.latest, name='latest'), url(r'^login/$', auth_views.LoginView.as_view(template_name='puzzle/login.html'), name='login'), url(r'^logout/$', auth_views.LogoutView.as_view(next_page='latest'), name='logout'), url(r'^create/$', views.create, name='create'), url(r'^save/$', views.save, name='save'), url(r'^rss/$', PuzzleFeed(), name='rss'), url(r'^archive/$', views.users, name='users'), url(r'^profile/$', views.profile, name='profile'), url(r'^puzzle/(?P<number>\d+)/$', views.puzzle_redirect), url(r'^setter/(?P<author>\w+)/(?P<number>\d+)/', include([ url(r'^$', views.puzzle, name='puzzle'), url(r'^solution/$', views.solution, name='solution'), url(r'^edit/$', views.edit, name='edit'), ])), ]
""" Module that provides a connection to the ModuleStore specified in the django settings. Passes settings.MODULESTORE as kwargs to MongoModuleStore """ from __future__ import absolute_import from importlib import import_module from django.conf import settings _MODULESTORES = {} FUNCTION_KEYS = ['render_template'] def load_function(path): """ Load a function by name. path is a string of the form "path.to.module.function" returns the imported python object `function` from `path.to.module` """ module_path, _, name = path.rpartition('.') return getattr(import_module(module_path), name) def modulestore(name='default'): global _MODULESTORES if name not in _MODULESTORES: class_ = load_function(settings.MODULESTORE[name]['ENGINE']) options = {} options.update(settings.MODULESTORE[name]['OPTIONS']) for key in FUNCTION_KEYS: if key in options: options[key] = load_function(options[key]) _MODULESTORES[name] = class_( **options ) return _MODULESTORES[name] # Initialize the modulestores immediately for store_name in settings.MODULESTORE: modulestore(store_name)
Put in a quick check so we don't load course modules on init unless we're actually running in Django
""" Module that provides a connection to the ModuleStore specified in the django settings. Passes settings.MODULESTORE as kwargs to MongoModuleStore """ from __future__ import absolute_import from importlib import import_module from os import environ from django.conf import settings _MODULESTORES = {} FUNCTION_KEYS = ['render_template'] def load_function(path): """ Load a function by name. path is a string of the form "path.to.module.function" returns the imported python object `function` from `path.to.module` """ module_path, _, name = path.rpartition('.') return getattr(import_module(module_path), name) def modulestore(name='default'): global _MODULESTORES if name not in _MODULESTORES: class_ = load_function(settings.MODULESTORE[name]['ENGINE']) options = {} options.update(settings.MODULESTORE[name]['OPTIONS']) for key in FUNCTION_KEYS: if key in options: options[key] = load_function(options[key]) _MODULESTORES[name] = class_( **options ) return _MODULESTORES[name] if 'DJANGO_SETTINGS_MODULE' in environ: # Initialize the modulestores immediately for store_name in settings.MODULESTORE: modulestore(store_name)
import sys from java.lang import String from java.util import HashSet from java.util import HashMap import java globdict = globals() def loadFilesService(): global globdict exec open("filesAdmin.py").read()
Customize scripts to work with menu
import sys from java.lang import String from java.util import HashSet from java.util import HashMap import java import lotusConnectionsCommonAdmin globdict = globals() def loadFilesService(): global globdict exec open("filesAdmin.py").read()
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals import os import sys sys.path.append(os.curdir) from pelicanconf import * SITEURL = 'https://softwarejourneyman.com' RELATIVE_URLS = False FEED_ALL_ATOM = 'feeds/all.atom.xml' CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml' DELETE_OUTPUT_DIRECTORY = False DISQUS_SITENAME = "pappasam-github-io" GOOGLE_ANALYTICS = "UA-117115805-1"
Point the site URL to samroeca.com
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals import os import sys sys.path.append(os.curdir) from pelicanconf import * SITEURL = 'https://samroeca.com' RELATIVE_URLS = False FEED_ALL_ATOM = 'feeds/all.atom.xml' CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml' DELETE_OUTPUT_DIRECTORY = False DISQUS_SITENAME = "pappasam-github-io" GOOGLE_ANALYTICS = "UA-117115805-1"
''' Salt module to manage monit ''' def version(): ''' List monit version Cli Example:: salt '*' monit.version ''' cmd = 'monit -V' res = __salt__['cmd.run'](cmd) return res.split("\n")[0] def status(): ''' Monit status CLI Example:: salt '*' monit.status ''' cmd = 'monit status' res = __salt__['cmd.run'](cmd) return res.split("\n") def start(): ''' Starts monit CLI Example:: salt '*' monit.start *Note need to add check to insure its running* `ps ax | grep monit | grep -v grep or something` ''' cmd = 'monit' res = __salt__['cmd.run'](cmd) return "Monit started" def stop(): ''' Stop monit CLI Example:: salt '*' monit.stop *Note Needs check as above* ''' def _is_bsd(): return True if __grains__['os'] == 'FreeBSD' else False if _is_bsd(): cmd = "/usr/local/etc/rc.d/monit stop" else: cmd = "/etc/init.d/monit stop" res = __salt__['cmd.run'](cmd) return "Monit Stopped" def monitor_all(): ''' Initializing all monit modules. ''' cmd = 'monit monitor all' res = __salt__['cmd.run'](cmd) if res: return "All Services initaialized" return "Issue starting monitoring on all services" def unmonitor_all(): ''' unmonitor all services. ''' cmd = 'monit unmonitor all' res = __salt__['cmd.run'](cmd) if res: return "All Services unmonitored" return "Issue unmonitoring all services"
Check to see if we are going down the right path
''' Monit service module. This module will create a monit type service watcher. ''' import os def start(name): ''' CLI Example:: salt '*' monit.start <service name> ''' cmd = "monit start {0}".format(name) return not __salt__['cmd.retcode'](cmd) def stop(name): ''' Stops service via monit CLI Example:: salt '*' monit.stop <service name> ''' cmd = "monit stop {0}".format(name) return not __salt__['cmd.retcode'](cmd) def restart(name): ''' Restart service via monit CLI Example:: salt '*' monit.restart <service name> ''' cmd = "monit restart {0}".format(name) return not __salt__['cmd.retcode'](cmd)
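Each wrapper returns `not __salt__['cmd.retcode'](cmd)`, i.e. True exactly when monit exits with status 0. A stand-in sketch of that convention outside of Salt, using subprocess:

import subprocess

def monit_start(name):
    # exit status 0 -> command accepted -> True, mirroring the module above
    return subprocess.call(['monit', 'start', name]) == 0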
from django.db import models class Playbook(models.Model): class Meta: verbose_name_plural = "playbooks" name = models.CharField(max_length=200) path = models.CharField(max_length=200, default="~/") ansible_config = models.CharField(max_length=200, default="~/") inventory = models.CharField(max_length=200, default="hosts") user = models.CharField(max_length=200, default="ubuntu") def __str__(self): return "Playbook name: %s" % self.playbook.name
Fix string output of Playbook
from django.db import models class Playbook(models.Model): class Meta: verbose_name_plural = "playbooks" name = models.CharField(max_length=200) path = models.CharField(max_length=200, default="~/") ansible_config = models.CharField(max_length=200, default="~/") inventory = models.CharField(max_length=200, default="hosts") user = models.CharField(max_length=200, default="ubuntu") def __str__(self): return "%s" % self.name
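The old body referenced self.playbook.name, an attribute the model does not have, so str() raised AttributeError; the fix returns the instance's own name. A hypothetical session in a configured Django shell:

p = Playbook(name='deploy-web')
str(p)  # 'deploy-web' — previously raised AttributeError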
# The version number must follow these rules: # - When the server is released, a client with exactly the same version number # should be released. # - Bugfixes should be released as consecutive post-releases, # that is versions of the form X.Y.Z.postN, where X.Y.Z is # the AG version number and N increases with each fix. # - Code from the development branch may be released any time # with a version of the form X.Y.ZrcN (rc = release candidate). # # When this file is committed to git the version should look like this: # - In any branch that has already been released: X.Y.Z # AG and Python client versions should be the same. # - In a 'stable' branch: X.Y.ZpostN, where X.Y.Z is the PREVIOUS # version of AG. # - In the development branch: X.Y.ZpreN. # # The full range of valid version numbers is described here: # https://www.python.org/dev/peps/pep-0440/ __version__ = u'6.1.3.post1'
Set __version__ to 6.1.4 in preparation for the v6.1.4 release That is all. Change-Id: I79edd9574995e50c17c346075bf158e6f1d64a0c Reviewed-on: https://gerrit.franz.com:9080/6845 Reviewed-by: Tadeusz Sznuk <4402abb98f9559cbfb6d73029f928227b498069b@franz.com> Reviewed-by: Ahmon Dancy <8f7d8ce2c6797410ae95fecd4c30801ee9f760ac@franz.com> Tested-by: Ahmon Dancy <8f7d8ce2c6797410ae95fecd4c30801ee9f760ac@franz.com>
# The version number must follow these rules: # - When the server is released, a client with exactly the same version number # should be released. # - Bugfixes should be released as consecutive post-releases, # that is versions of the form X.Y.Z.postN, where X.Y.Z is # the AG version number and N increases with each fix. # - Code from the development branch may be released any time # with a version of the form X.Y.ZrcN (rc = release candidate). # # When this file is committed to git the version should look like this: # - In any branch that has already been released: X.Y.Z # AG and Python client versions should be the same. # - In a 'stable' branch: X.Y.ZpostN, where X.Y.Z is the PREVIOUS # version of AG. # - In the development branch: X.Y.ZpreN. # # The full range of valid version numbers is described here: # https://www.python.org/dev/peps/pep-0440/ __version__ = u'6.1.4'
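Under PEP 440, post-releases of the previous version sort below the next release and release candidates sort below the final, which is what makes this numbering scheme work; a quick check with the (assumed installed) packaging library:

from packaging.version import Version

assert Version('6.1.3') < Version('6.1.3.post1') < Version('6.1.4')
assert Version('6.1.4rc1') < Version('6.1.4')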
import eventlet eventlet.monkey_patch() # NOLINT import importlib import sys from weaveserver.main import create_app from weaveserver.core.logger import configure_logging def handle_launch(): import signal from weaveserver.core.config_loader import get_config configure_logging() token = sys.stdin.readline().strip() name = sys.argv[1] module = importlib.import_module(name) meta = module.__meta__ config = get_config(meta.get("config")) app = meta["class"](token, config) signal.signal(signal.SIGTERM, lambda x, y: app.on_service_stop()) signal.signal(signal.SIGINT, lambda x, y: app.on_service_stop()) app.before_service_start() app.on_service_start() def handle_main(): configure_logging() main_app = create_app() main_app.start()
Support 2nd parameter for weave-launch so that a plugin from any directory can be loaded.
import eventlet eventlet.monkey_patch() # NOLINT import importlib import os import sys from weaveserver.main import create_app from weaveserver.core.logger import configure_logging def handle_launch(): import signal from weaveserver.core.config_loader import get_config configure_logging() token = sys.stdin.readline().strip() name = sys.argv[1] if len(sys.argv) > 2: # This is mostly for plugins. Need to change dir so imports can succeed. os.chdir(sys.argv[2]) sys.path.append(sys.argv[2]) module = importlib.import_module(name) meta = module.__meta__ config = get_config(meta.get("config")) app = meta["class"](token, config) signal.signal(signal.SIGTERM, lambda x, y: app.on_service_stop()) signal.signal(signal.SIGINT, lambda x, y: app.on_service_stop()) app.before_service_start() app.on_service_start() def handle_main(): configure_logging() main_app = create_app() main_app.start()
""" The following exceptions may be raised during the course of using :py:class:`tornado_aws.client.AWSClient` and :py:class:`tornado_aws.client.AsyncAWSClient`: """ class AWSClientException(Exception): """Base exception class for AWSClient :ivar msg: The error message """ fmt = 'An error occurred' def __init__(self, **kwargs): super(AWSClientException, self).__init__(self.fmt.format(**kwargs)) class ConfigNotFound(AWSClientException): """The configuration file could not be parsed. :ivar path: The path to the config file """ fmt = 'The config file could not be found ({path})' class ConfigParserError(AWSClientException): """Error raised when parsing a configuration file with :py:class`configparser.RawConfigParser` :ivar path: The path to the config file """ fmt = 'Unable to parse config file ({path})' class NoCredentialsError(AWSClientException): """Raised when the credentials could not be located.""" fmt = 'Credentials not found' class NoProfileError(AWSClientException): """Raised when the specified profile could not be located. :ivar path: The path to the config file :ivar profile: The profile that was specified """ fmt = 'Profile ({profile}) not found ({path})'
Add a new generic AWS Error exception
""" The following exceptions may be raised during the course of using :py:class:`tornado_aws.client.AWSClient` and :py:class:`tornado_aws.client.AsyncAWSClient`: """ class AWSClientException(Exception): """Base exception class for AWSClient :ivar msg: The error message """ fmt = 'An error occurred' def __init__(self, **kwargs): super(AWSClientException, self).__init__(self.fmt.format(**kwargs)) class AWSError(AWSClientException): """Raised when the credentials could not be located.""" fmt = '{message}' class ConfigNotFound(AWSClientException): """The configuration file could not be parsed. :ivar path: The path to the config file """ fmt = 'The config file could not be found ({path})' class ConfigParserError(AWSClientException): """Error raised when parsing a configuration file with :py:class`configparser.RawConfigParser` :ivar path: The path to the config file """ fmt = 'Unable to parse config file ({path})' class NoCredentialsError(AWSClientException): """Raised when the credentials could not be located.""" fmt = 'Credentials not found' class NoProfileError(AWSClientException): """Raised when the specified profile could not be located. :ivar path: The path to the config file :ivar profile: The profile that was specified """ fmt = 'Profile ({profile}) not found ({path})'
import pytest from kafka import KafkaConsumer, KafkaProducer from test.conftest import version from test.testutil import random_string @pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set") def test_end_to_end(kafka_broker): connect_str = 'localhost:' + str(kafka_broker.port) producer = KafkaProducer(bootstrap_servers=connect_str, max_block_ms=10000, value_serializer=str.encode) consumer = KafkaConsumer(bootstrap_servers=connect_str, consumer_timeout_ms=10000, auto_offset_reset='earliest', value_deserializer=bytes.decode) topic = random_string(5) for i in range(1000): producer.send(topic, 'msg %d' % i) producer.flush() producer.close() consumer.subscribe([topic]) msgs = set() for i in range(1000): try: msgs.add(next(consumer).value) except StopIteration: break assert msgs == set(['msg %d' % i for i in range(1000)])
Disable auto-commit / group assignment in producer test
import pytest from kafka import KafkaConsumer, KafkaProducer from test.conftest import version from test.testutil import random_string @pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set") def test_end_to_end(kafka_broker): connect_str = 'localhost:' + str(kafka_broker.port) producer = KafkaProducer(bootstrap_servers=connect_str, max_block_ms=10000, value_serializer=str.encode) consumer = KafkaConsumer(bootstrap_servers=connect_str, group_id=None, consumer_timeout_ms=10000, auto_offset_reset='earliest', value_deserializer=bytes.decode) topic = random_string(5) for i in range(1000): producer.send(topic, 'msg %d' % i) producer.flush() producer.close() consumer.subscribe([topic]) msgs = set() for i in range(1000): try: msgs.add(next(consumer).value) except StopIteration: break assert msgs == set(['msg %d' % i for i in range(1000)])
import os import sys # Modify the sys.path to allow tests to be run without # installing the module. test_path = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(1, test_path + '/../') import pypm import pytest def test_patch_replaces_and_restores(): i = __import__ pypm.patch_import() assert i is not __import__ pypm.unpatch_import() assert i is __import__ def test_require_gets_local(): t1_import_test = pypm.require('import_test') assert '.pymodules' in repr(t1_import_test) def test_require_uses_module_cache(): t2_import_test = pypm.require('import_test') t3_import_test = pypm.require('import_test') assert t2_import_test is t3_import_test def test_require_not_conflict_with_import(): setuptools = pypm.require('setuptools') import setuptools as setuptools2 assert setuptools2 is not setuptools @pytest.mark.xfail def test_BUG_require_cannot_override_standard_lib(): re2 = pypm.require('re') assert '.pymodules' in repr(re2)
Change import path in tests to reflect new name Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com>
import os import sys # Modify the sys.path to allow tests to be run without # installing the module. test_path = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(1, test_path + '/../') import require import pytest def test_patch_replaces_and_restores(): i = __import__ require.patch_import() assert i is not __import__ require.unpatch_import() assert i is __import__ def test_require_gets_local(): t1_import_test = require.require('import_test') assert '.pymodules' in repr(t1_import_test) def test_require_uses_module_cache(): t2_import_test = require.require('import_test') t3_import_test = require.require('import_test') assert t2_import_test is t3_import_test def test_require_not_conflict_with_import(): setuptools = require.require('setuptools') import setuptools as setuptools2 assert setuptools2 is not setuptools @pytest.mark.xfail def test_BUG_require_cannot_override_standard_lib(): re2 = require.require('re') assert '.pymodules' in repr(re2)
#!/usr/bin/env python # Copyright 2012 Citrix Systems, Inc. Licensed under the # Apache License, Version 2.0 (the "License"); you may not use this # file except in compliance with the License. Citrix Systems, Inc. from distutils.core import setup from sys import version if version < "2.7": print "Marvin needs at least python 2.7, found : \n%s"%version else: try: import paramiko except ImportError: print "Marvin requires paramiko to be installed" raise setup(name="Marvin", version="0.1.0", description="Marvin - Python client for testing cloudstack", author="Edison Su", author_email="Edison.Su@citrix.com", maintainer="Prasanna Santhanam", maintainer_email="Prasanna.Santhanam@citrix.com", long_description="Marvin is the cloudstack testclient written around the python unittest framework", platforms=("Any",), url="http://jenkins.cloudstack.org:8080/job/marvin", packages=["marvin", "marvin.cloudstackAPI", "marvin.sandbox", "marvin.pymysql", "marvin.pymysql.constants", "marvin.pymysql.tests"], license="LICENSE.txt", install_requires=[ "Python>=2.7", "paramiko", "nose" ], )
Install paramiko as a dependency, don't complain about the requirement
#!/usr/bin/env python # Copyright 2012 Citrix Systems, Inc. Licensed under the # Apache License, Version 2.0 (the "License"); you may not use this # file except in compliance with the License. Citrix Systems, Inc. from distutils.core import setup from sys import version if version < "2.7": print "Marvin needs at least python 2.7, found : \n%s"%version raise setup(name="Marvin", version="0.1.0", description="Marvin - Python client for testing cloudstack", author="Edison Su", author_email="Edison.Su@citrix.com", maintainer="Prasanna Santhanam", maintainer_email="Prasanna.Santhanam@citrix.com", long_description="Marvin is the cloudstack testclient written around the python unittest framework", platforms=("Any",), url="http://jenkins.cloudstack.org:8080/job/marvin", packages=["marvin", "marvin.cloudstackAPI", "marvin.sandbox", "marvin.pymysql", "marvin.pymysql.constants", "marvin.pymysql.tests"], license="LICENSE.txt", install_requires=[ "Python>=2.7", "paramiko", "nose" ], )
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2019 Compassion CH (http://www.compassion.ch) # @author: Emanuel Cino <ecino@compassion.ch> # @author: Théo Nikles <theo.nikles@gmail.com> # # The licence is in the file __manifest__.py # ############################################################################## from odoo import models, fields class PrivacyStatementAgreement(models.Model): _inherit = 'privacy.statement.agreement' origin_signature = fields.Selection( selection_add=[('mobile_app', 'Mobile App Registration')]) def mobile_get_privacy_notice(self, language, **params): return {'PrivacyNotice': self.env['compassion.privacy.statement'] .with_context(lang=language) .sudo().search([], limit=1).text}
FIX language of privacy statement
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2019 Compassion CH (http://www.compassion.ch) # @author: Emanuel Cino <ecino@compassion.ch> # @author: Théo Nikles <theo.nikles@gmail.com> # # The licence is in the file __manifest__.py # ############################################################################## from ..controllers.mobile_app_controller import _get_lang from odoo import models, fields class PrivacyStatementAgreement(models.Model): _inherit = 'privacy.statement.agreement' origin_signature = fields.Selection( selection_add=[('mobile_app', 'Mobile App Registration')]) def mobile_get_privacy_notice(self, **params): lang = _get_lang(self, params) return {'PrivacyNotice': self.env['compassion.privacy.statement'] .with_context(lang=lang) .sudo().search([], limit=1).text}
#!/usr/bin/env python2 from setuptools import setup setup( name='bouncer-plumbing', description='Glue scripts to integrate oonib with mlab-ns (simulator).', version='0.1.dev0', author='LeastAuthority', author_email='consultancy@leastauthority.com', license='FIXME', url='https://github.com/LeastAuthority/ooni-support', scripts = [ './collector-to-mlab/getconfig.py', './mlab-to-bouncer/makeconfig.py', ], data_files = [ ('/home/mlab_ooni/bin/', ['collector-to-mlab/get_ipv4.sh']), ], install_requires=[ 'PyYaml', # BUG: Put a version constraint. ], )
Add ``update_bouncer.sh`` as a data file to the python package for bouncer_plumbing.
#!/usr/bin/env python2 from setuptools import setup setup( name='bouncer-plumbing', description='Glue scripts to integrate oonib with mlab-ns (simulator).', version='0.1.dev0', author='LeastAuthority', author_email='consultancy@leastauthority.com', license='FIXME', url='https://github.com/LeastAuthority/ooni-support', scripts = [ './collector-to-mlab/getconfig.py', './mlab-to-bouncer/makeconfig.py', ], data_files = [ ('/home/mlab_ooni/bin/', ['collector-to-mlab/get_ipv4.sh', 'mlab-to-bouncer/update-bouncer.sh', ], ), ], install_requires=[ 'PyYaml', # BUG: Put a version constraint. ], )
# -*- coding: UTF-8 -*- from boilerpipe.extract import Extractor URL='http://sportv.globo.com/site/eventos/mundial-de-motovelocidade/noticia/2016/06/em-duelo-eletrizante-rossi-vence-marquez-salom-e-homenageado.html' extractor = Extractor(extractor='ArticleExtractor', url=URL) print extractor.getText().encode('utf-8')
Add one more url to example 1
# -*- coding: UTF-8 -*- from boilerpipe.extract import Extractor URL='http://sportv.globo.com/site/eventos/mundial-de-motovelocidade/noticia/2016/06/em-duelo-eletrizante-rossi-vence-marquez-salom-e-homenageado.html' # URL='http://grandepremio.uol.com.br/motogp/noticias/rossi-supera-largada-ruim-vence-duelo-com-marquez-e-chega-a-10-vitoria-na-catalunha-lorenzo-abandona' extractor = Extractor(extractor='ArticleExtractor', url=URL) print extractor.getText().encode('utf-8')
from __future__ import unicode_literals from mopidy import models import spotify def to_track(sp_track): if not sp_track.is_loaded: return if sp_track.error != spotify.ErrorType.OK: return if sp_track.availability != spotify.TrackAvailability.AVAILABLE: return # TODO artists # TODO album # TODO date from album # TODO bitrate return models.Track( uri=sp_track.link.uri, name=sp_track.name, length=sp_track.duration, track_no=sp_track.index) def to_playlist(sp_playlist, folders=None, username=None): if not isinstance(sp_playlist, spotify.Playlist): return if not sp_playlist.is_loaded: return name = sp_playlist.name if name is None: name = 'Starred' # TODO Reverse order of tracks in starred playlists? if folders is not None: name = '/'.join(folders + [name]) if username is not None and sp_playlist.owner.canonical_name != username: name = '%s by %s' % (name, sp_playlist.owner.canonical_name) tracks = [to_track(sp_track) for sp_track in sp_playlist.tracks] tracks = filter(None, tracks) return models.Playlist( uri=sp_playlist.link.uri, name=name, tracks=tracks)
Add TODOs on how to expose non-playable tracks
from __future__ import unicode_literals from mopidy import models import spotify def to_track(sp_track): if not sp_track.is_loaded: return # TODO Return placeholder "[loading]" track? if sp_track.error != spotify.ErrorType.OK: return # TODO Return placeholder "[error]" track? if sp_track.availability != spotify.TrackAvailability.AVAILABLE: return # TODO Return placeholder "[unavailable]" track? # TODO artists # TODO album # TODO date from album # TODO bitrate return models.Track( uri=sp_track.link.uri, name=sp_track.name, length=sp_track.duration, track_no=sp_track.index) def to_playlist(sp_playlist, folders=None, username=None): if not isinstance(sp_playlist, spotify.Playlist): return if not sp_playlist.is_loaded: return # TODO Return placeholder "[loading]" playlist? name = sp_playlist.name if name is None: name = 'Starred' # TODO Reverse order of tracks in starred playlists? if folders is not None: name = '/'.join(folders + [name]) if username is not None and sp_playlist.owner.canonical_name != username: name = '%s by %s' % (name, sp_playlist.owner.canonical_name) tracks = [to_track(sp_track) for sp_track in sp_playlist.tracks] tracks = filter(None, tracks) return models.Playlist( uri=sp_playlist.link.uri, name=name, tracks=tracks)
import unittest import delighted class ClientTest(unittest.TestCase): def test_instantiating_client_requires_api_key(self): self.assertRaises(ValueError, lambda: delighted.Client()) delighted.Client(api_key='abc123')
Make no-api-key test more reliable
import unittest

import delighted


class ClientTest(unittest.TestCase):
    def test_instantiating_client_requires_api_key(self):
        original_api_key = delighted.api_key
        try:
            delighted.api_key = None
            self.assertRaises(ValueError, lambda: delighted.Client())
            delighted.Client(api_key='abc123')
        finally:
            delighted.api_key = original_api_key
#!@PYTHON_EXECUTABLE@ #ckwg +5 # Copyright 2011 by Kitware, Inc. All Rights Reserved. Please refer to # KITWARE_LICENSE.TXT for licensing information, or contact General Counsel, # Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065. def test_import(): try: import vistk.pipeline_util.bake except: test_error("Failed to import the bake module") def test_api_calls(path): from vistk.scoring import scoring_result result = scoring_result.ScoringResult(1, 1, 1) result.hit_count result.miss_count result.truth_count result.percent_detection() result.precision() result + result def main(testname): if testname == 'import': test_import() elif testname == 'api_calls': test_api_calls() else: test_error("No such test '%s'" % testname) if __name__ == '__main__': import os import sys if not len(sys.argv) == 5: test_error("Expected four arguments") sys.exit(1) testname = sys.argv[1] os.chdir(sys.argv[2]) sys.path.append(sys.argv[3]) from vistk.test.test import * try: main(testname) except BaseException as e: test_error("Unexpected exception: %s" % str(e))
Fix the expected argument check in scoring tests
#!@PYTHON_EXECUTABLE@ #ckwg +5 # Copyright 2011 by Kitware, Inc. All Rights Reserved. Please refer to # KITWARE_LICENSE.TXT for licensing information, or contact General Counsel, # Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065. def test_import(): try: import vistk.pipeline_util.bake except: test_error("Failed to import the bake module") def test_api_calls(path): from vistk.scoring import scoring_result result = scoring_result.ScoringResult(1, 1, 1) result.hit_count result.miss_count result.truth_count result.percent_detection() result.precision() result + result def main(testname): if testname == 'import': test_import() elif testname == 'api_calls': test_api_calls() else: test_error("No such test '%s'" % testname) if __name__ == '__main__': import os import sys if not len(sys.argv) == 4: test_error("Expected three arguments") sys.exit(1) testname = sys.argv[1] os.chdir(sys.argv[2]) sys.path.append(sys.argv[3]) from vistk.test.test import * try: main(testname) except BaseException as e: test_error("Unexpected exception: %s" % str(e))
import importlib import pkgutil import sys from collections import OrderedDict from inselect.lib.metadata import MetadataTemplate from inselect.lib.utils import debug_print _library = None def library(): """Returns a list of MetadataTemplate instances """ global _library if not _library: _library = _load_library() return _library def _load_library(): # Import everything inselect.lib.templates that has a 'template' name # that is an instance of MetadataTemplate. # Returns an instance of OrderedDict with items sorted by key. try: templates = importlib.import_module('.lib.templates', 'inselect') except ImportError,e: debug_print(e) else: library = {} for loader, name, is_pkg in pkgutil.iter_modules(templates.__path__): try: pkg = importlib.import_module('{0}.{1}'.format(templates.__name__, name)) except ImportError,e: debug_print(u'Error importing [{0}]: [{1}]'.format(name, e)) else: template = getattr(pkg, 'template', None) if isinstance(template, MetadataTemplate): debug_print('Loaded MetadataTemplate from [{0}]'.format(name)) # TODO Raise if duplicated name library[template.name] = template else: msg = u'Not an instance of MetadataTemplate [{0}]' debug_print(msg.format(name)) return OrderedDict(sorted(library.iteritems()))
Fix metadata template import on OS X
import importlib import pkgutil import sys from collections import OrderedDict from inselect.lib.metadata import MetadataTemplate from inselect.lib.utils import debug_print from inselect.lib.templates import dwc, price if True: _library = {} for template in [p.template for p in (dwc, price)]: _library[template.name] = template _library = OrderedDict(sorted(_library.iteritems())) def library(): return _library else: # More flexible solution that breaks with frozen build on OS X using # PyInstaller _library = None def library(): """Returns a list of MetadataTemplate instances """ global _library if not _library: _library = _load_library() return _library def _load_library(): # Import everything inselect.lib.templates that has a 'template' name # that is an instance of MetadataTemplate. # Returns an instance of OrderedDict with items sorted by key. templates = importlib.import_module('inselect.lib.templates') library = {} for loader, name, is_pkg in pkgutil.iter_modules(templates.__path__): try: pkg = importlib.import_module('{0}.{1}'.format(templates.__name__, name)) except ImportError,e: debug_print(u'Error importing [{0}]: [{1}]'.format(name, e)) else: template = getattr(pkg, 'template', None) if isinstance(template, MetadataTemplate): debug_print('Loaded MetadataTemplate from [{0}]'.format(name)) # TODO Raise if duplicated name library[template.name] = template else: msg = u'Not an instance of MetadataTemplate [{0}]' debug_print(msg.format(name)) return OrderedDict(sorted(library.iteritems()))
from setuptools import setup, find_packages setup( name='lightstep', version='2.2.0', description='LightStep Python OpenTracing Implementation', long_description='', author='LightStep', license='', install_requires=['thrift==0.9.2', 'jsonpickle', 'pytest', 'basictracer>=2.2,<2.3', 'opentracing>=1.2,<1.3'], tests_require=['sphinx', 'sphinx-epytext'], classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', ], keywords=[ 'opentracing', 'lightstep', 'traceguide', 'tracing', 'microservices', 'distributed' ], packages=find_packages(exclude=['docs*', 'tests*', 'sample*']), )
Remove explicit OT dep; we get it via basictracer
from setuptools import setup, find_packages setup( name='lightstep', version='2.2.0', description='LightStep Python OpenTracing Implementation', long_description='', author='LightStep', license='', install_requires=['thrift==0.9.2', 'jsonpickle', 'pytest', 'basictracer>=2.2,<2.3'], tests_require=['sphinx', 'sphinx-epytext'], classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', ], keywords=[ 'opentracing', 'lightstep', 'traceguide', 'tracing', 'microservices', 'distributed' ], packages=find_packages(exclude=['docs*', 'tests*', 'sample*']), )
from codecs import open as codecs_open from setuptools import setup, find_packages # Get the long description from the relevant file with codecs_open('README.rst', encoding='utf-8') as f: long_description = f.read() setup(name='borica', version='0.0.1', description=u"Python integration for Borica", long_description=long_description, classifiers=[], keywords='', author=u"Jordan Jambazov", author_email='jordan.jambazov@era.io', url='https://github.com/IOEra/borica', license='MIT', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), include_package_data=True, zip_safe=False, install_requires=[ 'click', 'pycrypto', ], extras_require={ 'test': ['pytest'], }, entry_points=""" [console_scripts] borica=borica.scripts.cli:cli """ )
Fix issue - load README.md, not .rst
from codecs import open as codecs_open from setuptools import setup, find_packages # Get the long description from the relevant file with codecs_open('README.md', encoding='utf-8') as f: long_description = f.read() setup(name='borica', version='0.0.1', description=u"Python integration for Borica", long_description=long_description, classifiers=[], keywords='', author=u"Jordan Jambazov", author_email='jordan.jambazov@era.io', url='https://github.com/IOEra/borica', license='MIT', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), include_package_data=True, zip_safe=False, install_requires=[ 'click', 'pycrypto', ], extras_require={ 'test': ['pytest'], }, entry_points=""" [console_scripts] borica=borica.scripts.cli:cli """ )
#!/usr/bin/env python from setuptools import setup setup( name="htmlgen", version="1.1.0", description="HTML 5 Generator", long_description=open("README.rst").read(), author="Sebastian Rittau", author_email="srittau@rittau.biz", url="https://github.com/srittau/python-htmlgen", packages=["htmlgen", "test_htmlgen"], package_data={"htmlgen": ["*.pyi", "py.typed"]}, python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", tests_require=["asserts >= 0.8.0, < 0.9", "typing"], license="MIT", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Text Processing :: Markup :: HTML", ], )
Mark as supporting Python 3.7
#!/usr/bin/env python from setuptools import setup setup( name="htmlgen", version="1.1.0", description="HTML 5 Generator", long_description=open("README.rst").read(), author="Sebastian Rittau", author_email="srittau@rittau.biz", url="https://github.com/srittau/python-htmlgen", packages=["htmlgen", "test_htmlgen"], package_data={"htmlgen": ["*.pyi", "py.typed"]}, python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", tests_require=["asserts >= 0.8.0, < 0.9", "typing"], license="MIT", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Text Processing :: Markup :: HTML", ], )
'''setup script for this module'''
from setuptools import setup

def readme():
    '''pull in the readme file for the long description'''
    with open('README.md') as rfile:
        return rfile.read()

setup(
    name='chamberconnectlibrary',
    version='2.1.2',
    description='A library for interfacing with Espec North America chambers',
    long_description=readme(),
    url='https://github.com/EspecNorthAmerica/ChamberConnectLibrary',
    author='Espec North America',
    author_email='mmetzler@espec.com',
    license='MIT',
    packages=['chamberconnectlibrary'],
    install_requires=['pyserial'],
    zip_safe=False,
    keywords='Espec P300 SCP220 F4T F4',
    include_package_data=True,
    scripts=['bin/chamberconnectlibrary-test.py']
)
Correct pypi package; file naming was wrong.
'''setup script for this module'''
from setuptools import setup

def readme():
    '''pull in the readme file for the long description'''
    with open('README.md') as rfile:
        return rfile.read()

setup(
    name='chamberconnectlibrary',
    version='2.1.3',
    description='A library for interfacing with Espec North America chambers',
    long_description=readme(),
    url='https://github.com/EspecNorthAmerica/ChamberConnectLibrary',
    author='Espec North America',
    author_email='mmetzler@espec.com',
    license='MIT',
    packages=['chamberconnectlibrary'],
    install_requires=['pyserial'],
    zip_safe=False,
    keywords='Espec P300 SCP220 F4T F4',
    include_package_data=True,
    scripts=['bin/chamberconnectlibrary-test.py']
)
from threading import Thread, Lock import logging import webview from time import sleep from server import run_server server_lock = Lock() logger = logging.getLogger(__name__) def url_ok(url, port): # Use httplib on Python 2 try: from http.client import HTTPConnection except ImportError: from httplib import HTTPConnection try: conn = HTTPConnection(url, port) conn.request('GET', '/') r = conn.getresponse() return r.status == 200 except: logger.exception('Server not started') return False if __name__ == '__main__': logger.debug('Starting server') t = Thread(target=run_server) t.daemon = True t.start() logger.debug('Checking server') while not url_ok('127.0.0.1', 23948): sleep(1) logger.debug('Server started') window = webview.create_window('My first pywebview application', 'http://127.0.0.1:23948') webview.start(debug=True)
Fix Flask example to allow freezing
import logging import webview from contextlib import redirect_stdout from io import StringIO from threading import Thread, Lock from time import sleep from server import run_server server_lock = Lock() logger = logging.getLogger(__name__) def url_ok(url, port): # Use httplib on Python 2 try: from http.client import HTTPConnection except ImportError: from httplib import HTTPConnection try: conn = HTTPConnection(url, port) conn.request('GET', '/') r = conn.getresponse() return r.status == 200 except: logger.exception('Server not started') return False if __name__ == '__main__': stream = StringIO() with redirect_stdout(stream): logger.debug('Starting server') t = Thread(target=run_server) t.daemon = True t.start() logger.debug('Checking server') while not url_ok('127.0.0.1', 23948): sleep(1) logger.debug('Server started') window = webview.create_window('My first pywebview application', 'http://127.0.0.1:23948') webview.start(debug=True)
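contextlib.redirect_stdout swaps sys.stdout for the duration of the block, which avoids Flask's startup banner writing to a console that a frozen, windowed executable does not have; a standalone sketch of the capture:

from contextlib import redirect_stdout
from io import StringIO

buffer = StringIO()
with redirect_stdout(buffer):
    print('banner output that would otherwise hit a missing console')
assert 'banner' in buffer.getvalue()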
"""Implements the Runner interface fo LDA """ from microscopes.common import validator from microscopes.common.rng import rng from microscopes.lda.kernels import lda_crp_gibbs from microscopes.lda.kernels import lda_sample_dispersion class runner(object): """The LDA runner Parameters ---------- defn : ``model_definition``: The structural definition. view : A list of list of serializable objects (the 'documents') latent : ``state``: The initialization state. """ def __init__(self, defn, view, latent, kernel_config='assign'): self._defn = defn self._view = view self._latent = latent def run(self, r, niters=10000): """Run the lda kernel for `niters`, in a single thread. Parameters ---------- r : random state niters : int """ validator.validate_type(r, rng, param_name='r') validator.validate_positive(niters, param_name='niters') for _ in xrange(niters): lda_crp_gibbs(self._latent, r) lda_sample_dispersion(self._latent, r)
Use C++ implementations of hp sampling
"""Implements the Runner interface fo LDA """ from microscopes.common import validator from microscopes.common.rng import rng from microscopes.lda.kernels import lda_crp_gibbs from microscopes.lda.kernels import sample_gamma, sample_alpha class runner(object): """The LDA runner Parameters ---------- defn : ``model_definition``: The structural definition. view : A list of list of serializable objects (the 'documents') latent : ``state``: The initialization state. """ def __init__(self, defn, view, latent, kernel_config='assign'): self._defn = defn self._view = view self._latent = latent def run(self, r, niters=10000): """Run the lda kernel for `niters`, in a single thread. Parameters ---------- r : random state niters : int """ validator.validate_type(r, rng, param_name='r') validator.validate_positive(niters, param_name='niters') for _ in xrange(niters): lda_crp_gibbs(self._latent, r) sample_gamma(self._latent, r, 5, 0.1) sample_alpha(self._latent, r, 5, 0.1)
#!/usr/bin/env python import os from tempdir import TempDir import pytest boto = pytest.importorskip('boto') from simplekv.net.botostore import BotoStore from basic_store import BasicStore from url_store import UrlStore from bucket_manager import boto_credentials, boto_bucket @pytest.fixture(params=boto_credentials, ids=[c['access_key'] for c in boto_credentials]) def credentials(request): return request.param @pytest.yield_fixture() def bucket(credentials): with boto_bucket(**credentials) as bucket: yield bucket class TestBotoStorage(BasicStore, UrlStore): @pytest.fixture(params=['', '/test-prefix']) def prefix(self, request): return request.param @pytest.fixture def store(self, bucket, prefix): return BotoStore(bucket, prefix) def test_get_filename_nonexistant(self, store): # NOTE: boto misbehaves here and tries to erase the target file # the parent tests use /dev/null, which you really should not try # to os.remove! with TempDir() as tmpdir: with pytest.raises(KeyError): store.get_file('nonexistantkey', os.path.join(tmpdir, 'a'))
Use key fixture in boto tests.
#!/usr/bin/env python import os from tempdir import TempDir import pytest boto = pytest.importorskip('boto') from simplekv.net.botostore import BotoStore from basic_store import BasicStore from url_store import UrlStore from bucket_manager import boto_credentials, boto_bucket @pytest.fixture(params=boto_credentials, ids=[c['access_key'] for c in boto_credentials]) def credentials(request): return request.param @pytest.yield_fixture() def bucket(credentials): with boto_bucket(**credentials) as bucket: yield bucket class TestBotoStorage(BasicStore, UrlStore): @pytest.fixture(params=['', '/test-prefix']) def prefix(self, request): return request.param @pytest.fixture def store(self, bucket, prefix): return BotoStore(bucket, prefix) def test_get_filename_nonexistant(self, store, key): # NOTE: boto misbehaves here and tries to erase the target file # the parent tests use /dev/null, which you really should not try # to os.remove! with TempDir() as tmpdir: with pytest.raises(KeyError): store.get_file(key, os.path.join(tmpdir, 'a'))
from kivy.animation import Animation class RelativeAnimation(Animation): """Class that extends the Kivy Animation base class to add relative animation property target values that are calculated when the animation starts.""" def _initialize(self, widget): """Initializes the animation and calculates the property target value based on the current value plus the desired delta. Notes: Do not call the base class _initialize method as this override completely replaces the base class method.""" d = self._widgets[widget.uid] = { 'widget': widget, 'properties': {}, 'time': None} # get current values and calculate target values p = d['properties'] for key, value in self._animated_properties.items(): original_value = getattr(widget, key) if isinstance(original_value, (tuple, list)): original_value = original_value[:] target_value = map(lambda x, y: x + y, original_value, value) elif isinstance(original_value, dict): original_value = original_value.copy() target_value = value else: target_value = original_value + value p[key] = (original_value, target_value) # install clock self._clock_install()
Fix relative animation of list values
from kivy.animation import Animation class RelativeAnimation(Animation): """Class that extends the Kivy Animation base class to add relative animation property target values that are calculated when the animation starts.""" def _initialize(self, widget): """Initializes the animation and calculates the property target value based on the current value plus the desired delta. Notes: Do not call the base class _initialize method as this override completely replaces the base class method.""" d = self._widgets[widget.uid] = { 'widget': widget, 'properties': {}, 'time': None} # get current values and calculate target values p = d['properties'] for key, value in self._animated_properties.items(): original_value = getattr(widget, key) if isinstance(original_value, (tuple, list)): original_value = original_value[:] target_value = [x + y for x, y in zip(original_value, value)] elif isinstance(original_value, dict): original_value = original_value.copy() target_value = value else: target_value = original_value + value p[key] = (original_value, target_value) # install clock self._clock_install()
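The change matters on Python 3, where multi-argument map returns a lazy iterator instead of a list, so the old code would have stored an unevaluated map object as the target value; the comprehension is eager under both versions. A quick comparison, independent of Kivy:

current = [100, 200]
delta = [10, -20]

lazy = map(lambda x, y: x + y, current, delta)   # a map object on Python 3
eager = [x + y for x, y in zip(current, delta)]  # [110, 180] immediately
assert list(lazy) == eager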
# coding: utf-8 from decouple import config from findaconf.tests.fake_data import fake_conference, seed def set_app(app, db=False): unset_app(db) app.config['TESTING'] = True app.config['WTF_CSRF_ENABLED'] = False if db: app.config['SQLALCHEMY_DATABASE_URI'] = config( 'DATABASE_URL_TEST', default='sqlite:///' + app.config['BASEDIR'].child('findaconf', 'tests', 'tests.db') ) test_app = app.test_client() if db: db.create_all() seed(app, db) [db.session.add(fake_conference(db)) for i in range(1, 43)] db.session.commit() return test_app def unset_app(db=False): if db: db.session.remove() db.drop_all()
Fix bug that used dev db instead of test db
# coding: utf-8 from decouple import config from findaconf.tests.fake_data import fake_conference, seed def set_app(app, db=False): # set test vars app.config['TESTING'] = True app.config['WTF_CSRF_ENABLED'] = False # set test db if db: app.config['SQLALCHEMY_DATABASE_URI'] = config( 'DATABASE_URL_TEST', default='sqlite:///' + app.config['BASEDIR'].child('findaconf', 'tests', 'tests.db') ) # create test app test_app = app.test_client() # create and feed db tables if db: # start from a clean db db.session.remove() db.drop_all() # create tables and feed them db.create_all() seed(app, db) [db.session.add(fake_conference(db)) for i in range(1, 43)] db.session.commit() # return test app return test_app def unset_app(db=False): if db: db.session.remove() db.drop_all()
#!/usr/bin/python3 from random import randint class Student: def __init__(self, id): self.id = id self.papers = [] def assign_paper(self, paper): self.papers.append(paper) def __str__(self): return str(self.id) + ": " + str(self.papers) class Paper: def __init__(self, id): self.id = id def create_bundle_graph(n, k): students = [Student(x + 1) for x in range(n)] papers = [Paper(x + 1) for x in range(n)] while True: for i in range(k): inavai_pap = set() for j in range(len(students)): paper = None while True: paper = papers[randint(0, len(papers) - 1)] if paper.id == students[j].id: continue if paper.id not in inavai_pap and paper.id not in students[j].papers: inavai_pap.add(paper.id) break students[j].assign_paper(paper.id) # make sure not more than one paper is assigned to every two people success = True for i in range(n): for j in range(i + 1, n): cnt = 0 for l in range(k): if students[i].papers[l] == students[j].papers[l]: cnt = cnt + 1 if cnt >= 2: success = False break if not success: break if not success: break if success: break return students
Update validation check for paper bundles.
#!/usr/bin/python3 from random import randint class Student: def __init__(self, id): self.id = id self.papers = [] def assign_paper(self, paper): self.papers.append(paper) def __str__(self): return str(self.id) + ": " + str(self.papers) class Paper: def __init__(self, id): self.id = id def create_bundle_graph(n, k): students = [Student(x + 1) for x in range(n)] papers = [Paper(x + 1) for x in range(n)] while True: for i in range(k): inavai_pap = set() for j in range(len(students)): paper = None while True: paper = papers[randint(0, len(papers) - 1)] if paper.id == students[j].id: continue if paper.id not in inavai_pap and paper.id not in students[j].papers: inavai_pap.add(paper.id) break students[j].assign_paper(paper.id) # make sure not more than one paper is assigned to every two people success = True for i in range(n): for j in range(i + 1, n): cnt = len(set(students[i].papers).intersection(set(students[j].papers))) if cnt >= 2: success = False break if not success: break if success: break return students
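The old loop only counted papers that coincided at the same round index; the set intersection counts papers shared in any position, which is the property the bundle constraint actually needs. A small illustration with hypothetical assignments:

papers_a = [3, 5, 7]
papers_b = [5, 9, 3]

positional = sum(1 for x, y in zip(papers_a, papers_b) if x == y)  # 0
shared = len(set(papers_a).intersection(papers_b))                 # 2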