Column        Type           Min  Max
commit        stringlengths  40   40
subject       stringlengths  1    3.25k
old_file      stringlengths  4    311
new_file      stringlengths  4    311
old_contents  stringlengths  0    26.3k
lang          stringclasses  3 values
proba         float64        0    1
diff          stringlengths  0    7.82k
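Each record below lists these eight fields in order: commit, subject, old_file, new_file, old_contents, lang, proba, diff. The diff column looks like diff-match-patch patch text: hunk headers such as @@ -725,23 +725,23 @@ give character offsets and span lengths, followed by context, deletion (-), and insertion (+) fragments whose bodies are percent-encoded (%0A for a newline). Because those offsets are character-exact against old_contents, the record text has to be kept verbatim for a patch to apply. Under that assumption, a minimal sketch for reconstructing the post-commit file from a record:

```python
# Minimal sketch, assuming the diff column is diff-match-patch patch text.
# Requires the third-party package: pip install diff-match-patch
from diff_match_patch import diff_match_patch

def reconstruct_new_contents(old_contents: str, diff_text: str) -> str:
    """Apply a record's percent-encoded patch to its pre-commit contents."""
    dmp = diff_match_patch()
    patches = dmp.patch_fromText(diff_text)   # parses the @@ -a,b +c,d @@ hunks
    new_contents, hunk_ok = dmp.patch_apply(patches, old_contents)
    if not all(hunk_ok):                      # patch_apply reports per-hunk success
        raise ValueError("one or more hunks failed to apply")
    return new_contents
```

Applied to the first record below, this should rename the vm_images helpers in xblock_skytap/skytap.py to their templates equivalents, matching the commit subject.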
7c9a4b72f59d902ab5daa43b7675641a2e81ebb7
Switch to "templates" terminology for VM images/templates.
xblock_skytap/skytap.py
xblock_skytap/skytap.py
""" """ # Imports ########################################################### from __future__ import absolute_import import skytap as skytap_library from xblock.core import XBlock from xblock.fields import Scope, String from xblock.fragment import Fragment from xblockutils.resources import ResourceLoader from xblockutils.studio_editable import StudioEditableXBlockMixin from .default_data import DEFAULT_DATA from .utils import _ # Globals ########################################################### loader = ResourceLoader(__name__) # Functions ######################################################### def get_projects(): """ """ return ('Dummy project A', 'Dummy project B', 'Dummy project C') def get_vm_images(): """ """ return ('Dummy image A', 'Dummy image B', 'Dummy image C') def get_vms(): """ """ return ('Dummy VM A', 'Dummy VM B', 'Dummy VM C') def get_subscription_types(): """ """ return ('All', 'Dummy subscription A', 'Dummy subscription B', 'Dummy subscription C') # Classes ########################################################### class SkytapXBlock(StudioEditableXBlockMixin, XBlock): """ """ display_name = String( display_name=_("Title"), help=_("The title of this problem. Displayed to learners as a tooltip in the navigation bar."), scope=Scope.settings, default=_("Skytap XBlock"), ) project = String( display_name=_("Project"), help=_("Skytap project to pull VM images/templates from."), scope=Scope.settings, values=get_projects, ) vm_images = String( display_name=_("VM images/Templates"), help=_("List of VM images/templates belonging to this exercise environment."), scope=Scope.settings, values=get_vm_images, ) vms = String( display_name=_("VMs"), help=_("List of VMs to start for selected template."), scope=Scope.settings, values=get_vms, ) subscription_types = String( display_name=_("Subscription types"), help=_("List of subscription types that may access this exercise environment."), scope=Scope.settings, values=get_subscription_types, ) organization_rules = String( display_name=_("Organization rules"), help=_( "Rules that define custom behavior for specific organizations. " "To apply a rule to an organization, add one or more identifiers below the rule name." ), scope=Scope.settings, default=DEFAULT_DATA, multiline_editor=True, ) editable_fields = ("display_name", "project", "vm_images", "subscription_types", "organization_rules") def student_view(self, context): """ """ context = context.copy() if context else {} users = skytap_library.Users() context['users'] = users.json() fragment = Fragment() fragment.add_content(loader.render_template("templates/skytap.html", context)) fragment.add_javascript_url( self.runtime.local_resource_url(self, "public/js/src/skytap.js") ) fragment.initialize_js("SkytapXBlock") return fragment
Python
0
@@ -725,23 +725,23 @@ def get_ -vm_imag +templat es():%0A @@ -773,20 +773,23 @@ ('Dummy -imag +templat e A', 'D @@ -793,20 +793,23 @@ 'Dummy -imag +templat e B', 'D @@ -813,20 +813,23 @@ 'Dummy -imag +templat e C')%0A%0A%0A @@ -1530,26 +1530,16 @@ to pull -VM images/ template @@ -1619,23 +1619,23 @@ )%0A%0A -vm_imag +templat es = Str @@ -1663,26 +1663,16 @@ name=_(%22 -VM images/ Template @@ -1704,18 +1704,8 @@ of -VM images/ temp @@ -1802,23 +1802,23 @@ ues=get_ -vm_imag +templat es,%0A @@ -2672,15 +2672,15 @@ %22, %22 -vm_imag +templat es%22,
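The hunk bodies above are percent-encoded, so %0A marks a newline inside a fragment. A quick way to eyeball a fragment (this one is copied verbatim from the diff above, where -vm_imag/+templat is the deleted/inserted run) is Python's urllib:

```python
from urllib.parse import unquote

# Fragment copied from the diff above; "-vm_imag" is deleted text and
# "+templat" is the inserted replacement.
fragment = "def get_ -vm_imag +templat es():%0A"
print(unquote(fragment))  # %0A decodes to a newline
```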
a76a42b195e3f729b7f3f6ef8389e76a72728c62
add fluentd-elasticsearch addon
src/seedbox/config_renderer/charts.py
src/seedbox/config_renderer/charts.py
import io import os import re import tarfile import requests from jinja2 import Environment from seedbox import config NOT_SPECIFIED = object() jinja_var_env = Environment(autoescape=False) jinja_env = Environment(keep_trailing_newline=True, autoescape=False) class Addon: base_url = 'https://github.com/kubernetes/kubernetes/raw/release-{version}/cluster/addons/{path}/' encoding = 'utf-8' def __init__(self, name, version, manifest_files, vars_map=None, is_salt_template=False, path=None, base_url=None, notes=None): if vars_map is None: vars_map = {} if path is None: path = name if base_url is None: base_url = self.base_url self.name = name self.version = version self.manifest_files = [] for fname in manifest_files: fname = base_url.format(path=path, version=self.version) + fname self.manifest_files.append(fname) self.vars_map = vars_map self.is_salt_template = is_salt_template self.notes = notes def render_files(self, cluster): yield 'Chart.yaml', self.render_chart_yaml() yield 'values.yaml', self.render_values_yaml(cluster) for url in self.manifest_files: filename, content = self.render_manifest_file(cluster, url) yield os.path.join('templates', filename), content if self.notes: yield os.path.join('templates', 'NOTES.txt'), self.notes.encode(self.encoding) def render_chart_yaml(self): return 'name: {}\nversion: {}\n'.format(self.name, self.version).encode(self.encoding) def render_values_yaml(self, cluster): return ''.join('{}: {}\n'.format(var_name, jinja_var_env.from_string(var_tpl).render({ 'config': config, 'cluster': cluster, })) for var_name, var_tpl in self.vars_map.items()).encode(self.encoding) def render_manifest_file(self, cluster, url): pillar = SaltPillarEmulator(cluster) resp = requests.get(url) resp.raise_for_status() content = resp.content if self.is_salt_template: t = jinja_env.from_string(content.decode(self.encoding)) content = t.render({ 'pillar': pillar, }).encode(self.encoding) else: for var_name in self.vars_map.keys(): var_name = var_name.encode(self.encoding) content = content.replace(b'$' + var_name, b'{{ .Values.%s }}' % var_name) filename = os.path.basename(url) m = re.match(r'(.*\.yaml).*', filename) if m: filename = m.group(1) return filename, content class SaltPillarEmulator: def __init__(self, cluster): self.cluster = cluster def get(self, var_name, default=NOT_SPECIFIED): try: return getattr(self, '_' + var_name) except AttributeError: if default is NOT_SPECIFIED: raise else: return default @property def _num_nodes(self): return self.cluster.nodes.count() dashboard_notes = '''1. Start kube proxy: $ kubectl proxy 2. 
Open dashboard in a browser: http://localhost:8001/ui/ ''' addons = { 'dns': { '1.5': Addon('dns', '1.5', [ 'skydns-rc.yaml.sed', 'skydns-svc.yaml.sed', ], { 'DNS_DOMAIN': '{{ config.k8s_cluster_domain }}', 'DNS_SERVER_IP': '{{ cluster.k8s_dns_service_ip }}', }), '1.6': Addon('dns', '1.6', [ 'kubedns-cm.yaml', 'kubedns-sa.yaml', 'kubedns-controller.yaml.sed', 'kubedns-svc.yaml.sed', ], { 'DNS_DOMAIN': '{{ config.k8s_cluster_domain }}', 'DNS_SERVER_IP': '{{ cluster.k8s_dns_service_ip }}', }), }, 'dns-horizontal-autoscaler': { '1.5': Addon('dns-horizontal-autoscaler', '1.5', ['dns-horizontal-autoscaler.yaml']), '1.6': Addon('dns-horizontal-autoscaler', '1.6', ['dns-horizontal-autoscaler.yaml']), }, 'dashboard': { '1.5': Addon('dashboard', '1.5', [ 'dashboard-controller.yaml', 'dashboard-service.yaml', ], notes=dashboard_notes), '1.6': Addon('dashboard', '1.6', [ 'dashboard-controller.yaml', 'dashboard-service.yaml', ], notes=dashboard_notes), }, 'heapster': { '1.5': Addon('heapster', '1.5', [ 'heapster-controller.yaml', 'heapster-service.yaml', ], is_salt_template=True, path='cluster-monitoring/standalone'), '1.6': Addon('heapster', '1.6', [ 'heapster-controller.yaml', 'heapster-service.yaml', ], is_salt_template=True, path='cluster-monitoring/standalone'), }, } class TarFile(tarfile.TarFile): def adddata(self, path, data): info = tarfile.TarInfo(path) info.size = len(data) self.addfile(info, io.BytesIO(data)) def render_addon_tgz(cluster, addon): tgz_fp = io.BytesIO() with TarFile.open(fileobj=tgz_fp, mode='w:gz') as tgz: for path, content in addon.render_files(cluster): tgz.adddata(os.path.join(addon.name, path), content) return tgz_fp.getvalue()
Python
0
@@ -4875,16 +4875,426 @@ %0A %7D,%0A + 'fluentd-elasticsearch': %7B%0A '1.6': Addon('fluentd-elasticsearch', '1.6', %5B%0A 'es-controller.yaml',%0A 'es-service.yaml',%0A 'fluentd-es-ds.yaml',%0A 'kibana-controller.yaml',%0A 'kibana-service.yaml',%0A %5D, notes='Documentation: '%0A 'https://kubernetes.io/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/'),%0A %7D,%0A %7D%0A%0A%0Aclas
87d792fda8763f49d83ce274015f3a436a0c89cc
send message after stuff is started
dusty/commands/run.py
dusty/commands/run.py
from ..compiler import (compose as compose_compiler, nginx as nginx_compiler, port_spec as port_spec_compiler, spec_assembler) from ..systems import compose, hosts, nginx, virtualbox def start_local_env(): """ This command will use the compilers to get compose specs will pass those specs to the systems that need them. Those systems will in turn launch the services needed to make the local environment go""" assembled_spec = spec_assembler.get_assembled_specs() port_spec = port_spec_compiler.get_port_spec_document(assembled_spec) nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec) compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec) hosts.update_hosts_file_from_port_spec(port_spec) virtualbox.update_virtualbox_port_forwarding_from_port_spec(port_spec) nginx.update_nginx_from_config(nginx_config) compose.update_running_containers_from_spec(compose_config)
Python
0
@@ -968,16 +968,67 @@ compose_config)%0A +%0A yield %22Your local environment is now started%22%0A
7f2ac925b2343e57ad7f4a6d79ee24e14c8f4d78
Add a Bazel rule assignment_notebook().
exercises/defs.bzl
exercises/defs.bzl
# TODO(salikh): Implement the automatic tar rules too def assignment_notebook_macro( name, srcs, language = None, visibility = ["//visibility:private"]): """ Defines a rule for student notebook and autograder generation from a master notebook. Arguments: name: srcs: the file name of the input notebook should end in '-master.ipynb'. """ language_opt = "" if language: language_opt = " --language=" + language native.genrule( name = name + "_student", srcs = srcs, outs = [name + '-student.ipynb'], cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --preamble=$(location //exercises:preamble.py) --command=student""" + language_opt, tools = [ "//go/cmd/assign", "//exercises:preamble.py", ], ) autograder_output = name + '-autograder' native.genrule( name = name + "_autograder", srcs = srcs, outs = [autograder_output], cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --command=autograder""" + language_opt, tools = [ "//go/cmd/assign", ], )
Python
0
@@ -1048,20 +1048,2308 @@ /assign%22,%0A%09%5D,%0A )%0A +%0Adef _assignment_notebook_impl(ctx):%0A print(%22src = %22, ctx.attr.src)%0A print(%22src.path = %22, ctx.file.src.path)%0A outs = %5B%5D%0A languages = ctx.attr.languages%0A inputs = %5Bctx.file.src%5D%0A preamble_opt = %22%22%0A if ctx.file.preamble:%0A preamble_opt = %22 --preamble='%22 + ctx.file.preamble.path + %22'%22%0A inputs.append(ctx.file.preamble)%0A if len(languages) == 0:%0A # Force the language-agnostic notebook generation by default.%0A languages = %5B%22%22%5D%0A for lang in languages:%0A outfile = ctx.label.name + (%22-%22 + lang if lang else %22%22) + %22-student.ipynb%22%0A out = ctx.actions.declare_file(outfile)%0A outs.append(out)%0A language_opt = %22%22%0A if lang:%0A language_opt = %22 -language='%22 + lang + %22'%22%0A print(%22 command = %22 + ctx.executable._assign.path + %22 --command=student --input='%22 + ctx.file.src.path + %22'%22 + %22 --output='%22 + out.path + %22'%22 + language_opt + preamble_opt)%0A ctx.actions.run_shell(%0A inputs = inputs,%0A outputs = %5Bout%5D,%0A tools = %5Bctx.executable._assign%5D,%0A progress_message = %22Running %25s%22 %25 ctx.executable._assign.path,%0A command = ctx.executable._assign.path + %22 --command=student --input='%22 + ctx.file.src.path + %22'%22 + %22 --output='%22 + out.path + %22'%22 + language_opt + preamble_opt,%0A )%0A return %5BDefaultInfo(files = depset(outs))%5D%0A%0A# Defines a rule for student notebook and autograder%0A# generation from a master notebook.%0A#%0A# Arguments:%0A# name:%0Aassignment_notebook = rule(%0A implementation = _assignment_notebook_impl,%0A attrs = %7B%0A # Specifies the list of languages to generate student notebooks.%0A # If omitted, defaults to empty list, which means that a%0A # single language-agnostic notebook will be generated.%0A # It is also possible to generate language-agnostic notebook%0A # (skipping filtering by language) by adding an empty string%0A # value to languages.%0A %22languages%22: attr.string_list(default=%5B%5D, mandatory=False),%0A # The file name of the input notebook.%0A %22src%22: attr.label(%0A%09mandatory=True,%0A%09allow_single_file=True),%0A # If present, specifies the label of the preamble file.%0A %22preamble%22: attr.label(%0A%09default=None,%0A%09mandatory=False,%0A allow_single_file=True),%0A %22_assign%22: attr.label(%0A%09default = Label(%22//go/cmd/assign%22),%0A%09allow_single_file = True,%0A%09executable = True,%0A%09cfg = %22host%22,%0A ),%0A %7D,%0A)%0A%0A
36fde4f2795d95d18a2862ba1d8d343f1ebe3185
add interfaces_ip filter
library/napalm_get_facts.py
library/napalm_get_facts.py
#!/usr/bin/python # -*- coding: utf-8 -*- """ (c) 2016 Elisa Jasinska <elisa@bigwaveit.org> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ DOCUMENTATION = ''' --- module: napalm_get_facts author: "Elisa Jasinska (@fooelisa)" version_added: "2.1" short_description: "Gathers facts from a network device via napalm" description: - "Gathers facts from a network device via the Python module napalm" requirements: - napalm options: hostname: description: - IP or FQDN of the device you want to connect to required: True username: description: - Username required: True password: description: - Password required: True dev_os: description: - OS of the device required: True choices: ['eos', 'junos', 'iosxr', 'fortios', 'ibm', 'ios', 'nxos', 'panos'] timeout: description: - Time in seconds to wait for the device to respond required: False default: 60 optional_args: description: - Dictionary of additional arguments passed to underlying driver required: False default: None filter: description: - A list of facts to retreive from a device and provided though C(ansible_facts) The following facts are available- facts, environment, interfaces, interfaces_counter, bgp_config, bgp_neighbors, bgp_neighbors_detail, lldp_neighbors, lldp_neighbors_detail Note- not all getters are implemented on all supported devcie types required: False default: ['facts'] ''' EXAMPLES = ''' - name: get facts from device napalm_get_facts: hostname={{ inventory_hostname }} username={{ user }} dev_os={{ os }} password={{ passwd }} filter=['facts'] register: result - name: print data debug: var=result ''' RETURN = ''' changed: description: "whether the command has been executed on the device" returned: always type: bool sample: True ansible_facts: description: "Facts gathered on the device provided via C(ansible_facts)" returned: certain keys are returned depending on filter type: dict ''' try: from napalm_base import get_network_driver except ImportError: napalm_found = False else: napalm_found = True def main(): module = AnsibleModule( argument_spec=dict( hostname=dict(type='str', required=True), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True), dev_os=dict(type='str', required=True, choices=['eos', 'junos', 'iosxr', 'fortios', 'ibm', 'ios', 'nxos', 'panos']), timeout=dict(type='int', required=False, default=60), optional_args=dict(type='dict', required=False, default=None), filter=dict(type='list', required=False, default=['facts']), ), supports_check_mode=True ) if not napalm_found: module.fail_json(msg="the python module napalm is required") hostname = module.params['hostname'] username = module.params['username'] dev_os = module.params['dev_os'] password = module.params['password'] timeout = module.params['timeout'] filter_list = module.params['filter'] if module.params['optional_args'] is None: optional_args = {} else: optional_args = module.params['optional_args'] # open device connection try: network_driver = 
get_network_driver(dev_os) device = network_driver(hostname=hostname, username=username, password=password, timeout=timeout, optional_args=optional_args) device.open() except Exception, e: module.fail_json(msg="cannot connect to device: " + str(e)) # retreive data from device facts = {} try: for filter in filter_list: if filter == 'facts': result = device.get_facts() facts['facts'] = result elif filter == 'interfaces': result = device.get_interfaces() facts['interfaces'] = result elif filter == 'interfaces_counter': result = device.get_interfaces_counter() facts['interfaces_counter'] = result elif filter == 'bgp_config': result = device.get_bgp_config() facts['bgp_config'] = result elif filter == 'bgp_neighbors': result = device.get_bgp_neighbors() facts['bgp_neighbors'] = result elif filter == 'bgp_neighbors_detail': result = device.get_bgp_neighbors_detail() facts['bgp_neighbors_detail'] = result elif filter == 'environment': result = device.get_environment() facts['environment'] = result elif filter == 'lldp_neighbors': result = device.get_lldp_neighbors() facts['lldp_neighbors'] = result elif filter == 'lldp_neighbors_detail': result = device.get_lldp_neighbors_detail() facts['lldp_neighbors_detail'] = result else: module.fail_json(msg="filter not recognized: " + filter) except Exception, e: module.fail_json(msg="cannot retrieve device data: " + str(e)) # close device connection try: device.close() except Exception, e: module.fail_json(msg="cannot close device connection: " + str(e)) module.exit_json(ansible_facts=facts) # standard ansible module imports from ansible.module_utils.basic import * if __name__ == '__main__': main()
Python
0.000001
@@ -4878,32 +4878,176 @@ aces'%5D = result%0A + elif filter == 'interfaces_ip':%0A result = device.get_interfaces_ip()%0A facts%5B'interfaces_ip'%5D = result%0A elif
fd92c0b2964bce5d56b9bf41e84bfde24fec0b78
raise default post limit to 25q
djangofeeds/managers.py
djangofeeds/managers.py
from datetime import timedelta, datetime from django.db import models from django.db.models.query import QuerySet from django.core.exceptions import MultipleObjectsReturned from djangofeeds.utils import truncate_field_data """ .. data:: DEFAULT_POST_LIMIT The default limit of number of posts to keep in a feed. Default is 5 posts. """ DEFAULT_POST_LIMIT = 5 def update_with_dict(obj, fields): """Update and save a model from the values of a :class:`dict`.""" set_value = lambda (name, val): setattr(obj, name, val) map(set_value, fields.items()) obj.save() return obj class ExtendedQuerySet(QuerySet): def update_or_create(self, **kwargs): obj, created = self.get_or_create(**kwargs) if not created: fields = dict(kwargs.pop("defaults", {})) fields.update(kwargs) update_with_dict(obj, fields) return obj def since(self, interval): """Return all the feeds refreshed since a specified amount of seconds.""" threshold = datetime.now() - timedelta(seconds=interval) return self.filter(date_last_refresh__lt=threshold) def ratio(self, min=None, max=None): """Select feeds based on ratio. :param min: Don't include feeds with a ratio lower than this. :param max: Don't include feeds with a ratio higher than this. """ query = {} if min is not None: query["ratio__gt"] = min if max is not None: query["ratio__lt"] = max return self.filter(**query) def frequency(self, min=None, max=None): """Select feeds based on update frequency. :param min: Don't include feeds with a frequency lower than this. :param max: Don't include feeds with a frequency higher than this. """ query = {} if min is not None: query["freq__gt"] = min if max is not None: query["freq__lt"] = max return self.filter(**query) class ExtendedManager(models.Manager): """Manager supporting :meth:`update_or_create`.""" def get_query_set(self): return ExtendedQuerySet(self.model) def update_or_create(self, **kwargs): return self.get_query_set().update_or_create(**kwargs) class FeedManager(ExtendedManager): """Manager for :class:`djangofeeds.models.Feed`.""" def since(self, interval): return self.get_query_set().since(interval) def ratio(self, *args, **kwargs): return self.get_query_set().ratio(*args, **kwargs) def frequency(self, *args, **kwargs): return self.get_query_set().frequency(*args, **kwargs) class PostManager(ExtendedManager): """Manager class for Posts""" def all_by_order(self, limit=DEFAULT_POST_LIMIT): """Get feeds using the default sort order.""" ordering = self.model._meta.ordering return self.all().order_by(*ordering)[:limit] def update_or_create(self, feed_obj, **fields): """Update post with new values.""" super_update = super(PostManager, self).update_or_create defaults = truncate_field_data(self.model, fields) try: return super_update(guid=fields["guid"], feed=feed_obj, defaults=defaults) except MultipleObjectsReturned: self.filter(guid=fields["guid"], feed=feed_obj).delete() super_update(guid=fields["guid"], feed=feed_obj, defaults=defaults) class CategoryManager(ExtendedManager): pass class EnclosureManager(ExtendedManager): pass
Python
0
@@ -363,16 +363,17 @@ LIMIT = +2 5%0A%0A%0Adef
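This record has the smallest diff in the sample, which makes the hunk header easy to read: replace a 16-character span at offset 363 with a 17-character span; the body keeps " LIMIT = " and "5" as context and inserts "2" between them. Assuming that reading is right, the effect on the source is:

```python
# Worked reading of the one-character insertion above: "2" lands in front
# of the existing "5", raising the default from 5 to 25 (the trailing "q"
# in the commit subject looks like a stray keystroke).
old = "DEFAULT_POST_LIMIT = 5"
new = old.replace("= 5", "= 25")
assert new == "DEFAULT_POST_LIMIT = 25"
```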
27e67bf1311512e83978bb82606ef73127efece1
version number
emcee/__init__.py
emcee/__init__.py
# -*- coding: utf-8 -*- from __future__ import print_function, absolute_import __version__ = "3.0.0.dev0" __bibtex__ = """ @article{emcee, author = {{Foreman-Mackey}, D. and {Hogg}, D.~W. and {Lang}, D. and {Goodman}, J.}, title = {emcee: The MCMC Hammer}, journal = {PASP}, year = 2013, volume = 125, pages = {306-312}, eprint = {1202.3665}, doi = {10.1086/670067} } """ try: __EMCEE_SETUP__ except NameError: __EMCEE_SETUP__ = False if not __EMCEE_SETUP__: from .ensemble import EnsembleSampler from . import moves from . import autocorr from . import backends __all__ = ["EnsembleSampler", "moves", "autocorr", "backends"]
Python
0.000291
@@ -96,15 +96,11 @@ %223.0 -.0.dev0 +rc1 %22%0A__
f274f927d600989db1d485212d116166695e6edd
Use keyword arguments for readability
scell/core.py
scell/core.py
""" scell.core ~~~~~~~~~~ Provides abstractions over lower level APIs and file objects and their interests. """ from select import select as _select from collections import namedtuple def select(rl, wl, timeout=None): """ Returns the file objects ready for reading/writing from the read-list (*rl*) and write-list (*wl*), subject to *timeout* in seconds. :param rl: Objects interested in readability. :param wl: Objects interested in writability. :param timeout: Maximum blocking time in seconds, *None* for no timeout. """ if not (rl or wl): return [], [] readers, writers, _ = _select(rl, wl, (), timeout) return readers, writers class Monitored(namedtuple('_Monitored', 'fp,wants_read,wants_write,callback')): """ Represents the interests of a file handle *fp*, and whether it *wants_read* and or *wants_write*, as well as an attached *callback*. """ __slots__ = () class Event(namedtuple('_Event', 'monitored,readable,writable,fp,callback,ready')): """ Represents the readability or writability of a *monitored* file object. """ __slots__ = () def __new__(cls, monitored, readable, writable): ready = ( readable >= monitored.wants_read and writable >= monitored.wants_write ) return super(Event, cls).__new__( cls, monitored, readable, writable, monitored.fp, monitored.callback, ready, )
Python
0.000001
@@ -1478,32 +1478,35 @@ le,%0A +fp= monitored.fp,%0A @@ -1507,32 +1507,41 @@ fp,%0A +callback= monitored.callba @@ -1552,24 +1552,30 @@ +ready= ready,%0A
e7cce08f32516bc8b15df7eee0c285eebe795cab
Make it easier to filter on multiple field values
explorer/search.py
explorer/search.py
from . import config from .document import Document import requests from time import time def perform_search(**params): response = requests.get( config.GOVUK_SEARCH_API, params=params, auth=config.AUTH, ) return response.json() def fetch_documents(scope): documents = perform_search(**fetch_document_args(scope)) facets = {} for field in Document.FACET_FIELDS: start = time() facet_results = perform_search(**fetch_facet_args(scope, field)) facets[field] = facet_results["facets"][field] print "Fetched %s facet in %fs" % (field, time() - start) return present_documents(documents, facets) def fetch_lots_of_documents(scope, max_documents): fetched = 0 search_args = fetch_document_args(scope) while fetched < max_documents: search_args["start"] = fetched documents = perform_search(**search_args).get("results", []) if len(documents) == 0: break for document in documents: yield Document(document) fetched += 1 def fetch_document_args(scope): args = scope.search_args() args["count"] = 1000 args["fields"] = ",".join(Document.DISPLAY_FIELDS) return args def fetch_facet_args(scope, facet_field): args = scope.search_args() args["count"] = 0 args["facet_" + facet_field] = "1000,scope:all_filters" return args def present_documents(documents, facets): return { "count": documents["total"], "documents": [Document(document) for document in documents["results"] ], "facets": facets, }
Python
0.000001
@@ -1384,19 +1384,28 @@ ope: -all +exclude_field _filter -s %22%0A
82bb3688354334f61cf23a7c380506528d059a0f
Add a configuration variable for the email 'From' field
comments.py
comments.py
#!/usr/bin/env python """ CGI script that takes takes a POST to the address ./add_comment (normally from a comment form) and sends that comment formatted in yaml to the email address set in COMMENT_EMAIL. The resulting yaml file is meant to be used with Jekyll::StaticComments. See https://github.com/mpalmer/jekyll-static-comments/ http://theshed.hezmatt.org/jekyll-static-comments/ """ import web import cgi import time import os, os.path import smtplib from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart # config COMMENT_EMAIL = "blog@example.com" MAX_SIZE = 1024 MAX_SIZE_COMMENT = 102400 URLS = ( '/add_comment', 'CommentHandler' ) def is_test(): """ Returns true iff the environment variable WEBPY_ENV is set to "test". """ webpy_env = os.environ.get('WEBPY_ENV', '') return webpy_env == 'test' class CommentHandler: """ Class meant to be used by web.py. Handles POST requests in the POST method """ DATE_FORMAT = "%Y-%m-%d %H:%M:%S" ACK_MSG = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8" /> <meta http-equiv="refresh" content="5;url=%(return_url)s" /> <title>Comment Received</title> </head> <body> <h1>Comment Received</h1> <p>Thank you for your comment. It will be reviewed and published shortly.</p> <p>You are now returning to the page you were on. Click the link if you are not redirected automatically.</p> <p><a href="%(return_url)s">%(return_url)s</a></p> </body> </html> """ ERROR_MSG = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8" /> <title>Problem with input</title> </head> <body> <h1>Error!</h1> <p>The following error was detected: <br/> %(error_msg) </p> </body> </html> """ SMTPCONF = { 'host': 'localhost', 'port': 1025 } def POST(self): """ Handle a POST request: gets its content, transforms it into yaml, email the result and returns a confirmation page. """ try: input_ = web.input() except ValueError: referer = web.ctx.env.get('HTTP_REFERER', '/') return self.ERROR_MSG % { 'error_msg': "Input is too big, you should write less! Hit the" " back button and try again.", 'return_url': referer } comment = { 'author_ip': web.ctx.ip, 'date_gmt': time.strftime(self.DATE_FORMAT, time.gmtime()), 'date': time.strftime(self.DATE_FORMAT, time.localtime()) } comment.update(self._input_data_iterator(input_)) self._email_comment(comment, COMMENT_EMAIL) web.header('Content-Type', 'text/html') return self.ACK_MSG % {'return_url': input_.return_url} @staticmethod def _sanitize_field(data_, max_size=None): """ Sanitize a string for use as a yaml value. """ if max_size is not None: data = data_[:max_size] else: data = data_ return data.replace("'", "").replace('\n', '\n ') def _input_data_iterator(self, input_): """ Transforms the POST input as returned by web.input() into a dictionary. This only keeps the keys that we are interested, truncates values to a maximum size and sanitizes the values. 
""" keys = ( 'author', 'author_email', 'author_url', 'content', 'post_id') for key in keys: if hasattr(input_, key): max_size = MAX_SIZE if key == 'content': max_size = MAX_SIZE_COMMENT value = self._sanitize_field(getattr(input_, key), max_size) yield (key, value) @staticmethod def _yml_from_dict(dict_): """ Generates a yaml string from a dict """ yaml = u"" for item in dict_.iteritems(): yaml += u"%s: '%s'\n" % item return yaml @staticmethod def _file_name(comment): """ Generates a suitable file name for a comment """ return comment['date_gmt'].replace(' ', '_') + '.yaml' def _email_comment(self, comment, email): """ Send a comment by email """ comment_string = self._yml_from_dict(comment) comment_string = comment_string.encode('UTF-8') comment_attachment = MIMEText(comment_string, #_subtype='x-yaml', _charset='UTF-8') comment_attachment.add_header('Content-Disposition', 'inline', filename=self._file_name(comment)) message = MIMEMultipart() message['Subject'] = "[blogcomment] New comment from %s on %s" % ( comment['author_email'], comment['post_id']) message['From'] = 'noreply@emont.org' message['To'] = email message.attach(MIMEText("A new comment has been posted!\n")) message.attach(comment_attachment) if not is_test(): smtp_connection = smtplib.SMTP(**self.SMTPCONF) smtp_connection.sendmail(email, [email], message.as_string()) smtp_connection.quit() # limit the size of POST requests to 10kb cgi.maxlen = MAX_SIZE_COMMENT app = web.application(URLS, globals()) if (not is_test()) and __name__ == "__main__": app.run()
Python
0.99993
@@ -580,16 +580,51 @@ le.com%22%0A +FROM_EMAIL = %22noreply@example.com%22%0A MAX_SIZE @@ -5421,27 +5421,18 @@ %5D = -'noreply@emont.org' +FROM_EMAIL %0A
10d0b7c452c8d9d5893cfe612e0beaa738f61628
Add to template builtins only if add_to_buitlins is available (Django <= 1.8)
easy_pjax/__init__.py
easy_pjax/__init__.py
#-*- coding: utf-8 -*- """ Register filter so it is available for use in the `extends` template tag (The `extends` tag must come first in a template, so regular `load` is not an option). """ from __future__ import absolute_import, division, print_function, unicode_literals __version__ = "1.2.0" try: from django.template import add_to_builtins except ImportError: # import path changed in 1.8 from django.template.base import add_to_builtins add_to_builtins("easy_pjax.templatetags.pjax_tags")
Python
0
@@ -293,16 +293,42 @@ 1.2.0%22%0A%0A +has_add_to_builtins = True %0Atry:%0A @@ -393,16 +393,29 @@ tError:%0A + try:%0A # im @@ -439,24 +439,28 @@ in 1.8%0A + + from django. @@ -496,17 +496,105 @@ uiltins%0A -%0A + except ImportError:%0A has_add_to_builtins = False%0A%0Aif has_add_to_builtins:%0A add_to_b
5b33794ce7a3c40c4fe1912be78d628ac5673536
sort WEO issues in chronological order
dlstats/fetchers/IMF.py
dlstats/fetchers/IMF.py
# -*- coding: utf-8 -*- from dlstats.fetchers._skeleton import Skeleton, Category, Series, Dataset, Provider, CodeDict from dlstats.fetchers.make_elastic_index import ElasticIndex import urllib import xlrd import csv import codecs from datetime import datetime import pandas import pprint from collections import OrderedDict from re import match from time import sleep import requests from lxml import etree class IMF(Skeleton): def __init__(self): super().__init__(provider_name='IMF') self.provider_name = 'IMF' self.provider = Provider(name=self.provider_name,website='http://www.imf.org/') def upsert_dataset(self, datasetCode): if datasetCode=='WEO': for u in self.weo_urls: self.upsert_weo_issue(u,datasetCode) es = ElasticIndex() # ???? es.make_index(self.provider_name,datasetCode) # ???? else: raise Exception("This dataset is unknown" + dataCode) @property def weo_urls(self): """Procedure for fetching the list of links to the Excel files from the WEO database :returns: list --- list of links >>> l = get_weo_links() >>> print(l[:4]) ['http://www.imf.org/external/pubs/ft/weo/2015/01/weodata/WEOApr2015all.xls', 'http://www.imf.org/external/pubs/ft/weo/2014/02/weodata/WEOOct2014all.xls', 'http://www.imf.org/external/pubs/ft/weo/2014/01/weodata/WEOApr2014all.xls', 'http://www.imf.org/external/pubs/ft/weo/2013/02/weodata/WEOOct2013all.xls'] """ #We hardcode these links because their formats are different. output = ['http://www.imf.org/external/pubs/ft/weo/2006/02/data/WEOSep2006all.xls', 'http://www.imf.org/external/pubs/ft/weo/2007/01/data/WEOApr2007all.xls', 'http://www.imf.org/external/pubs/ft/weo/2007/02/weodata/WEOOct2007all.xls'] webpage = requests.get('http://www.imf.org/external/ns/cs.aspx?id=28') html = etree.HTML(webpage.text) hrefs = html.xpath("//div[@id = 'content-main']/h4/a['href']") links = [href.values() for href in hrefs] #The last links of the WEO webpage lead to data we dont want to pull. links = links[:-16] #These are other links we don't want. 
links.pop(-8) links.pop(-10) links = [link[0][:-10]+'download.aspx' for link in links] output = [] for link in links: webpage = requests.get(link) html = etree.HTML(webpage.text) final_link = html.xpath("//div[@id = 'content']//table//a['href']") final_link = final_link[0].values() output.append(link[:-13]+final_link[0]) return(output) def upsert_weo_issue(self,url,dataset_code): dataset = Dataset(self.provider_name,dataset_code) weo_data = WeoData(dataset,url) dataset.name = 'World Economic Outlook' dataset.doc_href = 'http://www.imf.org/external/ns/cs.aspx?id=28' dataset.last_update = weo_data.release_date dataset.attribute_list.update(CodeDict({'flags': {'e': 'Estimated'}})) dataset.series.data_iterator = weo_data dataset.update_database() def upsert_categories(self): document = Category(provider = self.provider_name, name = 'WEO' , categoryCode ='WEO') return document.update_database() class WeoData(): def __init__(self,dataset,url): self.provider_name = dataset.provider_name self.dataset_code = dataset.dataset_code self.dimension_list = dataset.dimension_list self.attribute_list = dataset.attribute_list datafile = urllib.request.urlopen(url).read().decode('latin-1').splitlines() self.sheet = csv.DictReader(datafile, delimiter='\t') self.years = self.sheet.fieldnames[9:-1] print(self.years) self.start_date = pandas.Period(self.years[0],freq='annual') self.end_date = pandas.Period(self.years[-1],freq='annual') self.release_date = datetime.strptime(match(".*WEO(\w{7})",url).groups()[0], "%b%Y") def __next__(self): row = next(self.sheet) series = self.build_series(row) if series is None: raise StopIteration() return(series) def build_series(self,row): if row['Country']: series = {} values = [] dimensions = {} for year in self.years: values.append(row[year]) dimensions['Country'] = self.dimension_list.update_entry('Country', row['ISO'], row['Country']) dimensions['WEO Country Code'] = self.dimension_list.update_entry('WEO Country Code', row['WEO Country Code'], row['WEO Country Code']) # put country name ???? dimensions['Subject'] = self.dimension_list.update_entry('Subject', row['WEO Subject Code'], row['Subject Descriptor']) dimensions['Units'] = self.dimension_list.update_entry('Units', '', row['Units']) dimensions['Scale'] = self.dimension_list.update_entry('Scale', row['Scale'], row['Scale']) series_name = row['Subject Descriptor']+'.'+row['Country']+'.'+row['Units'] series_key = row['WEO Subject Code']+'.'+row['ISO']+'.'+dimensions['Units'] release_dates = [ self.release_date for v in values] series['provider'] = self.provider_name series['datasetCode'] = self.dataset_code series['name'] = series_name series['key'] = series_key series['values'] = values series['attributes'] = {} if row['Estimates Start After']: estimation_start = int(row['Estimates Start After']); series['attributes'] = {'flag': [ '' if int(y) < estimation_start else 'e' for y in self.years]} series['dimensions'] = dimensions series['releaseDates'] = release_dates series['startDate'] = self.start_date.ordinal series['endDate'] = self.end_date.ordinal series['frequency'] = 'A' if row['Subject Notes']: series['notes'] = row['Subject Notes'] if row['Country/Series-specific Notes']: row['Country/Series-specific Notes'] += '\n' + row['Country/Series-specific Notes'] return(series) else: return None if __name__ == "__main__": import IMF w = IMF.IMF() w.provider.update_database() w.upsert_categories() w.upsert_dataset('WEO')
Python
0.999332
@@ -2768,22 +2768,90 @@ -return +# we need to handle the issue in chronological order%0A return(sorted (output) %0A @@ -2846,16 +2846,17 @@ (output) +) %0A
e145ef6ca54c9615f038601da17daf16550196d6
Use environment variables to locate Windows GStreamer includes
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "gstreamer-superficial", "sources": [ "gstreamer.cpp", "GLibHelpers.cpp", "GObjectWrap.cpp", "Pipeline.cpp" ], "include_dirs": [ "<!(node -e \"require('nan')\")" ], "cflags": [ "-Wno-cast-function-type" ], "conditions" : [ ["OS=='linux'", { "include_dirs": [ '<!@(pkg-config gstreamer-1.0 --cflags-only-I | sed s/-I//g)', '<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)', '<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)' ], "libraries": [ '<!@(pkg-config gstreamer-1.0 --libs)', '<!@(pkg-config gstreamer-app-1.0 --libs)', '<!@(pkg-config gstreamer-video-1.0 --libs)' ] }], ["OS=='mac'", { "include_dirs": [ '<!@(pkg-config gstreamer-1.0 --cflags-only-I | sed s/-I//g)', '<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)', '<!@(pkg-config gstreamer-app-1.0 --cflags-only-I | sed s/-I//g)' ], "libraries": [ '<!@(pkg-config gstreamer-1.0 --libs)', '<!@(pkg-config gstreamer-app-1.0 --libs)', '<!@(pkg-config gstreamer-video-1.0 --libs)' ] }], ["OS=='win'", { "include_dirs": [ "X:/gstreamer-sdk/1.0/x86_64/include/gstreamer-1.0", "X:/gstreamer-sdk/1.0/x86_64/include/glib-2.0", "X:/gstreamer-sdk/1.0/x86_64/include/libxml2" ], "libraries": [ "X:/gstreamer-sdk/1.0/x86_64/lib/gstreamer-1.0.lib", "X:/gstreamer-sdk/1.0/x86_64/lib/gstapp-1.0.lib", "X:/gstreamer-sdk/1.0/x86_64/lib/gstvideo-1.0.lib", "X:/gstreamer-sdk/1.0/x86_64/lib/gobject-2.0.lib", "X:/gstreamer-sdk/1.0/x86_64/lib/glib-2.0.lib" ] }] ] } ] }
Python
0
@@ -1183,44 +1183,52 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/ +%25) include -/ +%5C gstr @@ -1248,44 +1248,116 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X86_64%25)lib%5Cglib-2.0%5Cinclude%22,%0A%09%09%09%09%22%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/ +%25) include -/ +%5C glib @@ -1372,44 +1372,52 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/ +%25) include -/ +%5C libx @@ -1454,40 +1454,48 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/lib/ +%25)lib%5C gstr @@ -1519,40 +1519,48 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/lib/ +%25)lib%5C gsta @@ -1581,40 +1581,48 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/lib/ +%25)lib%5C gstv @@ -1645,40 +1645,48 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/lib/ +%25)lib%5C gobj @@ -1708,40 +1708,48 @@ %09%09%09%22 -X:/gstreamer-sdk/1.0/x +%3C!(echo %25GSTREAMER_1_0_ROOT_X 86_64 -/lib/ +%25)lib%5C glib
55dd6cb9dfb72fcbff89b10ccdd0d68c309d9aa9
Enable RTTI on OS X to fix exception handling (gh issue #106)
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "oracle_bindings", "sources": [ "src/connection.cpp", "src/oracle_bindings.cpp", "src/executeBaton.cpp", "src/reader.cpp", "src/statement.cpp", "src/outParam.cpp" ], "conditions": [ ["OS=='mac'", { "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES" } }], ["OS!='win'", { "variables": { "oci_include_dir%": "<!(if [ -z $OCI_INCLUDE_DIR ]; then echo \"/opt/instantclient/sdk/include/\"; else echo $OCI_INCLUDE_DIR; fi)", "oci_lib_dir%": "<!(if [ -z $OCI_LIB_DIR ]; then echo \"/opt/instantclient/\"; else echo $OCI_LIB_DIR; fi)", "oci_version%": "<!(if [ -z $OCI_VERSION ]; then echo 11; else echo $OCI_VERSION; fi)" }, "libraries": [ "-locci", "-lclntsh", "-lnnz<(oci_version)" ], "link_settings": {"libraries": [ '-L<(oci_lib_dir)'] } }], ["OS=='win'", { "configurations": { "Release": { "msvs_settings": { "VCCLCompilerTool": { "RuntimeLibrary": "2" } }, }, "Debug": { "msvs_settings": { "VCCLCompilerTool": { "RuntimeLibrary": "3" } }, } }, "variables": { "oci_include_dir%": "<!(IF DEFINED OCI_INCLUDE_DIR (echo %OCI_INCLUDE_DIR%) ELSE (echo C:\oracle\instantclient\sdk\include))", "oci_lib_dir%": "<!(IF DEFINED OCI_LIB_DIR (echo %OCI_LIB_DIR%) ELSE (echo C:\oracle\instantclient\sdk\lib\msvc))", "oci_version%": "<!(IF DEFINED OCI_VERSION (echo %OCI_VERSION%) ELSE (echo 11))" }, # "libraries": [ "-loci" ], "link_settings": {"libraries": [ '<(oci_lib_dir)\oraocci<(oci_version).lib'] } }] ], "include_dirs": [ "<(oci_include_dir)" ], "cflags!": [ "-fno-exceptions" ], "cflags_cc!": [ "-fno-exceptions" ] } ] }
Python
0
@@ -422,16 +422,49 @@ PTIONS%22: + %22YES%22,%0A%09%09%09%22GCC_ENABLE_CPP_RTTI%22: %22YES%22%0A
043a0ad774964d2608ee1c8bd8ba1abc5b2ed0b4
Tweak binding.gyp so it doesn't error out on Windows
binding.gyp
binding.gyp
{ 'conditions': [ ['OS!="win"', { 'targets': [{ 'target_name': 'pty', 'include_dirs' : [ '<!(node -e "require(\'nan\')")' ], 'sources': [ 'src/unix/pty.cc' ], 'libraries': [ '-lutil', '-L/usr/lib', '-L/usr/local/lib' ], 'conditions': [ # http://www.gnu.org/software/gnulib/manual/html_node/forkpty.html # One some systems (at least including Cygwin, Interix, # OSF/1 4 and 5, and Mac OS X) linking with -lutil is not required. ['OS=="mac" or OS=="solaris"', { 'libraries!': [ '-lutil' ] }] ] }] }] ] }
Python
0
@@ -1,48 +1,6 @@ %7B%0A - 'conditions': %5B%0A %5B'OS!=%22win%22', %7B%0A 't @@ -15,20 +15,16 @@ %5B%7B%0A - - 'target_ @@ -37,16 +37,58 @@ 'pty',%0A + 'conditions': %5B%0A %5B'OS!=%22win%22', %7B%0A @@ -724,13 +724,14 @@ -%7D %5D%0A -%5D +%7D%5D, %0A%7D%0A
5a6f748981554cb4d4aa0b5500a9b86bd09eb1b5
Add Linux static bindings
binding.gyp
binding.gyp
{ 'targets': [ { 'target_name': 'zmq', 'sources': [ 'binding.cc' ], 'include_dirs' : [ "<!(node -e \"require('nan')\")" ], 'conditions': [ ['OS=="win"', { 'win_delay_load_hook': 'true', 'include_dirs': ['windows/include'], 'link_settings': { 'libraries': [ 'Delayimp.lib', ], 'conditions': [ ['target_arch=="ia32"', { 'libraries': [ '<(PRODUCT_DIR)/../../windows/lib/x86/libzmq.lib', ] },{ 'libraries': [ '<(PRODUCT_DIR)/../../windows/lib/x64/libzmq.lib', ] }] ], }, 'msvs_settings': { 'VCLinkerTool': { 'DelayLoadDLLs': ['libzmq.dll'] } }, }, { 'libraries': [ '<(PRODUCT_DIR)/../../zmq/lib/libzmq.a' ], 'include_dirs': [ '<(PRODUCT_DIR)/../../zmq/include' ], 'cflags!': ['-fno-exceptions'], 'cflags_cc!': ['-fno-exceptions'], }], ['OS=="mac" or OS=="solaris"', { 'xcode_settings': { 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 'MACOSX_DEPLOYMENT_TARGET': '10.6', }, 'libraries': [ '<(PRODUCT_DIR)/../../zmq/lib/libzmq.a' ], }], ['OS=="openbsd" or OS=="freebsd"', { }], ['OS=="linux"', { }], ] } ] }
Python
0
@@ -1473,24 +1473,92 @@ %22linux%22', %7B%0A + 'libraries': %5B '%3C(PRODUCT_DIR)/../../zmq/lib/libzmq.a' %5D,%0A %7D%5D,%0A
777bb37f9ac4457dca79a07953356ce46b941a30
change '-std=c++11' to '-std=c++0x' for linux
binding.gyp
binding.gyp
{ 'targets': [ { 'target_name': 'eigen', 'sources': [ 'src/EigenJS.cpp' ], 'include_dirs': [ 'deps', "<!(node -e \"require('nan')\")" ], 'conditions': [ ['OS=="win"', { 'msvs_settings': { 'VCCLCompilerTool': { 'ExceptionHandling': 1, 'AdditionalOptions': [ '/GR', '/EHsc', '/wd4018', '/wd4506' ] } } }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', { 'cflags': [ '-std=c++11' ], 'cflags_cc!': [ '-fno-rtti', '-fno-exceptions'] }], ['OS=="mac"', { 'xcode_settings': { 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 'GCC_ENABLE_CPP_RTTI': 'YES', 'OTHER_CPLUSPLUSFLAGS': [ '-std=c++11', '-stdlib=libc++' ], 'OTHER_LDFLAGS': [ '-stdlib=libc++' ], 'MACOSX_DEPLOYMENT_TARGET': '10.7' } }] ] } ] }
Python
0.000023
@@ -559,18 +559,18 @@ -std=c++ -11 +0x ' %5D,%0A
4d117703e8a33ab628feba96aa5f2a35dd175ef6
Allow null event dates
web/whim/core/models.py
web/whim/core/models.py
from datetime import datetime, timedelta, timezone from django.db import models from django.dispatch import receiver from django.contrib.postgres.fields import ArrayField from django.utils.text import slugify from django.urls import reverse from whim.core.time import zero_time_with_timezone class BaseModel(models.Model): ''' Custom model base class providing creation/mod timestamps ''' creation_date = models.DateTimeField(auto_now_add=True) last_modified_date = models.DateTimeField(auto_now=True) class Meta: abstract = True class Source(BaseModel): ''' Source model ''' name = models.CharField(blank=False, max_length=75) scraper_name = models.CharField(blank=False, max_length=75) last_run_date = models.DateTimeField(blank=True, null=True) active = models.BooleanField(default=False) def __str__(self): return '{} - {}'.format(self.name, self.scraper_name) class CategoryManager(models.Manager): ''' Manager methods for Category model ''' def get_or_create_from_name(self, name): return self.get_or_create(name=name.lower()) class Category(BaseModel): ''' Category model ''' name = models.CharField(blank=False, max_length=75) objects = CategoryManager() class Meta: verbose_name_plural = 'Categories' def __str__(self): return self.name class EventManager(models.Manager): ''' Manager methods for Event model ''' def create_slug(self, name): slug = slugify(name) count = 1 while self.filter(slug=slug).exists(): count += 1 slug = '{}_{:d}'.format(slug, count) return slug def pending(self): return self.filter(status=Event.STATUS_PENDING) def published(self): return self.filter(status=Event.STATUS_PUBLISHED) def pending_or_published(self): return self.filter(status__in=(Event.STATUS_PENDING, Event.STATUS_PUBLISHED, )) def forthcoming(self): tomorrow = datetime.now(timezone.utc).date() + timedelta(days=1) tomorrow_start = zero_time_with_timezone(tomorrow) return self.published().filter(start_datetime__gte=tomorrow_start) def today(self): today = datetime.now(timezone.utc).date() tomorrow = today + timedelta(days=1) today_start = zero_time_with_timezone(today) today_end = zero_time_with_timezone(tomorrow) return self.published().filter( start_datetime__lte=today_end, end_datetime__gte=today_start) def exists_for_source_name(self, source, name): return self.pending_or_published().exists( source=source, name__iexact=name) class Event(BaseModel): ''' Event model ''' STATUS_PENDING = 0 STATUS_PUBLISHED = 1 STATUS_REMOVED = 2 STATUS_NEEDS_REVIEW = 3 STATUS_CHOICES = ((STATUS_PENDING, 'Pending'), ( STATUS_PUBLISHED, 'Published'), (STATUS_REMOVED, 'Removed'), (STATUS_NEEDS_REVIEW, 'Needs Review'), ) source = models.ForeignKey(Source, related_name="events") category = models.ForeignKey(Category, related_name="events") name = models.CharField(blank=False, max_length=75) slug = models.SlugField(max_length=75) description = models.TextField(blank=False) start_datetime = models.DateTimeField(blank=False) end_datetime = models.DateTimeField(blank=True, null=True) link = models.URLField(blank=True) tags = ArrayField(models.CharField(max_length=50), blank=True) status = models.PositiveIntegerField( choices=STATUS_CHOICES, default=STATUS_PENDING) objects = EventManager() class Meta: ordering = ['-start_datetime'] def __str__(self): return '{} ({}) - {}'.format( self.name, self.category, self.start_datetime.strftime("%d/%m/%y %H:%M")) def get_absolute_url(self): return reverse('public_event_detail', args=[self.slug]) #Signals @receiver(models.signals.pre_save, sender=Event) def init_event(sender, instance=None, **kwargs): if 
not instance.id: instance.slug = Event.objects.create_slug(instance.name) instance.status = Event.STATUS_PENDING # default to pending
Python
0.000259
@@ -3423,28 +3423,38 @@ Field(blank= -Fals +True, null=Tru e)%0A end_d @@ -4256,73 +4256,4 @@ me)%0A - instance.status = Event.STATUS_PENDING # default to pending%0A
786e7d83672ad5ff2718c9a440dbd180f8e7b24a
make addon buildable as static library (#119)
binding.gyp
binding.gyp
{ 'targets': [ { 'target_name': 'kerberos', 'include_dirs': [ '<!(node -e "require(\'nan\')")' ], 'sources': [ 'src/kerberos.cc' ], 'xcode_settings': { 'MACOSX_DEPLOYMENT_TARGET': '10.12', 'OTHER_CFLAGS': [ "-std=c++11", "-stdlib=libc++" ], }, 'conditions': [ ['OS=="mac" or OS=="linux"', { 'sources': [ 'src/unix/base64.cc', 'src/unix/kerberos_gss.cc', 'src/unix/kerberos_unix.cc' ], 'link_settings': { 'libraries': [ '-lkrb5', '-lgssapi_krb5' ] } }], ['OS=="win"', { 'sources': [ 'src/win32/kerberos_sspi.cc', 'src/win32/kerberos_win32.cc' ], 'link_settings': { 'libraries': [ 'crypt32.lib', 'secur32.lib', 'Shlwapi.lib' ] } }] ] } ] }
Python
0.000001
@@ -49,16 +49,49 @@ beros',%0A + 'type': 'loadable_module',%0A 'i @@ -702,33 +702,257 @@ %5D%0A %7D +,%0A 'conditions': %5B%0A %5B'_type==%22static_library%22', %7B%0A 'link_settings': %7B%0A 'libraries': %5B%0A '-lcom_err'%0A %5D%0A %7D%0A %7D%5D%0A %5D %0A - %7D%5D,%0A @@ -1167,19 +1167,17 @@ ' +-l crypt32 -.lib ',%0A @@ -1194,19 +1194,17 @@ ' +-l secur32 -.lib ',%0A @@ -1221,19 +1221,17 @@ ' +-l Shlwapi -.lib '%0A
4b0df9b01431e670012398fae580de567f4e6199
Make a few imports start with leading underscore
src/dynmen/common.py
src/dynmen/common.py
# -*- coding: utf-8 -*- import logging as _logging from dynmen import Menu, ValidationError, Default from collections import (namedtuple as _ntupl, OrderedDict as _OrderedDict) try: from functools import lru_cache from inspect import signature except ImportError: # for Python 2.7 from functools32 import lru_cache from funcsigs import signature _logr = _logging.getLogger(__name__) _logr.addHandler(_logging.NullHandler()) Record = _ntupl('Record', 'name value transformed') DefaultRecord = _ntupl('DefaultRecord', Record._fields) @lru_cache(maxsize=256) def _get_record(name, value, fn): return Record(name, value, fn(value)) class Descriptor(object): """ When Descriptor instances are accessed normally they return a Record tuple. Subclasses of Descriptor can be created by implementing 1. validate(self, value) which returns the validated value if the given value is not valid some exception should be raised. validate() is called by __set__() 2. transform(self, value) which returns a transformation of the validated value. It is called by __get__() """ def __init__(self, name, default=Default.value, info=''): self.under_name = '_' + name self.name = name self.default = default self.info = info def __get__(self, inst, cls): if inst is None: return self else: return self.get_record(inst, cls) def transform(self, value): msg = '{} must implement the transform method' raise NotImplementedError(msg.format(self.__class__.__name__)) def get_record(self, inst, cls): if inst is not None: value = inst.__dict__.get(self.under_name, self.default) else: value = self.default # return Record(name=self.name, value=value, transformed=self.transform(value)) return _get_record(self.name, value, self.transform) @property def default_record(self): return DefaultRecord._make(self.get_record(None, self.__class__)) def validate(self, value): msg = '{} must implement the validate method' raise NotImplementedError(msg.format(self.__class__.__name__)) def __set__(self, inst, value): if isinstance(value, (Record, DefaultRecord)): value = value.value def err(): msgfail = '{}->{}: validation failed for {!r}' cname = self.__class__.__name__ return ValidationError(msgfail.format(cname, self.name, value)) try: validated = self.validate(value) except Exception as e: from six import raise_from raise_from(err(), e) if validated is not None: inst.__dict__[self.under_name] = validated else: raise err() def __delete__(self, inst): del inst.__dict__[self.under_name] def __repr__(self): clsname = self.__class__.__name__ rtuple = repr(self.as_tuple) rtuple = rtuple[rtuple.find('('):] return clsname + rtuple @classmethod def _get_constructor_keys(cls): kname = '_{}_constructor_keys'.format(cls.__name__) try: return getattr(cls, kname) except AttributeError: sig = signature(cls) keys = tuple(sig.parameters.keys()) setattr(cls, kname, keys) return keys @classmethod def _get_named_tuple(cls): ntname = '_{}_named_tuple'.format(cls.__name__) try: return getattr(cls, ntname) except AttributeError: tuplname = 'T{}'.format(cls.__name__) keys = cls._get_constructor_keys() nt = _ntupl(tuplname, keys) setattr(cls, ntname, nt) return nt @property def as_tuple(self): ntupl = self._get_named_tuple() return ntupl._make((getattr(self, x) for x in ntupl._fields)) class Flag(Descriptor): def __init__(self, name, default=False, info='', flag=''): super(Flag, self).__init__(name, default=default, info=info) self.flag = flag def validate(self, value): if isinstance(value, bool): return value else: raise TypeError('{!r} is not a bool'.format(value)) def 
transform(self, value): return [self.flag] if value else [] class Option(Descriptor): def __init__(self, name, default=Default.value, info='', flag='', type=Default.type): super(Option, self).__init__(name, default=default, info=info) self.flag = flag self.type = type def validate(self, value): if (value is self.default) or (self.type is Default.type): return value return self.type(value) def transform(self, value): if (value != Default.value) and (value is not None): if self.type != Default.type: return [self.flag, str(self.type(value))] else: return [self.flag, str(value)] else: return [] class TraitMenu(Menu): def __call__(self, entries): cmd = list(self.command) opts = self._make_opts() cmd.extend(opts) _logr.debug('Built cmd: {!r}'.format(cmd)) return self._run(cmd, entries) def _make_opts(self): def get_names(): settings = self.meta_settings for opt_group in settings.values(): for opt in opt_group: yield opt.name opts = [] for name in get_names(): opts.extend(getattr(self, name).transformed) return opts @property def meta_settings(self): cls = self.__class__ settname = '_meta_settings_{}'.format(cls.__name__) try: return getattr(self, settname) except AttributeError: pass def get_descriptors(): for name in dir(cls): val = getattr(cls, name) if isinstance(val, Descriptor): yield val od = _OrderedDict() for option in get_descriptors(): opt_name = type(option).__name__ try: od[opt_name].append(option.as_tuple) except KeyError: od[opt_name] = [option.as_tuple,] setattr(self, settname, od) return od
Python
0.000014
@@ -227,32 +227,46 @@ import lru_cache + as _lru_cache %0A from inspec @@ -283,16 +283,30 @@ ignature + as _signature %0Aexcept @@ -376,32 +376,46 @@ import lru_cache + as _lru_cache %0A from funcsi @@ -433,16 +433,30 @@ ignature + as _signature %0A%0A%0A_logr @@ -640,16 +640,17 @@ elds)%0A%0A@ +_ lru_cach @@ -3399,16 +3399,17 @@ sig = +_ signatur
b8df30e7a879049997b49aace09501d0dd809e8f
change output of --show-folders
norless/run.py
norless/run.py
import sys
import json
import fcntl
import os.path
import argparse
import threading

from mailbox import Maildir, MaildirMessage
from collections import Counter

from .config import IniConfig
from .state import State, connect, create_tables

get_maildir_lock = threading.Lock()

class ConcurentMaildir(Maildir):
    def __init__(self, *args, **kwargs):
        Maildir.__init__(self, *args, **kwargs)
        self.refresh_lock = threading.Lock()
        self.store_lock = threading.Lock()

    def _refresh(self):
        with self.refresh_lock:
            return Maildir._refresh(self)

    def cm_get_flags(self, key):
        mpath = self._lookup(key)
        name = os.path.basename(mpath)
        _, sep, info = name.rpartition(':')
        if sep:
            _, sep, flags = info.rpartition(',')
            if sep:
                return flags
        return ''

maildir_cache = {}
def get_maildir(maildir):
    with get_maildir_lock:
        key = os.path.expanduser(maildir)
        try:
            return maildir_cache[key]
        except KeyError:
            pass

        result = maildir_cache[key] = ConcurentMaildir(key, factory=None, create=True)
        result.name = os.path.basename(maildir)
        return result

def apply_remote_changes(maildir, state, changes, change_uid):
    uids = changes['trash']
    if uids:
        for uid in uids:
            s = state.get(uid)
            if s and not s.is_check:
                maildir.discard(s.msgkey)
                state.remove(uid)

    uids = changes['seen']
    if uids:
        for uid in uids:
            s = state.get(uid)
            print s
            if s and not s.is_check:
                try:
                    msg = maildir[s.msgkey]
                except KeyError:
                    state.remove(uid)
                else:
                    msg.add_flag('S')
                    with maildir.store_lock:
                        maildir[s.msgkey] = msg
                    state.put(s.uid, s.msgkey, msg.get_flags())

    state.put(change_uid, '', 'S', 1)

def store_message(config, maildir, state, skip_checkpoints, uid, message, flags):
    uid = int(uid)
    msg = MaildirMessage(message)

    if 'X-Norless' in msg:
        replica_id = msg['X-Norless']
        if skip_checkpoints or replica_id == config.replica_id:
            state.put(uid, '', 'S', 1)
            return
        else:
            changes = json.loads(msg.get_payload())
            apply_remote_changes(maildir, state, changes, uid)
            return

    if '\\Seen' in flags:
        msg.add_flag('S')

    s = state.get(uid)
    if s:
        if s.flags != msg.get_flags():
            oldmessage = maildir[s.msgkey]
            oldmessage.set_flags(msg.get_flags())
            with maildir.store_lock:
                maildir[s.msgkey] = oldmessage
    else:
        with maildir.store_lock:
            key = maildir.add(msg)
        state.put(uid, key, msg.get_flags())

def sync_local(maildir, state):
    maxuid = 0
    changes = {'seen':[], 'trash':[]}
    for row in state.getall():
        maxuid = max(row.uid, maxuid)
        if row.is_check:
            continue

        flags = set(row.flags)
        try:
            mflags = maildir.cm_get_flags(row.msgkey)
        except KeyError:
            changes['trash'].append(row.uid)
        else:
            if 'S' in mflags and 'S' not in flags:
                changes['seen'].append(row.uid)

    return maxuid, changes

def sync_account(config, sync_list):
    conn = connect(os.path.expanduser(config.state_db))
    for s in sync_list:
        account = config.accounts[s.account]
        maildir = get_maildir(s.maildir)
        state = State(conn, s.account, s.folder)

        maxuid, changes = sync_local(maildir, state)
        skip_checkpoints = not maxuid

        folder = account.get_folder(s.folder)
        folder.apply_changes(config, changes, state, s.trash)

        messages = folder.fetch(config.fetch_last, maxuid)
        for m in messages:
            store_message(config, maildir, state, skip_checkpoints,
                m['uid'], m['body'], m['flags'])

def sync(config):
    accounts = {}
    for s in config.sync_list:
        accounts.setdefault(s.account, []).append(s)

    threads = []
    for sync_list in accounts.itervalues():
        t = threading.Thread(target=sync_account, args=(config, sync_list))
        t.start()
        threads.append(t)

    for t in threads:
        t.join()

def check(config):
    maildirs = set(s.maildir for s in config.sync_list)
    result = Counter()
    for maildir_path in maildirs:
        maildir = get_maildir(maildir_path)
        for key in maildir.iterkeys():
            if 'S' not in maildir.cm_get_flags(key):
                result[maildir.name] += 1

    for k, v in result.iteritems():
        print '{}\t{}'.format(k, v)

    return result

def show_folders(config):
    for account, box in config.accounts.iteritems():
        print account
        for f, s, name in box.list_folders():
            print '  ', f, s, name

def show_fingerprint(config):
    for account, box in config.accounts.iteritems():
        box.fingerprint = None
        print account, box.server_fingerprint

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-C', '--config', dest='config',
        default=os.path.expanduser('~/.config/norlessrc'))
    parser.add_argument('-c', '--check', dest='check', action='store_true')
    parser.add_argument('-s', '--show-folders', dest='show_folders', action='store_true')
    parser.add_argument('-a', '--account', dest='account')
    parser.add_argument('--init-state', dest='init_state', action='store_true')
    parser.add_argument('--show-fingerprint', dest='show_fingerprint', action='store_true')

    args = parser.parse_args()

    config = IniConfig(args.config)
    if args.account:
        config.restrict_to(args.account)

    if args.show_folders:
        show_folders(config)
    elif args.show_fingerprint:
        show_fingerprint(config)
    else:
        lock_file = os.path.join(
            os.path.dirname(os.path.expanduser(config.state_db)), '.norless-lock')
        fp = open(lock_file, 'w')
        try:
            fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            print >>sys.stderr, 'Another instance already running'
            sys.exit(1)

        if args.init_state:
            with connect(config.state_db) as conn:
                create_tables(conn)

        sync(config)

        if args.check:
            if not check(config):
                sys.exit(1)
Python
0.000001
@@ -5072,21 +5072,42 @@ ' -', f, + %5B%7B%7D%5D %7B%7D%5Ct(%7B%7D)'.format( s, name +, f) %0A%0Ade
1df655ac5a2bc31c693411c8f6a31e421c817c7d
fix typo
src/c3nav/control/views.py
src/c3nav/control/views.py
from functools import wraps

from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator
from django.db import transaction
from django.db.models import Prefetch
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _

from c3nav.control.forms import AccessPermissionForm, AnnouncementForm, UserPermissionsForm
from c3nav.control.models import UserPermissions
from c3nav.mapdata.models.access import AccessPermission, AccessPermissionToken
from c3nav.site.models import Announcement


def control_panel_view(func):
    @wraps(func)
    def wrapped_func(request, *args, **kwargs):
        if not request.user_permissions.control_panel:
            raise PermissionDenied
        return func(request, *args, **kwargs)
    return login_required(login_url='site.login')(wrapped_func)


@login_required(login_url='site.login')
@control_panel_view
def main_index(request):
    return render(request, 'control/index.html', {})


@login_required(login_url='site.login')
@control_panel_view
def user_list(request):
    search = request.GET.get('s')
    page = request.GET.get('page', 1)

    queryset = User.objects.order_by('id')
    if search:
        queryset = queryset.filter(username__icontains=search.strip())

    paginator = Paginator(queryset, 20)
    users = paginator.page(page)

    return render(request, 'control/users.html', {
        'users': users,
    })


@login_required(login_url='site.login')
@control_panel_view
def user_detail(request, user):
    qs = User.objects.select_related(
        'permissions',
    ).prefetch_related(
        Prefetch('accesspermissions', AccessPermission.objects.select_related('access_restriction', 'author'))
    )
    user = get_object_or_404(qs, pk=user)

    if request.method == 'POST':
        delete_access_permission = request.POST.get('delete_access_permission')
        if delete_access_permission:
            with transaction.atomic():
                try:
                    permission = AccessPermission.objects.select_for_update().get(pk=delete_access_permission)
                except AccessPermission.DoesNotExist:
                    messages.error(request, _('Unknown access permission.'))
                else:
                    if request.user_permissions.can_grant or permission.author_id == request.user.pk:
                        permission.delete()
                        messages.success(request, _('Access Permission successfully deleted.'))
                    else:
                        messages.error(request, _('You cannot delete this Access Permission.'))
                return redirect(request.path_info)

    ctx = {
        'user': user,
    }

    # user permissions
    try:
        permissions = user.permissions
    except AttributeError:
        permissions = UserPermissions(user=user)

    ctx.update({
        'user_permissions': tuple(
            field.verbose_name for field in UserPermissions._meta.get_fields()
            if not field.one_to_one and getattr(permissions, field.attname)
        )
    })

    if request.user_permissions.grant_permissions:
        if request.method == 'POST' and request.POST.get('submit_user_permissions'):
            form = UserPermissionsForm(instance=permissions, data=request.POST)
            if form.is_valid():
                form.save()
                messages.success(request, _('General permissions successfully updated.'))
                return redirect(request.path_info)
        else:
            form = UserPermissionsForm(instance=permissions)
        ctx.update({
            'user_permissions_form': form
        })

    # access permissions
    if request.method == 'POST' and request.POST.get('submit_access_permissions'):
        form = AccessPermissionForm(request=request, data=request.POST)
        if form.is_valid():
            form.get_token().redeem(user)
            messages.success(request, _('Access permissions successfully granted.'))
            return redirect(request.path_info)
    else:
        form = AccessPermissionForm(request=request)
    ctx.update({
        'access_permission_form': form
    })

    return render(request, 'control/user.html', ctx)


@login_required(login_url='site.login')
@control_panel_view
def grant_access(request):
    if request.method == 'POST' and request.POST.get('submit_access_permissions'):
        form = AccessPermissionForm(request=request, data=request.POST)
        if form.is_valid():
            token = form.get_token()
            token.save()
            return redirect(reverse('control.access.qr', kwargs={'token': token.token}))
    else:
        form = AccessPermissionForm(request=request)

    ctx = {
        'access_permission_form': form
    }

    return render(request, 'control/access.html', ctx)


@login_required(login_url='site.login')
@control_panel_view
def grant_access_qr(request, token):
    with transaction.atomic():
        token = AccessPermissionToken.objects.select_for_update().get(token=token, author=request.user)
        if token.redeemed:
            messages.success(request, _('Access successfully granted.'))
            token = None
        elif request.method == 'POST' and request.POST.get('revoke'):
            token.delete()
            messages.success(request, _('Token successfully revoked.'))
            return redirect('control.access')
        elif not token.unlimited:
            try:
                latest = AccessPermissionToken.objects.filter(author=request.user).latest('valid_until')
            except AccessPermissionToken.DoesNotExist:
                token = None
            else:
                if latest.id != token.id:
                    token = None
            if token is None:
                messages.error(request, _('You can only display your most recently created token.'))

        if token is None:
            return redirect('control.access')

        token.bump()
        token.save()

    url = reverse('site.access.redeem', kwargs={'token': str(token.token)})
    return render(request, 'control/access_qr.html', {
        'url': url,
        'url_qr': reverse('site.qr', kwargs={'path': url}),
        'url_absolute': request.build_absolute_uri(url),
    })


@login_required(login_url='site.login')
@control_panel_view
def announcement_list(request):
    if not request.user_permissions.manage_announcements:
        raise PermissionDenied

    announcements = Announcement.objects.order_by('-created')

    if request.method == 'POST':
        form = AnnouncementForm(data=request.POST)
        if form.is_valid():
            announcement = form.instance
            announcement = request.user
            announcement.save()
            return redirect('control.announcements')
    else:
        form = AnnouncementForm()

    return render(request, 'control/announcements.html', {
        'form': form,
        'announcements': announcements,
    })


@login_required(login_url='site.login')
@control_panel_view
def announcement_detail(request, announcement):
    if not request.user_permissions.manage_announcements:
        raise PermissionDenied

    announcement = get_object_or_404(Announcement, pk=announcement)

    if request.method == 'POST':
        form = AnnouncementForm(instance=announcement, data=request.POST)
        if form.is_valid():
            form.save()
            return redirect('control.announcements')
    else:
        form = AnnouncementForm(instance=announcement)

    return render(request, 'control/announcement.html', {
        'form': form,
        'announcement': announcement,
    })
Python
0.999991
@@ -2493,17 +2493,24 @@ ons. -can_ grant +_all_access or
f7f9e5ec0889db742c1014f07ddad7f2a16b8a80
Update NewsblurConnector.py
src/connectors/NewsblurConnector.py
src/connectors/NewsblurConnector.py
import json

import requests
import requests.exceptions
import rollbar
from bs4 import BeautifulSoup
from datadog import statsd
from ddtrace import patch
from ddtrace import tracer
from time import sleep

from utility import nb_logging

patch(requests=True)

logger = nb_logging.setup_logger('NewsblurConnector')


class NewsblurConnector:

    def __init__(self, config, username, password):
        self.cookies = None
        self.config = config
        self.verify = config.get('VERIFY')
        self.nb_endpoint = config.get('NB_ENDPOINT')
        self.credentials = {'username': username, 'password': password}

    @statsd.timed('nb.NewsblurConnector.login')
    def login(self):
        """ log in and save cookies """
        r = requests.post(self.nb_endpoint + '/api/login', self.credentials)
        statsd.increment('nb.http_requests.post')
        self.cookies = r.cookies

    @statsd.timed('nb.NewsblurConnector.get_nb_hash_list')
    def get_nb_hash_list(self):
        """ get a list of story identifiers (hashes) from NewsBlur """
        hashes_req = requests.Request('GET', self.nb_endpoint + '/reader/starred_story_hashes',
                                      cookies=self.cookies)
        hashes = self.request_with_backoff(hashes_req)
        try:
            return hashes.json()['starred_story_hashes']
        except ValueError as e:
            rollbar.report_exc_info()
            msg = 'Failed to decode JSON'
            logger.error(msg)
            logger.error(e)
            logger.debug(hashes)
            statsd.event(msg, e.message, alert_type='error')
            return []

    @statsd.timed('nb.NewsblurConnector.get_story_list')
    def get_story_list(self, batch):
        """ get a list of stories corresponding to a list of hashes """
        req_str = self.nb_endpoint + '/reader/starred_stories?'
        for a_hash in batch:
            req_str += 'h=' + a_hash + '&'
        stories = {}
        stories_req = requests.Request('GET', req_str, cookies=self.cookies)
        try:
            stories = self.request_with_backoff(stories_req)
        except requests.exceptions.ConnectionError as e:
            rollbar.report_exc_info()
            msg = 'Failed to get stories'
            logger.error(msg)
            logger.debug('Request string: %s', req_str)
            logger.error(e)
            statsd.event(msg, e.message, alert_type='error')
        logger.debug(stories.text)
        statsd.increment('nb.http_requests.get')
        story_list = []
        try:
            story_list = json.loads(stories.text)['stories']
        except ValueError as e:
            rollbar.report_exc_info()
            msg = 'Failed to parse stories response'
            logger.error(msg)
            logger.error(e)
            statsd.event(msg, e.message, alert_type='error')
            logger.debug(stories.text)
        return story_list

    @statsd.timed('nb.NewsblurConnector.get_comment_count')
    def get_comment_count(self, hnurl):
        req = requests.Request('GET', hnurl, cookies=self.cookies)
        resp = self.request_with_backoff(req)
        if resp is None:
            return None
        story_text = self.request_with_backoff(req).text
        return self.parse_story(story_text)

    # Parse HN story to find how many comments there are
    @statsd.timed('nb.NewsblurConnector.parse_story')
    def parse_story(self, content):
        soup = BeautifulSoup(content, "html.parser")
        comment_count = len(soup.find_all("div", {"class": "comment"}))
        return comment_count

    @statsd.timed('nb.NewsblurConnector.check_if_starred')
    def check_if_starred(self, story_hash):
        starred_req = requests.Request('GET', self.nb_endpoint + '/reader/starred_story_hashes',
                                       cookies=self.cookies)
        hashes = self.request_with_backoff(starred_req)
        statsd.increment('nb.http_requests.get')
        hashlist = hashes.json()['starred_story_hashes']
        return bool(story_hash in hashlist)

    @statsd.timed('nb.NewsblurConnector.remove_star_with_backoff')
    def remove_star_with_backoff(self, story_hash):
        unstar_url = self.nb_endpoint + '/reader/mark_story_hash_as_unstarred'
        req = requests.Request('POST', unstar_url, params={'story_hash': story_hash},
                               cookies=self.cookies)
        return bool(self.request_with_backoff(req) is not None)

    @statsd.timed('nb.NewsblurConnector.request_with_backoff')
    def request_with_backoff(self, req):
        sleep(float(self.config.get('POLITE_WAIT')))
        backoff = self.config.get('BACKOFF_START')
        session = requests.Session()
        prepared_req = session.prepare_request(req)
        try:
            resp = session.send(prepared_req)
            statsd.increment('nb.http_requests.count')
            statsd.increment('nb.http_requests.status_' + str(resp.status_code))
            while resp.status_code != 200:
                if resp.status_code in [429, 500, 502, 503, 504]:
                    # exponential backoff
                    logger.info(
                        "Request for %s returned %s response", req.url, resp.status_code)
                    if backoff < self.config.get('BACKOFF_MAX'):
                        logger.info("Backing off %s seconds", backoff)
                        sleep(backoff)
                        backoff = backoff * self.config.get('BACKOFF_FACTOR')
                        resp = session.send(prepared_req)
                        statsd.increment('nb.http_requests.count')
                    else:
                        logger.warn("Giving up after %s seconds for %s", backoff, req.url)
                        return None
                elif resp.status_code in [403, 520]:
                    logger.warn("%s response, skipping %s and waiting %ss",
                                resp.status_code, req.url, self.config.get('BACKOFF_START'))
                    return None
                else:
                    logger.error("Request for %s returned unhandled %s response",
                                 req.url, resp.status_code)
                    raise requests.exceptions.RequestException()
            return resp
        except requests.exceptions.RequestException as e:
            rollbar.report_exc_info()
            logger.info("url is: %s", req.url)
            logger.error(e)
            return None
Python
0
@@ -5483,32 +5483,115 @@ equests.count')%0A + %09%09statsd.increment('nb.http_requests.status_' + str(resp.status_code))%0A @@ -5949,16 +5949,62 @@ TART'))%0A +%09%09 sleep(self.config.get('BACKOFF_START'))%0A
8d2805107bc765ab0c171adebaeed9c0379b10da
Add -f, --fork to run in the background
notable/app.py
notable/app.py
"""Notable - a simple not taking application""" # Python imports import logging import optparse import os import sys import threading import time import webbrowser try: import urllib2 except ImportError: from urllib import request as urllib2 root = os.path.abspath(os.path.dirname(__file__)) sys.path = [os.path.join(root, '..')] + sys.path # Project imports from notable import bottle, db, editor # Constants, and help with template lookup host = 'localhost' version = '0.0.3' static = os.path.join(root, 'static') bottle.TEMPLATE_PATH.insert(0, os.path.join(root, 'views')) log = logging.getLogger(__name__) up = 'up' @bottle.route('/') @bottle.view('index') def homepage(): return dict() @bottle.route('/static/<filename>') def htdocs(filename): return bottle.static_file(filename, root=static) @bottle.post('/api/decrypt') def decrypt(): password = bottle.request.forms.get('password') uid = bottle.request.forms.get('uid') return db.get_content(uid, password) @bottle.post('/api/delete') def delete(): password = bottle.request.forms.get('password') uid = bottle.request.forms.get('uid') return dict(success=db.delete_note(uid, password=password)) @bottle.post('/api/launch_editor') def launch_editor(): uid = bottle.request.forms.get('uid') content = bottle.request.forms.get('content') return editor.launch(uid, content) @bottle.get('/api/from_disk/<uid>') def from_disk(uid=None): path = os.path.join('/tmp', uid) if os.path.exists(path): return open(path).read() else: return 'missing' @bottle.get('/api/list') def api_list(): return db.search(bottle.request.query.get('s')) @bottle.post('/api/persist') def persist(): n = db.note(actual=True) password = bottle.request.forms.get('password') form = dict((k, v) for k, v in bottle.request.forms.items() if k in n) if form.get('uid') == '': fcn, _ = db.create_note, form.pop('uid') else: fcn = db.update_note n.update(form) return dict(success=fcn(n, password=password)) @bottle.get('/word') def word(): return up def browser(): time.sleep(1) webbrowser.open_new_tab('http://localhost:8082') def getopts(): parser = optparse.OptionParser(__doc__.strip()) parser.add_option('-b', '--browser', action='store_true', dest='browser', help='Launch a browser') parser.add_option('-d', '--debug', action='store_true', dest='debug', help='Debug using a debug db') parser.add_option('-p', '--port', default=8082, dest='port', help='TCP port to start the server on') return parser.parse_args(), parser def running(opts): url = 'http://%s:%s/word' % (host, opts.port) try: _running = urllib2.urlopen(url).read() == up except urllib2.URLError: _running = False return _running def run(opts): db.path = db.path + '.debug' if opts.debug else db.path db.prepare() bottle.run(host=host, port=int(opts.port)) def main(): logging.basicConfig() (opts, _), _ = getopts() _ = threading.Thread(target=browser).start() if opts.browser else False _ = run(opts) if not running(opts) else False if __name__ == '__main__': main()
Python
0.000001
@@ -59,16 +59,31 @@ imports%0A +import httplib%0A import l @@ -115,16 +115,34 @@ port os%0A +import subprocess%0A import s @@ -651,18 +651,8 @@ e__) -%0Aup = 'up' %0A%0A@b @@ -2104,19 +2104,18 @@ t('/ -wor +pi d')%0Adef word @@ -2110,19 +2110,18 @@ d')%0Adef -wor +pi d():%0A @@ -2128,18 +2128,32 @@ return -up +str(os.getpid()) %0A%0Adef br @@ -2640,16 +2640,204 @@ ug db')%0A + parser.add_option('-f', '--fork',%0A action='store_true',%0A dest='fork',%0A help='Start the server in the background (fork)')%0A pars @@ -3082,19 +3082,18 @@ //%25s:%25s/ -wor +pi d' %25 (ho @@ -3124,26 +3124,22 @@ -_running = +return urllib2 @@ -3162,14 +3162,8 @@ ad() - == up %0A @@ -3169,16 +3169,40 @@ except + (httplib.BadStatusLine, urllib2 @@ -3206,24 +3206,25 @@ ib2.URLError +) :%0A _r @@ -3225,60 +3225,295 @@ -_running = False%0A return _running%0A%0Adef run(opts): +return False%0A%0Adef run(opts):%0A bg = %5B'-f', '--fork', '-b', '--browser'%5D%0A if opts.fork:%0A map(sys.argv.remove, (o for o in bg if o in sys.argv))%0A cmd = %5B'nohup', 'python', os.path.join(root, 'app.py')%5D + sys.argv%5B1:%5D%0A subprocess.Popen(cmd + %5B'&'%5D)%0A return%0A %0A
b6208c1f9b6f0afca1dff40a66d2c915594b1946
Add exception hook to help diagnose server test errors in python3 gui mode
blaze/io/server/tests/start_simple_server.py
blaze/io/server/tests/start_simple_server.py
""" Starts a Blaze server for tests. $ start_test_server.py /path/to/catalog_config.yaml <portnumber> """ import sys, os import blaze from blaze.io.server.app import app blaze.catalog.load_config(sys.argv[1]) app.run(port=int(sys.argv[2]), use_reloader=False)
Python
0
@@ -115,16 +115,696 @@ sys, os%0A +%0Aif os.name == 'nt':%0A old_excepthook = sys.excepthook%0A%0A # Exclude this from our autogenerated API docs.%0A undoc = lambda func: func%0A%0A @undoc%0A def gui_excepthook(exctype, value, tb):%0A try:%0A import ctypes, traceback%0A MB_ICONERROR = 0x00000010%0A title = u'Error starting test Blaze server'%0A msg = u''.join(traceback.format_exception(exctype, value, tb))%0A ctypes.windll.user32.MessageBoxW(0, msg, title, MB_ICONERROR)%0A finally:%0A # Also call the old exception hook to let it do%0A # its thing too.%0A old_excepthook(exctype, value, tb)%0A%0A sys.excepthook = gui_excepthook%0A%0A import b
bde944941aa74eb8b7bc1150366b8f7cfedff93c
fix test coverage
etcd/test_etcd.py
etcd/test_etcd.py
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)

# 3p
from nose.plugins.attrib import attr

# project
from tests.checks.common import AgentCheckTest


@attr(requires='etcd')
class CheckEtcdTest(AgentCheckTest):
    CHECK_NAME = "etcd"

    STORE_METRICS = [
        'compareanddelete.fail',
        'compareanddelete.success',
        'compareandswap.fail',
        'compareandswap.success',
        'create.fail',
        'create.success',
        'delete.fail',
        'delete.success',
        'expire.count',
        'gets.fail',
        'gets.success',
        'sets.fail',
        'sets.success',
        'update.fail',
        'update.success',
        'watchers',
    ]

    def __init__(self, *args, **kwargs):
        AgentCheckTest.__init__(self, *args, **kwargs)
        self.config = {"instances": [{"url": "http://localhost:2379"}]}

    def test_metrics(self):
        self.run_check_twice(self.config)

        tags = ['url:http://localhost:2379', 'etcd_state:leader']

        for mname in self.STORE_METRICS:
            self.assertMetric('etcd.store.%s' % mname, tags=tags, count=1)

        self.assertMetric('etcd.self.send.appendrequest.count', tags=tags, count=1)
        self.assertMetric('etcd.self.recv.appendrequest.count', tags=tags, count=1)

        self.assertServiceCheckOK(self.check.SERVICE_CHECK_NAME, count=1,
                                  tags=['url:http://localhost:2379', 'etcd_state:leader'])
        self.assertServiceCheckOK(self.check.HEALTH_SERVICE_CHECK_NAME, count=1,
                                  tags=['url:http://localhost:2379', 'etcd_state:leader'])

        self.coverage_report()

    # FIXME: not really an integration test, should be pretty easy
    # to spin up a cluster to test that.
    def test_followers(self):
        mock = {
            "followers": {
                "etcd-node1": {
                    "counts": {
                        "fail": 1212,
                        "success": 4163176
                    },
                    "latency": {
                        "average": 2.7206299430775007,
                        "current": 1.486487,
                        "maximum": 2018.410279,
                        "minimum": 1.011763,
                        "standardDeviation": 6.246990702203536
                    }
                },
                "etcd-node3": {
                    "counts": {
                        "fail": 1378,
                        "success": 4164598
                    },
                    "latency": {
                        "average": 2.707100125761001,
                        "current": 1.666258,
                        "maximum": 1409.054765,
                        "minimum": 0.998415,
                        "standardDeviation": 5.910089773061448
                    }
                }
            },
            "leader": "etcd-node2"
        }

        mocks = {
            '_get_leader_metrics': lambda url, path, ssl, timeout: mock
        }

        self.run_check_twice(self.config, mocks=mocks)

        common_leader_tags = ['url:http://localhost:2379', 'etcd_state:leader']
        follower_tags = [
            common_leader_tags[:] + ['follower:etcd-node1'],
            common_leader_tags[:] + ['follower:etcd-node3'],
        ]

        for fol_tags in follower_tags:
            self.assertMetric('etcd.leader.counts.fail', count=1, tags=fol_tags)
            self.assertMetric('etcd.leader.counts.success', count=1, tags=fol_tags)
            self.assertMetric('etcd.leader.latency.avg', count=1, tags=fol_tags)
            self.assertMetric('etcd.leader.latency.min', count=1, tags=fol_tags)
            self.assertMetric('etcd.leader.latency.max', count=1, tags=fol_tags)
            self.assertMetric('etcd.leader.latency.stddev', count=1, tags=fol_tags)
            self.assertMetric('etcd.leader.latency.current', count=1, tags=fol_tags)

    def test_bad_config(self):
        self.assertRaises(Exception,
                          lambda: self.run_check({"instances": [{"url": "http://localhost:2379/test"}]}))

        self.assertServiceCheckCritical(self.check.SERVICE_CHECK_NAME, count=1,
                                        tags=['url:http://localhost:2379/test'])

        self.coverage_report()
Python
0.000002
@@ -4318,16 +4318,93 @@ /test'%5D) +%0A self.assertServiceCheckUnknown(self.check.HEALTH_SERVICE_CHECK_NAME) %0A%0A
826698c9894ce94c625718eb041ce817eb6ab5ef
Update config.dist.py
boiler/boiler_template/config/config.dist.py
boiler/boiler_template/config/config.dist.py
from project.backend import config


class DefaultConfig(config.DefaultConfig):
    """ Local development config """

    # set this for offline mode
    SERVER_NAME = None
    SECRET_KEY = None


class DevConfig(config.DevConfig, DefaultConfig):
    """ Local development config """
    pass


class TestingConfig(config.TestingConfig, DefaultConfig):
    """ Local testing config """
Python
0.000002
@@ -378,14 +378,23 @@ fig %22%22%22%0A + pass%0A %0A%0A%0A%0A%0A%0A
5d5f0ff3a9514e723497b7238f1c24a8baaf85e8
Specifying the number of places past the decimal point
detector.py
detector.py
import os, math, random, sys
from ngram import Ngram

class Detector:

    def __init__(self, test_docs_dir="./test"):
        self.test_docs_dir = test_docs_dir
        self.files = [d for d in os.listdir(test_docs_dir)
                      if os.path.isfile(os.path.join(test_docs_dir, d)) and d[0] != "."]
        self.ngrams = [] # all ngrams
        self.ngrams_to_objs = {} # all ngrams
        self.docs_to_ngrams = {} # maps filenames to Ngram objects
        self.p = 24107.0
        self.pairs_of_randoms = []
        self.generate_random_pairs_of_numbers()
        print "Creating 3-grams for each document..."
        self.create_3grams()
        print "3-gram generation is complete."
        self.sketches = {} # maps filename to sketch
        print "Calculating sketches for each document..."
        self.calculate_sketches()
        print "Sketch calculation is complete."
        # cleanup
        self.ngrams = None
        self.ngrams_to_objs = None
        self.docs_to_ngrams = None

    def calculate_sketches(self):
        p = self.p
        filenames = self.docs_to_ngrams.keys()
        completed = []
        for j in filenames:
            with open('./ngrams/'+os.path.basename(j), 'w') as f:
                f.write(self.docs_to_ngrams[j].__str__())
            if j in completed:
                raise Exception
            sketch = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
            for s in xrange(25):
                f_min = sys.float_info.max
                a_s = self.pairs_of_randoms[s][0]
                b_s = self.pairs_of_randoms[s][1]
                for obj in self.docs_to_ngrams[j]:
                    fsx = (a_s*float(obj.ID) + b_s) % p
                    if fsx < f_min:
                        f_min = fsx
                sketch[s] = f_min
            self.sketches[j] = sketch
            completed.append(j)

    def generate_random_pairs_of_numbers(self):
        for i in xrange(25):
            a = random.randint(1, self.p-1)
            b = random.randint(0, self.p-1)
            self.pairs_of_randoms.append((a,b))

    def filename(self, filename):
        return "%s/%s" % (self.test_docs_dir, filename)

    def create_3grams(self):
        for file in self.files:
            filename = self.filename(file)
            with open(filename) as f:
                ngrams_for_file = self.ngram(f.read().strip().strip(",.!|&-_()[]<>{}/\"'").strip(), 3, filename)
                self.docs_to_ngrams[filename] = ngrams_for_file

    def ngram(self, file_contents, length=3, filename=None):
        tokens = file_contents.split(" ")
        num_tokens = len(tokens)
        ngrams = []
        for t in xrange(num_tokens):
            if num_tokens <= t+length-1:
                break # n-2 ngrams!
            ngram_tokens = tokens[t:t+length]
            ngram_value = "-".join(ngram_tokens)
            ngram = Ngram(len(self.ngrams)+1, ngram_value, filename)
            if ngram_value in self.ngrams:
                self.ngrams_to_objs[ngram_value].add_containing_file(filename)
            else:
                self.ngrams_to_objs[ngram_value] = ngram
                self.ngrams.append(ngram_value)
            ngrams.append(ngram)
        return ngrams

    def jaccard(self, k):
        return (k/25.0)

    def get_jaccard(self, file1, file2):
        # get num of same sketch values
        k = 0.0
        for index in xrange(25):
            #print "%f == %f? @ index %d" % (self.sketches[file1][index], self.sketches[file2][index], index)
            if self.sketches[file1][index] == self.sketches[file2][index]:
                k += 1
        return self.jaccard(k)

    def check_for_duplicates(self):
        matches = []
        for indx1, f1 in enumerate(self.files):
            file1 = self.filename(f1)
            for indx2, f2 in enumerate(self.files[indx1+1:]):
                file2 = self.filename(f2)
                jaccard = self.get_jaccard(file1, file2)
                if jaccard >= 0.5:
                    matches.append("%s and %s are near-duplicates, with Jaccard value=%f." % (file1, file2, jaccard))
        return "\n".join(matches)
Python
0.949914
@@ -4100,18 +4100,24 @@ alue -=%25 + of %250.3 f.%22 %25 (f ile1 @@ -4112,26 +4112,20 @@ f.%22 %25 (f -ile 1, f -ile 2, jacca
2e2eb70663efa66532ee61dd9b3923751865cf06
Add ElasticQuery.timeout(timeout)
elasticquery/query.py
elasticquery/query.py
# ElasticQuery
# File: query.py
# Desc: ElasticQuery itself

import json

from .exception import NoESClient, NoIndexName, NoDocType  # , InvalidField


class ElasticQuery(object):
    '''A class for building ES queries'''
    __es__ = None
    __index__ = None
    __doc_type__ = None
    __mapping__ = None

    def __init__(self, es=None, index=None, doc_type=None, mapping=None):
        self.__es__ = es
        self.__index__ = index
        self.__doc_type__ = doc_type
        self.__mapping__ = mapping

        # An empty query
        self.structure = {}

    def _ensure_bool(self, query_type, name):
        '''Ensure we have a bool filter/quer struct prepared'''
        if not self.structure.get(query_type):
            self.structure[query_type] = {
                'bool': {
                    'must': [],
                    'should': [],
                    'must_not': []
                }
            }

    def _ensure_fields(self, fields):
        '''When we have a mapping, ensure the fields we use are valid'''
        pass
        # if self.__mapping__ is not None:
        #     mapping_fields = self.__mapping__.keys()
        #     fields = fields if isinstance(fields, list) else [fields]
        #     for field in fields:
        #         if field not in mapping_fields:
        #             raise InvalidField(field)

    def set(self, key, value):
        '''Set an arbitrary attribute on this query'''
        self.structure[key] = value
        return self

    def offset(self, offset):
        '''Offset the query results.'''
        self.structure['from'] = offset
        return self

    def limit(self, size):
        '''Limit the number of query results.'''
        self.structure['size'] = size
        return self

    def sort(self, field, order=False):
        '''Sort the query results'''
        if 'sort' not in self.structure:
            self.structure['sort'] = []

        if not order:
            self.structure['sort'].append(field)
        else:
            self.structure['sort'].append({
                field: {
                    'order': order
                }
            })

        return self

    def fields(self, fields):
        '''Limit the fields returned by this query'''
        self._ensure_fields(fields)
        self.structure['_source'] = fields
        return self

    def must(self, *must):
        '''Add one or more conditions which must be met by this query'''
        for (query_type, fields, object) in must:
            self._ensure_fields(fields)
            self._ensure_bool(query_type, 'must')
            self.structure[query_type]['bool']['must'].append(object)

        return self

    def should(self, *should):
        '''Add one or more conditions which should be met by this query'''
        for (query_type, fields, object) in should:
            self._ensure_fields(fields)
            self._ensure_bool(query_type, 'should')
            self.structure[query_type]['bool']['should'].append(object)

        return self

    def must_not(self, *must_not):
        '''Add one or more conditions which must not be met by this query'''
        for (query_type, fields, object) in must_not:
            self._ensure_fields(fields)
            self._ensure_bool(query_type, 'must_not')
            self.structure[query_type]['bool']['must_not'].append(object)

        return self

    def aggregate(self, *aggregates):
        '''Add a aggregations to the query.'''
        if 'aggregations' not in self.structure:
            self.structure['aggregations'] = {}

        [
            self.structure['aggregations'].update(aggregate)
            for aggregate in aggregates
        ]

        return self

    def dict(self):
        '''Return the current query representation.'''
        return self.structure

    def json(self, indent=None):
        '''Return the current query as a JSON document.'''
        return json.dumps(
            self.dict(),
            indent=indent
        )

    def get(self):
        '''Execute the current query (requires __es__, __index__ & __doc_type__)'''
        if self.__es__ is None:
            raise NoESClient()
        if self.__index__ is None:
            raise NoIndexName()
        if self.__doc_type__ is None:
            raise NoDocType()

        return self.__es__.search(
            index=self.__index__,
            doc_type=self.__doc_type__,
            body=self.structure
        )
Python
0.000001
@@ -1748,32 +1748,171 @@ return self%0A%0A + def timeout(self, timeout):%0A '''Set a timeout on the query.'''%0A self.structure%5B'timeout'%5D = timeout%0A return self%0A%0A def sort(sel
4c4b1e6a4bde5edb9e11942245a21437e73fe6df
fix link creation
archivebox/index/sql.py
archivebox/index/sql.py
__package__ = 'archivebox.index'

from io import StringIO
from typing import List, Tuple, Iterator

from .schema import Link
from ..util import enforce_types
from ..config import setup_django, OUTPUT_DIR


### Main Links Index

@enforce_types
def parse_sql_main_index(out_dir: str=OUTPUT_DIR) -> Iterator[Link]:
    setup_django(out_dir, check_db=True)
    from core.models import Snapshot

    return (
        Link.from_json(page.as_json(*Snapshot.keys))
        for page in Snapshot.objects.all()
    )

@enforce_types
def write_sql_main_index(links: List[Link], out_dir: str=OUTPUT_DIR) -> None:
    setup_django(out_dir, check_db=True)
    from core.models import Snapshot
    from django.db import transaction

    with transaction.atomic():
        for link in links:
            info = {k: v for k, v in link._asdict().items() if k in Snapshot.keys}
            Snapshot.objects.update_or_create(url=url, defaults=info)

@enforce_types
def write_sql_link_details(link: Link, out_dir: str=OUTPUT_DIR) -> None:
    setup_django(out_dir, check_db=True)
    from core.models import Snapshot
    from django.db import transaction

    with transaction.atomic():
        snap = Snapshot.objects.get(url=link.url, timestamp=link.timestamp)
        snap.title = link.title
        snap.tags = link.tags
        snap.save()

@enforce_types
def list_migrations(out_dir: str=OUTPUT_DIR) -> List[Tuple[bool, str]]:
    setup_django(out_dir, check_db=False)
    from django.core.management import call_command
    out = StringIO()
    call_command("showmigrations", list=True, stdout=out)
    out.seek(0)
    migrations = []
    for line in out.readlines():
        if line.strip() and ']' in line:
            status_str, name_str = line.strip().split(']', 1)
            is_applied = 'X' in status_str
            migration_name = name_str.strip()
            migrations.append((is_applied, migration_name))
    return migrations

@enforce_types
def apply_migrations(out_dir: str=OUTPUT_DIR) -> List[str]:
    setup_django(out_dir, check_db=False)
    from django.core.management import call_command
    null, out = StringIO(), StringIO()
    call_command("makemigrations", interactive=False, stdout=null)
    call_command("migrate", interactive=False, stdout=out)
    out.seek(0)

    return [line.strip() for line in out.readlines() if line.strip()]

@enforce_types
def get_admins(out_dir: str=OUTPUT_DIR) -> List[str]:
    setup_django(out_dir, check_db=False)
    from django.contrib.auth.models import User
    return User.objects.filter(is_superuser=True)
Python
0
@@ -901,16 +901,21 @@ ate(url= +link. url, def
7c7b7316843c9ae20073878485e6d68a6813a756
remove useless rack checking code
bvalosek_MF_Twister/TwisterControlSurface.py
bvalosek_MF_Twister/TwisterControlSurface.py
from _Framework.ButtonMatrixElement import ButtonMatrixElement
from _Framework.ControlSurface import ControlSurface
from _Framework.InputControlElement import MIDI_CC_TYPE
from _Framework.Layer import Layer
from _Framework.MixerComponent import MixerComponent
from _Framework.ModesComponent import LayerMode
from _Framework.SubjectSlot import subject_slot, subject_slot_group

from consts import *
from Colors import *

from bvalosek_common.MetronomeComponent import MetronomeComponent
from BackgroundComponent import BackgroundComponent
from ButtonElementEx import ButtonElementEx
from DeviceComponentEx import DeviceComponentEx
from MixerComponentEx import MixerComponentEx, ChannelStripComponentEx
from ModesComponentEx import ModesComponentEx
from SendsComponent import SendsComponent
from SkinDefault import make_default_skin
from SliderElementEx import SliderElementEx

def to_matrix(buttons):
    return ButtonMatrixElement(rows = [buttons])

class TwisterControlSurface(ControlSurface):

    def __init__(self, c_instance):
        ControlSurface.__init__(self, c_instance)
        self._handle_track_change.subject = self.song().view
        with self.component_guard():
            self._skin = make_default_skin()
            self._setup_controls()
            self._setup_background()
            self._setup_device()
            self._setup_mixer()
            self._setup_modes()
            self._handle_track_change()

    @subject_slot('selected_track')
    def _handle_track_change(self):
        track = self.song().view.selected_track
        self._select_device_on_track(track)

        # only change sends if the device isnt locked -- keeps strip in sync
        # with the locked device
        if not self._device._locked_to_device:
            self._strip.set_track(track)

    def _select_device_on_track(self, track):
        if not track or not len(track.devices):
            return
        device = track.devices[0]

        # if its a drum rack without macros, try to set the device to chain
        # instead in order to have the focus be on the selected / last played
        # pad's chain
        rack = device.can_have_drum_pads
        if rack and not device.has_macro_mappings and len(device.chains) > 0:
            chain = device.view.selected_chain or device.chains[0]
            if len(chain.devices):
                device = chain.devices[0]

        self._device.set_device(device)

    def _setup_background(self):
        background = BackgroundComponent()
        background.layer = Layer(priority = -100, knobs = self._knobs, lights = self._buttons)

    def _setup_device(self):
        self._device = DeviceComponentEx()
        self.set_device_component(self._device)

    def _setup_mixer(self):
        self._strip = ChannelStripComponentEx()
        self._mixer = MixerComponentEx(num_returns = 8)

    def _setup_controls(self):
        knobs = [ [ self._make_knob(row, col) for col in range(4) ] for row in range(4) ]
        buttons = [ [ self._make_button(row, col) for col in range(4) ] for row in range(4) ]
        self._knobs = ButtonMatrixElement(knobs)
        self._buttons = ButtonMatrixElement(buttons)

    def _make_knob(self, row, col):
        return SliderElementEx(
            msg_type = MIDI_CC_TYPE,
            channel = KNOB_CHANNEL,
            identifier = row * 4 + col)

    def _make_button(self, row, col):
        return ButtonElementEx(
            msg_type = MIDI_CC_TYPE,
            channel = BUTTON_CHANNEL,
            identifier = row * 4 + col,
            is_momentary = True,
            skin = self._skin)

    def _setup_modes(self):
        self._modes = ModesComponentEx()
        self._setup_main_mode()
        self._setup_sixteen_param_mode()
        self._setup_mixer_mode()
        self._modes.layer = Layer(priority = 10,
            main_mode_button = self._buttons.get_button(0, 0),
            sixteen_param_mode_button = self._buttons.get_button(1, 0),
            mixer_mode_button = self._buttons.get_button(2, 0))
        self._modes.selected_mode = 'main_mode'

    def _setup_main_mode(self):
        strip_layer = Layer(
            volume_control = self._knobs.get_button(3, 0),
            arm_button = self._buttons.get_button(3, 0),
            send_controls = self._knobs.submatrix[:, 1])

        mixer_layer = Layer(
            prehear_volume_control = self._knobs.get_button(0, 0),
            return_track_select_buttons = self._buttons.submatrix[:, 1])

        device_layer = Layer(
            on_off_button = self._buttons.get_button(3, 3),
            parameter_controls = self._knobs.submatrix[:, 2:],
            lock_button = self._buttons.get_button(2, 3))

        metronome = MetronomeComponent()
        metronome_layer = Layer(lights = self._buttons.submatrix[:, 2])

        device_bg = BackgroundComponent(color = 'Device.Background')
        device_bg_layer = Layer(priority = -10, lights = self._buttons.submatrix[:, 2:])

        self._modes.add_mode('main_mode', [
            LayerMode(self._strip, strip_layer),
            LayerMode(self._mixer, mixer_layer),
            LayerMode(metronome, metronome_layer),
            LayerMode(device_bg, device_bg_layer),
            LayerMode(self._device, device_layer)
        ])

    def _setup_sixteen_param_mode(self):
        device_layer = Layer(
            parameter_controls = self._knobs,
            on_off_button = self._buttons.get_button(3, 3),
            bank_buttons = self._buttons.submatrix[:, 2],
            lock_button = self._buttons.get_button(2, 3))

        device_bg = BackgroundComponent(color = 'Device.Background')
        device_bg_layer = Layer(priority = -10, lights = self._buttons)

        self._modes.add_mode('sixteen_param_mode', [
            LayerMode(device_bg, device_bg_layer),
            LayerMode(self._device, device_layer)
        ])

    def _setup_mixer_mode(self):
        strip_layer = Layer(
            send_controls = self._knobs.submatrix[:, :2])

        mixer_layer = Layer(
            return_track_select_buttons = self._buttons.submatrix[:, :2])

        strip_mode = LayerMode(self._strip, strip_layer)
        mixer_mode = LayerMode(self._mixer, mixer_layer)

        self._modes.add_mode('mixer_mode', [ strip_mode, mixer_mode ])
Python
0
@@ -1945,448 +1945,8 @@ s%5B0%5D -%0A%0A # if its a drum rack without macros, try to set the device to chain%0A # instead in order to have the focus be on the selected / last played%0A # pad's chain%0A rack = device.can_have_drum_pads%0A if rack and not device.has_macro_mappings and len(device.chains) %3E 0:%0A chain = device.view.selected_chain or device.chains%5B0%5D%0A if len(chain.devices):%0A device = chain.devices%5B0%5D %0A
1f75ecb06a8bb463648e3b7689de043f0bab0d16
Remove unneeded users.json fixtures from readout tests.
apps/dashboards/tests/test_readouts.py
apps/dashboards/tests/test_readouts.py
from datetime import datetime
from functools import partial

from nose.tools import eq_

from dashboards.readouts import (UnreviewedReadout,
                                 TemplateTranslationsReadout,
                                 MostVisitedTranslationsReadout)
from sumo.tests import TestCase
from wiki.models import MAJOR_SIGNIFICANCE, MEDIUM_SIGNIFICANCE
from wiki.tests import revision, translated_revision, document


NON_DEFAULT_LOCALE = 'de'
translated_revision = partial(translated_revision, locale=NON_DEFAULT_LOCALE)


class MockRequest(object):
    locale = NON_DEFAULT_LOCALE


class UnreviewedChangesTests(TestCase):
    """Tests for the Unreviewed Changes readout

    I'm not trying to cover every slice of the Venn diagram--just the
    tricky bits.

    """
    fixtures = ['users.json']

    @staticmethod
    def titles():
        """Return the titles shown by the Unreviewed Changes readout."""
        return [row['title'] for row in
                UnreviewedReadout(MockRequest()).rows()]

    def test_unrevieweds_after_current(self):
        """Show the unreviewed revisions with later creation dates than
        the current"""
        current = translated_revision(is_approved=True, save=True,
                                      created=datetime(2000, 1, 1))
        unreviewed = revision(document=current.document, save=True,
                              created=datetime(2000, 2, 1))
        assert unreviewed.document.title in self.titles()

    def test_current_revision_null(self):
        """Show all unreviewed revisions if none have been approved yet."""
        unreviewed = translated_revision(save=True)
        assert unreviewed.document.title in self.titles()

    def test_rejected_newer_than_current(self):
        """If there are reviewed but unapproved (i.e. rejected) revisions
        newer than the current_revision, don't show them."""
        rejected = translated_revision(reviewed=datetime.now(), save=True)
        assert rejected.document.title not in self.titles()


class MostVisitedTranslationsTests(TestCase):
    """Tests for the Most Visited Translations readout

    This is an especially tricky readout, since it effectively implements
    a superset of all other readouts' status discriminators.

    """
    fixtures = ['users.json']

    @staticmethod
    def row():
        """Return first row shown by the Most Visited Translations readout."""
        return MostVisitedTranslationsReadout(MockRequest()).rows()[0]

    def test_unreviewed(self):
        """Assert articles in need of review are labeled as such."""
        unreviewed = translated_revision(is_approved=False, save=True)
        row = self.row()
        eq_(row['title'], unreviewed.document.title)
        eq_(row['status'], 'Review Needed')

    def test_unlocalizable(self):
        """Unlocalizable docs shouldn't show up in the list."""
        revision(
            document=document(is_localizable=False, save=True),
            is_approved=True,
            save=True)
        self.assertRaises(IndexError, self.row)

    def _test_significance(self, significance, status):
        """Assert that a translation out of date due to a `significance`-level
        update to the original article shows status `status`."""
        translation = translated_revision(is_approved=True, save=True)
        revision(document=translation.document.parent,
                 is_approved=True,
                 significance=significance,
                 save=True)
        row = self.row()
        eq_(row['title'], translation.document.title)
        eq_(row['status'], status)

    def test_out_of_date(self):
        """Assert out-of-date translations are labeled as such."""
        self._test_significance(MAJOR_SIGNIFICANCE, 'Out of Date')

    def test_update_needed(self):
        """Assert update-needed translations are labeled as such."""
        self._test_significance(MEDIUM_SIGNIFICANCE, 'Update Needed')

    def test_untranslated(self):
        """Assert untranslated documents are labeled as such."""
        untranslated = revision(save=True)
        row = self.row()
        eq_(row['title'], untranslated.document.title)
        eq_(unicode(row['status']), 'Translation Needed')

    def test_up_to_date(self):
        """Show up-to-date translations have no status, just a happy class."""
        translation = translated_revision(is_approved=True, save=True)
        row = self.row()
        eq_(row['title'], translation.document.title)
        eq_(unicode(row['status']), '')
        eq_(row['status_class'], 'ok')


class TemplateTranslationsReadoutTests(TestCase):
    """Tests for the Template Translations readout"""
    fixtures = ['users.json']

    @staticmethod
    def row():
        """Return first row shown by the Template Translations readout."""
        return TemplateTranslationsReadout(MockRequest()).rows()[0]

    def test_not_template(self):
        """Documents that are not templates shouldn't show up in the list."""
        translated_revision(is_approved=False, save=True)
        self.assertRaises(IndexError, self.row)

    def test_untranslated(self):
        """Assert untranslated templates are labeled as such."""
        d = document(title='Template:test', save=True)
        untranslated = revision(is_approved=True, document=d, save=True)
        row = self.row()
        eq_(row['title'], untranslated.document.title)
        eq_(unicode(row['status']), 'Translation Needed')
Python
0
@@ -778,39 +778,8 @@ %22%22%22%0A - fixtures = %5B'users.json'%5D%0A%0A @@ -2243,39 +2243,8 @@ %22%22%22%0A - fixtures = %5B'users.json'%5D%0A%0A @@ -4599,38 +4599,8 @@ t%22%22%22 -%0A fixtures = %5B'users.json'%5D %0A%0A
01c1466487a9802f4c3d3c21390587ae6d3a7122
Fix bug in tasks.sync_notes
everbean/tasks.py
everbean/tasks.py
# coding=utf-8
from __future__ import (
    with_statement,
    absolute_import,
    unicode_literals
)
import time
from datetime import datetime

from flask import current_app as app
from flask.ext.mail import Message

from everbean.core import mail, db, celery
from everbean.models import User, Note
from everbean.ext.douban import (
    get_douban_client,
    import_annotations,
    import_books
)
from everbean.ext.evernote import (
    get_evernote_client,
    get_notebook,
    find_note,
    make_note,
    create_or_update_note
)


@celery.task
def send_mail(messages):
    if not (app.config['MAIL_SERVER'] and
            app.config['MAIL_USERNAME'] and
            app.config['MAIL_PASSWORD']):
        return False
    if isinstance(messages, Message):
        messages = [messages, ]
    with mail.connect() as conn:
        for msg in messages:
            conn.send(msg)


@celery.task
def refresh_douban_access_token(user):
    client = get_douban_client()
    client.refresh_token(user.douban_refresh_token)
    me = client.user.me
    if 'id' in me:
        # update access token and other infomations
        user.douban_access_token = client.token_code
        user.douban_refresh_token = client.refresh_token_code
        user.douban_expires_at = client.access_token.expires_at
        user.douban_name = me['name']
        user.avatar = me['avatar']
        user.large_avatar = me['avatar'].replace('icon/u', 'icon/ul')
        user.signature = me['signature']
        user.desc = me['desc']
        db.session.add(user)
        db.session.commit()
    else:
        app.logger.warning('Refresh token for user %s error.' % user.douban_uid)


@celery.task
def sync_books(user):
    """ Sync reading status books """
    import_books(user)


@celery.task
def import_douban_annotations(user):
    import_annotations(user)


@celery.task
def sync_book_notes(user_id, book, notes=None):
    user = User.query.get(user_id)
    if not user or not user.evernote_access_token:
        return
    if notes is None:
        notes = Note.query.filter_by(
            user_id=user.id,
            book_id=book.id
        ).order_by(Note.created.asc()).all()
    if not notes:
        return
    # generate evernote format note
    token = user.evernote_access_token
    en = get_evernote_client(user.is_i18n, token)
    note_store = en.get_note_store()
    notebook = get_notebook(
        note_store,
        user.evernote_notebook,
        app.config['EVERNOTE_NOTEBOOK_NAME']
    )
    if not user.evernote_notebook:
        user.evernote_notebook = notebook.guid
        db.session.add(user)
        db.session.commit()
    note = None
    the_book = user.user_books.filter_by(book_id=book.id).first()
    if not the_book:
        return
    if the_book.evernote_guid:
        note = find_note(note_store, the_book.evernote_guid)
    note = make_note(
        book,
        notes,
        note,
        notebook,
        template=user.template
    )
    if note.guid:
        # note.updated is milliseconds, should convert it to seconds
        updated = note.updated / 1000
        book_updated = time.mktime(the_book.updated.timetuple())
        if updated >= book_updated:
            return
    # sync to evernote
    note = create_or_update_note(note_store, note)
    # sync guid to database
    if note and hasattr(note, 'guid'):
        the_book.evernote_guid = note.guid
    the_book.updated = datetime.now()
    db.session.add(the_book)
    db.session.add(user)
    db.session.commit()


def sync_notes(user):
    def _sync_notes_of_book(book):
        notes = Note.query.filter_by(
            user_id=user.id,
            book_id=book.id
        ).order_by(Note.created.asc()).all()
        if notes:
            sync_book_notes.delay(user.id, book, notes)

    if not user.enable_sync:
        return
    books = user.books.all()
    # now we can sync notes to evernote
    map(_sync_notes_of_book, books)
Python
0.000021
@@ -3868,22 +3868,16 @@ er.books -.all() %0A # n
efdf4a4898cc3b5217ac5e45e75a74e19eee95d4
bump version
evojax/version.py
evojax/version.py
__version__ = "0.1.0-13"
Python
0
@@ -19,7 +19,7 @@ .0-1 -3 +4 %22%0A
155c953f7bf8590b4a11547369bee29baa5ea5f6
Fix typo.
isaactest/tests/numeric_q_all_correct.py
isaactest/tests/numeric_q_all_correct.py
import time

from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import answer_numeric_q
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException, NoSuchElementException

__all__ = ["numeric_q_all_correct"]


#####
# Test : Numeric Questions Correct Answers
#####
@TestWithDependency("NUMERIC_Q_ALL_CORRECT", ["NUMERIC_Q_UNITS_SELECT"])
def numeric_q_all_correct(driver, ISAAC_WEB, WAIT_DUR):
    """Test is numeric questions can be answered correctly.

        - 'driver' should be a Selenium WebDriver.
        - 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
        - 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
    """
    assert_tab(driver, ISAAC_WEB)
    time.sleep(WAIT_DUR)
    try:
        num_question = driver.find_element_by_xpath("//div[@ng-switch-when='isaacNumericQuestion']")
    except NoSuchElementException:
        log(ERROR, "Can't find the numeric question; can't continue!")
        return False
    log(INFO, "Attempt to enter correct answer.")
    if not answer_numeric_q(num_question, "2.01", "\units{ m\,s^{-1} }", wait_dur=WAIT_DUR):
        log(ERROR, "Couldn't answer Numeric Question; can't continue!")
        return False
    time.sleep(WAIT_DUR)

    try:
        wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//h1[text()='Correct!']")
        log(INFO, "A 'Correct!' message was displayed as expected.")
        wait_for_xpath_element(driver, "(//div[@ng-switch-when='isaacNumericQuestion']//p[text()='This is a correct choice.'])[2]")
        log(INFO, "The editor entered explanation text was correctly shown.")
        wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//strong[text()='Well done!']")
        log(INFO, "The 'Well done!' message was correctly shown.")
        time.sleep(WAIT_DUR)
        log(PASS, "Numeric Question 'correct value, correct unit' behavior as expected.")
        return True
    except TimeoutException:
        image_div(driver, "ERROR_numeric_q_all_correct")
        log(ERROR, "The messages shown for a correct answer were not all displayed; see 'ERROR_numeric_q_all_correct.png'!")
        return False
Python
0.001604
@@ -557,17 +557,17 @@ %22%22Test i -s +f numeric
aabc4bc60f0c8b6db21453dd6fad387773b18e55
Fix a print
openquake/commands/__main__.py
openquake/commands/__main__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import importlib
from openquake.baselib import sap
from openquake.commonlib import __version__
from openquake import commands

PY_VER = sys.version_info[:3]

# check for Python version
if PY_VER < (3, 5):
    sys.exit('Python 3.5+ is required, you are using %s', sys.executable)
elif PY_VER < (3, 6):
    print('Warning: Python %s.%s.%s is deprecated. '
          'Please upgrade to Python 3.6+' % PY_VER)

# force cluster users to use `oq engine` so that we have centralized logs
if os.environ['OQ_DISTRIBUTE'] == 'celery' and 'run' in sys.argv:
    print('You are on a cluster and you are using oq run?? '
          'Use oq engine --run instead!')


def oq():
    modnames = ['openquake.commands.%s' % mod[:-3]
                for mod in os.listdir(commands.__path__[0])
                if mod.endswith('.py') and not mod.startswith('_')]
    for modname in modnames:
        importlib.import_module(modname)
    parser = sap.compose(sap.Script.registry.values(),
                         prog='oq', version=__version__)
    parser.callfunc()


if __name__ == '__main__':
    oq()
Python
0.999999
@@ -1108,16 +1108,27 @@ print(' +Deprecation Warning:
be06ba33aff25995c9323e251003c9bea09aa9de
update UI
MellPlayer/ui.py
MellPlayer/ui.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
Netease Music UI

Created on 2017-02-21
@author: Mellcap
'''

import os

SONG_CATEGORIES = (
    '流行', '摇滚', '民谣', '说唱', '轻音乐', '乡村', '古典', 'R&B/Soul', '电子', '另类/独立',\
    '学习', '工作', '午休',\
    '华语', '欧美', '日语', '韩语', '粤语', '小语种',\
    '怀旧', '清新', '浪漫', '性感', '治愈', '放松', '兴奋', '快乐', '安静', '思念'
)

# All colors: https://upload.wikimedia.org/wikipedia/commons/1/15/Xterm_256color_chart.svg
FORE_COLOR = {
    # foreground colors
    'default' : 249, # default
    'blue' : 39,
    'green' : 34,
    'gray' : 239,
    'red' : 196,
    'pink' : 201
}

BLANK_CONSTANT = 3
MAX_LINES = len(SONG_CATEGORIES)
ALL_LINES = MAX_LINES + BLANK_CONSTANT
TERMINAL_SIZE = os.get_terminal_size()


class UI(object):

    def __init__(self):
        self.category_lines = SONG_CATEGORIES
        self.mark_index = 0
        self.play_index = 0
        self.play_info = ''
        self.top_index = 0
        self.screen_height = TERMINAL_SIZE.lines
        self.screen_width = TERMINAL_SIZE.columns
        self.title = 'MellPlayer'

    def display(self):
        '''
        Note: multi-threaded terminal output misbehaves, so append \r to the end of every line
        '''
        display_lines = ['\r']
        display_title = '\n%s\033[1m%s\r' % (' '*5, self.gen_color(self.title, 'blue'))
        display_lines.append(display_title)
        top_index = self.top_index
        bottom_index = (self.screen_height - BLANK_CONSTANT) + top_index
        for index, category in enumerate(self.category_lines[top_index: bottom_index]):
            # mark_index
            is_markline = True if (index + self.top_index) == self.mark_index else False
            category = self.gen_category(category, is_markline)
            # play_index
            play_info = ''
            is_playline = True if (index + self.top_index) == self.play_index else False
            if is_playline:
                play_info = self.gen_playline()
            complete_line = '%s%s%s\r' % (category, ' '*10, play_info)
            display_lines.append(complete_line)
        if ALL_LINES < self.screen_height:
            # fill_blanks
            display_lines = self.fill_blanks(display_lines)
        print('\n'.join(display_lines) + '\r')

    def next_line(self):
        if self.mark_index < (MAX_LINES - 1):
            self.mark_index += 1
            bottom_index = (self.screen_height - BLANK_CONSTANT) + self.top_index
            if self.mark_index > (bottom_index - 1):
                self.top_index += 1
            self.display()

    def prev_line(self):
        if self.mark_index > 0:
            self.mark_index -= 1
            if self.mark_index < self.top_index:
                self.top_index -= 1
            self.display()

    def update_play_index(self):
        self.play_index = self.mark_index
        self.display()

    def update_play_info(self, play_info):
        self.play_info = play_info
        self.display()

    def gen_category(self, category, is_markline=False):
        if is_markline:
            category = self.gen_mark(category)
            category = self.gen_color(data=category, color='pink')
        else:
            # fill 3 blanks
            category = '%s%s' % (' '*5, category)
            category = self.gen_color(data=category, color='')
        return category

    def gen_mark(self, category):
        return ' ➣ %s' % category

    def gen_playline(self):
        complete_info = [self.gen_color(data=p, color='pink') for p in self.play_info]
        divider = self.gen_color(data='|', color='')
        return (' %s ' % divider).join(complete_info)

    def gen_color(self, data, color):
        '''
        Reference: http://blog.csdn.net/gatieme/article/details/45439671
        We do not need that many variants yet; only foreground colors are used for now
        '''
        color_code = FORE_COLOR.get(color, 246)
        # data = "\033[;%s;m%s\033[0m" % (color_code, data)
        data = "\001\033[38;5;%sm\002%s\001\033[0m\002" % (color_code, data)
        return data

    def fill_blanks(self, display_lines):
        delta_lines = self.screen_height - ALL_LINES
        display_lines += [' ' for i in range(delta_lines)]
        return display_lines
Python
0.000001
@@ -1085,20 +1085,292 @@ e = -'MellPlayer' +self._get_title()%0A%0A def _get_title(self):%0A player_name = '%5C033%5B1m%25s' %25 self.gen_color('MellPlayer', 'blue')%0A netease = self.gen_color('%E7%BD%91%E6%98%93%E4%BA%91%E9%9F%B3%E4%B9%90', 'red')%0A divider = self.gen_color(data=r'%5C%5C', color='')%0A return (' %25s ' %25 divider).join(%5Bplayer_name, netease%5D) %0A%0A @@ -1476,16 +1476,16 @@ %5B'%5Cr'%5D%0A + @@ -1505,23 +1505,16 @@ = '%5Cn%25s -%5C033%5B1m %25s%5Cr' %25
df8848beffeb952f8da034c13d22245ce123f576
fix 677: Error on enum type during manage.py migrations
shop/models/fields.py
shop/models/fields.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import enum

import six
from django.conf import settings
from django.db import models
from django.utils.six import python_2_unicode_compatible, string_types
from django.utils.translation import ugettext_lazy as _, ugettext

postgresql_engine_names = [
    'django.db.backends.postgresql',
    'django.db.backends.postgresql_psycopg2',
]

if settings.DATABASES['default']['ENGINE'] in postgresql_engine_names:
    from django.contrib.postgres.fields import JSONField as _JSONField
else:
    from jsonfield.fields import JSONField as _JSONField


class JSONField(_JSONField):
    def __init__(self, *args, **kwargs):
        kwargs.update({'default': {}})
        super(JSONField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(JSONField, self).deconstruct()
        del kwargs['default']
        return name, path, args, kwargs


class ChoiceEnumMeta(enum.EnumMeta):
    def __call__(cls, value, *args, **kwargs):
        if isinstance(value, string_types):
            try:
                value = cls.__members__[value]
            except KeyError:
                pass  # let the super method complain
        return super(ChoiceEnumMeta, cls).__call__(value, *args, **kwargs)


@python_2_unicode_compatible
class ChoiceEnum(six.with_metaclass(ChoiceEnumMeta, enum.Enum)):
    """
    Utility class to handle choices in Django model fields
    """
    def __str__(self):
        return ugettext('.'.join((self.__class__.__name__, self.name)))

    @classmethod
    def default(cls):
        try:
            return next(iter(cls))
        except StopIteration:
            return None

    @classmethod
    def choices(cls):
        choices = [(c.value, str(c)) for c in cls]
        return choices


class ChoiceEnumField(models.PositiveSmallIntegerField):
    description = _("Customer recognition state")

    def __init__(self, *args, **kwargs):
        self.enum_type = kwargs.pop('enum_type', ChoiceEnum)  # fallback is required form migrations
        if not issubclass(self.enum_type, ChoiceEnum):
            raise ValueError("enum_type must be a subclass of `ChoiceEnum`.")
        kwargs.update(choices=self.enum_type.choices())
        kwargs.setdefault('default', self.enum_type.default())
        super(ChoiceEnumField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(ChoiceEnumField, self).deconstruct()
        if 'choices' in kwargs:
            del kwargs['choices']
        if kwargs['default'] is self.enum_type.default():
            del kwargs['default']
        elif isinstance(kwargs['default'], self.enum_type):
            kwargs['default'] = kwargs['default'].value
        return name, path, args, kwargs

    def from_db_value(self, value, expression, connection, context):
        try:
            return self.enum_type(value)
        except ValueError:
            return value

    def get_prep_value(self, state):
        if isinstance(state, self.enum_type):
            return state.value
        if isinstance(state, int):
            return state
        raise ValueError("Value must be of type {}".format(self.enum_type))

    def to_python(self, state):
        return self.enum_type(state)

    def value_to_string(self, obj):
        value = getattr(obj, self.name)
        if not isinstance(value, self.enum_type):
            raise ValueError("Value must be of type {}".format(self.enum_type))
        return value.name
Python
0
@@ -3104,135 +3104,20 @@ -if isinstance(state, int):%0A return state%0A raise ValueError(%22Value must be of type %7B%7D%22.format(self.enum_type)) +return state %0A%0A
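Editor's note: the hunk above drops the strict type check in get_prep_value, which raised during manage.py migrations when Django handed the field a plain int (e.g. a serialized default). A minimal stdlib-only sketch of the fixed behavior; the Recognized enum is a hypothetical stand-in for a ChoiceEnum subclass:

import enum

class Recognized(enum.Enum):  # hypothetical stand-in for a ChoiceEnum subclass
    UNRECOGNIZED = 0
    GUEST = 1

def get_prep_value_fixed(state):
    if isinstance(state, Recognized):
        return state.value
    return state  # after the patch: ints from migration defaults pass through

assert get_prep_value_fixed(Recognized.GUEST) == 1
assert get_prep_value_fixed(1) == 1  # previously raised ValueError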
c916f4c379fe2b7035da46bc194f010e1795f9ef
Remove some debug.
cool-bot.py
cool-bot.py
#!/usr/bin/env/ python

import os
import sys
import string
import socket

HOST='0.0.0.0'
PORT=6667
NICK='cool-bot'
REALNAME='cool bot'
CHANNEL='#cool-bot'

def connected(fn):
    def deco(self, *args, **kwargs):
        try:
            fn(self, *args, **kwargs)
        except socket.error, err:
            ## Removing the socket
            self._sock = None
        except Exception, err:
            if self._sock:
                self.die()
    return deco

class CoolBot(object):
    _sock = None
    _lines = [""]

    def _sendmsg(self, cmd, *args):
        print "SENDING: %s" % ("%s %s" % (cmd, ' '.join(args)))
        self._sock.send("%s %s\n" % (cmd, ' '.join(args)))

    def _buffermsg(self, data):
        lines = data.split('\n')
        self._lines[-1] += lines[0]
        self._lines.extend(lines[1:])

    def _processmsg(self, line):
        print line
        if line.startswith('PING'):
            self.pong()
            return
        line = line[1:]
        if line.find(':') == -1:
            return
        speaker, msg = [l.strip() for l in line.split(':', 1)]
        user, cmd = speaker.split(None, 1)
        if user == "cool-bot!~bot@127.0.0.1":
            ## Ignore messages from myself
            return
        if cmd.startswith('PRIVMSG'):
            channel = cmd.split()[1]
            self._processcmd(user, [channel, ], msg)

    def _processcmd(self, user, channels, raw):
        if not raw.startswith('!!'):
            return
        msg = raw.lower()
        try:
            cmd, msg = msg.split(None, 1)
        except:
            cmd = msg
        chunks = msg.split()
        targets = filter(lambda s: s.startswith('@'), chunks)
        for target in targets:
            channels.append(target[1:])
            msg = msg[0:msg.find(target)] + msg[msg.find(target) + len(target) + 1:]
        channels = list(set(channels))
        if 'cool-bot' in channels:
            channels.remove('cool-bot')
        if cmd in self._cmds:
            self._cmds[cmd](channels, msg)
        elif cmd in ['!!hi', '!!hello', '!!sup']:
            self.hello(channels, user)
        elif cmd in ['!!part']:
            self.leave(channels, msg)
        elif cmd in ['!!quit', '!!exit', '!!die']:
            self.die()
        elif cmd in ['!!join']:
            self.join([msg])

    def __init__(self, host, port, nick, name, channel):
        self._cmds = {
            '!!say' : self.say,
            '!!help' : self.help,
            '!!die' : self.die,
            '!!leave' : self.leave,
        }
        self.__connect__(host, port)
        self.__identify__(nick, name)
        self.join([channel])

    def __connect__(self, host, port):
        sock = socket.socket()
        sock.connect((host, port))
        self._sock = sock

    @connected
    def __identify__(self, nick, name):
        self._sendmsg('NICK', nick)
        self._sendmsg('USER', 'bot', '0.0.0.0:', name)

    @connected
    def process(self):
        self._buffermsg(self._sock.recv(512))
        while len(self._lines) > 1:
            self._processmsg(self._lines.pop(0))

    @connected
    def join(self, channel):
        self._sendmsg('JOIN', channel)

    @connected
    def hello(self, channels, user):
        self.say(channels, 'Hi ' + user)

    @connected
    def say(self, channels, msg):
        for channel in channels:
            self._sendmsg('PRIVMSG', channel, ':', msg)

    @connected
    def help(self, channels, msg = ""):
        self.say(channels, ', '.join(self._cmds.keys()))

    @connected
    def leave(self, channels, msg):
        for channel in channels:
            self._sendmsg('PART', channel, ':', 'You told me to go.')

    @connected
    def join(self, channels = '#hackerscool'):
        for channel in channels:
            self._sendmsg('JOIN', channel)

    @connected
    def die(self, channel = "", msg = ""):
        self._sendmsg('QUIT')
        self._sock.close()
        self._sock = None

    @connected
    def pong(self):
        self._sendmsg('PONG')

    def connected(self):
        return self._sock != None

if __name__ == "__main__":
    bot = CoolBot(HOST, PORT, NICK, REALNAME, CHANNEL)
    while bot.connected():
        bot.process()
Python
0
@@ -556,72 +556,8 @@ s):%0A - print %22SENDING: %25s%22 %25 (%22%25s %25s%22 %25 (cmd, ' '.join(args)))%0A
8b69b3af8b7ed9dcbd00f2b22a47828627dc7c78
fix setup
zgres/tests/test_apt.py
zgres/tests/test_apt.py
import os
from unittest import mock
import asyncio
from subprocess import check_output, check_call

import pytest
import psycopg2

from . import FakeSleeper

def have_root():
    destroy = os.environ.get('ZGRES_DESTROY_MACHINE', 'false').lower()
    if destroy in ['t', 'true']:
        user = check_output(['whoami']).decode('latin1').strip()
        if user != 'root':
            raise Exception('I need to run as root if you want me to destroy the machine! I am {}'.format(repr(user)))
        return True
    return False

needs_root = pytest.mark.skipif(not have_root(), reason='requires root and ZGRES_DESTROY_MACHINE=true in the environment')

@pytest.fixture
def cluster():
    return ('9.4', 'zgres_test')

@pytest.fixture
def plugin(cluster):
    pg_version, cluster_name = cluster
    app = mock.Mock()
    app.config = dict(
            apt=dict(
                postgresql_version=pg_version,
                postgresql_cluster_name=cluster_name))
    from ..apt import AptPostgresqlPlugin
    return AptPostgresqlPlugin('zgres#apt', app)

def test_config_file(plugin, cluster):
    assert plugin._config_file(name='pg_hba.conf') == '/etc/postgresql/{}/{}/pg_hba.conf'.format(*cluster)

@pytest.mark.asyncio
async def test_monitoring(plugin, cluster):
    with mock.patch('zgres.apt.sleep') as sleep, mock.patch('zgres.apt.call') as subprocess_call:
        retvals = [
                0, # become healthy
                1, # noop
                0, 0, # become healthy
                6, 5, # become unhelathy after 2 failed checks
                0, # become healthy
                ]
        subprocess_call.side_effect = retvals
        sleeper = FakeSleeper(max_loops=len(retvals) + 1)
        sleep.side_effect = sleeper
        plugin.start_monitoring()
        await sleeper.wait()
        assert plugin.app.mock_calls == [
                mock.call.unhealthy(('zgres#apt', 'systemd'), 'Waiting for first systemd check'),
                mock.call.healthy(('zgres#apt', 'systemd')),
                mock.call.healthy(('zgres#apt', 'systemd')),
                mock.call.unhealthy(('zgres#apt', 'systemd'), 'inactive according to systemd'),
                mock.call.healthy(('zgres#apt', 'systemd')),
                ]
        subprocess_call.assert_has_calls(
                [mock.call(['systemctl', 'is-active', 'postgresql@{}-{}.service'.format(*cluster)]),
                ] * len(retvals))

@needs_root
def test_travis(plugin, cluster):
    plugin.pg_initdb()
    plugin.pg_start()
    conn_info = plugin.pg_connect_info()
    with psycopg2.connect(**conn_info) as conn:
        with conn.cursor() as cur:
            cur.execute('SELECT version(), current_database();')
            got_ver, got_db = cur.fetchall()[0]
    assert got_ver == cluster[0]
    assert got_db == 'PostgreSQL {}'.format(cluster[1])
    check_call(['pg_dropcluster'] + list(cluster))
Python
0.000001
@@ -951,16 +951,55 @@ ter_name +,%0A create_superuser=True ))%0A f
2a13f4d21085228a1ef615eec8a3e42110c315d3
Make test pass
benchmarker/modules/problems/cnn2d_toy/pytorch.py
benchmarker/modules/problems/cnn2d_toy/pytorch.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from benchmarker.modules.problems.helpers_torch import Net4Inference, Net4Train


class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2)
        # TODO: make sure we check cnt_classes
        self.dense1 = nn.Linear(1577088, 2)

    def __call__(self, x):
        h = x
        h = self.conv1(h)
        h = F.relu(h)
        h = self.conv2(h)
        h = F.relu(h)
        h = torch.flatten(h, 1)
        h = self.dense1(h)
        return h


# TODO: this can be reused as well
def get_kernel(net, params, unparsed_args=None):
    if params["mode"] == "inference":
        net = Net4Inference(net)
    else:
        net = Net4Train(net)
    return net
Python
0.000225
@@ -60,16 +60,17 @@ al as F%0A +%0A from ben @@ -734,13 +734,8 @@ nel( -net, para @@ -755,24 +755,40 @@ args=None):%0A + net = Net()%0A if param
f6e93144a2471ef22883f4db935a499463a76824
fix syntax errors
will/0003/into_redis.py
will/0003/into_redis.py
# Problem 0003: Save the 200 activation codes (or coupons) generated in Problem 0001 into a Redis non-relational database.

import random, string, time, math, uuid, redis

chars = string.ascii_letters + string.digits

def gen1():
    key = ''.join(random.sample(chars, 10))
    #key2 = ''.join(random.choice(chars) for i in range(10))
    return key

def gen2():
    key = math.modf(time.time())[0]
    return key

def gen3():
    return uuid.uuid4()

if '__name__' == '__main__':
    r = redis.Redis(host='localhost', port=6379, db=0)
    # r.set('name', 'will')
    # print(r.get('name'))
    for i in range(200):
        r.sadd('code', gen1())
    r.save()
Python
0.000031
@@ -387,17 +387,16 @@ %0Aif -' __name__ ' == @@ -391,17 +391,16 @@ __name__ -' == '__m
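Editor's note: the bug this diff removes is easy to miss. Quoting __name__ compares two unequal string literals, so the main guard never fires and the Redis block never runs. A one-line demonstration:

# '__name__' is a literal string here, not the module attribute, so the
# comparison is always False and the guarded block is dead code:
assert ('__name__' == '__main__') is False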
3b564cdd4adbf3185d2f18ec6eedbf4b87057cf5
Add virus fixture to conftest
conftest.py
conftest.py
from virtool.tests.fixtures.db import *
from virtool.tests.fixtures.documents import *
from virtool.tests.fixtures.client import *
from virtool.tests.fixtures.core import *
from virtool.tests.fixtures.hmm import *
from virtool.tests.fixtures.users import *


def pytest_addoption(parser):
    parser.addoption("--quick", action="store_true", help="Skip slower tests")
Python
0
@@ -250,16 +250,61 @@ mport *%0A +from virtool.tests.fixtures.viruses import *%0A %0A%0Adef py
769783bc46ecd4b8def05e3cf664a3844aa6dec5
Use a regex to parse the version
bazelisk.py
bazelisk.py
#!/usr/bin/env python3
'''
Copyright 2018 Google Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''

from distutils.version import LooseVersion
import json
import platform
import os.path
import subprocess
import shutil
import sys
import time
import urllib.request

ONE_HOUR = 1 * 60 * 60


def decide_which_bazel_version_to_use():
    # Check in this order:
    # - env var "USE_BAZEL_VERSION" is set to a specific version.
    # - env var "USE_NIGHTLY_BAZEL" or "USE_BAZEL_NIGHTLY" is set -> latest
    #   nightly. (TODO)
    # - env var "USE_CANARY_BAZEL" or "USE_BAZEL_CANARY" is set -> latest
    #   rc. (TODO)
    # - the file workspace_root/tools/bazel exists -> that version. (TODO)
    # - workspace_root/.bazelversion exists -> read contents, that version.
    # - workspace_root/WORKSPACE contains a version -> that version. (TODO)
    # - fallback: latest release
    if 'USE_BAZEL_VERSION' in os.environ:
        return os.environ['USE_BAZEL_VERSION']

    try:
        workspace_root = find_workspace_root()
        if workspace_root:
            with open(os.path.join(workspace_root, '.bazelversion'), 'r') as f:
                return f.read().strip()
    except FileNotFoundError:
        pass

    return 'latest'


def find_workspace_root(root=None):
    if root is None:
        root = os.getcwd()

    if os.path.exists(os.path.join(root, 'WORKSPACE')):
        return root

    new_root = os.path.dirname(root)
    return find_workspace_root(new_root) if new_root != root else None


def resolve_latest_version():
    req = urllib.request.Request(
        'https://api.github.com/repos/bazelbuild/bazel/releases', method='GET')
    res = urllib.request.urlopen(req).read().decode('utf-8')
    return str(
        max(
            LooseVersion(release['tag_name'])
            for release in json.loads(res)
            if not release['prerelease']))


def resolve_version_label_to_number(bazelisk_directory, version):
    if version == 'latest':
        latest_cache = os.path.join(bazelisk_directory, 'latest_bazel')
        try:
            if abs(time.time() - os.path.getmtime(latest_cache)) < ONE_HOUR:
                with open(latest_cache, 'r') as f:
                    return f.read().strip()
        except FileNotFoundError:
            pass

        latest_version = resolve_latest_version()
        with open(latest_cache, 'w') as f:
            f.write(latest_version)

        return latest_version

    return version


def determine_bazel_filename(version):
    machine = platform.machine()
    if machine != 'x86_64':
        raise Exception('Unsupported machine architecture "{}". '
                        'Bazel currently only supports x86_64.'.format(machine))

    operating_system = platform.system().lower()
    if operating_system not in ('linux', 'darwin', 'windows'):
        raise Exception('Unsupported operating system "{}". '
                        'Bazel currently only supports Linux, macOS and Windows.'
                        .format(operating_system))

    return 'bazel-{}-{}-{}'.format(version, operating_system, machine)


def determine_release_or_rc(version):
    parts = version.lower().split("rc")
    if len(parts) == 1:
        # e.g. ("0.20.0", "release") for 0.20.0
        return (version, "release")
    elif len(parts) == 2:
        # e.g. ("0.20.0", "rc2") for 0.20.0rc2
        return (parts[0], "rc" + parts[1])
    else:
        raise Exception("Invalid version: {}. "
                        "Versions must be in the form <x>.<y>.<z>[rc<rc-number>]"
                        .format(version))


def download_bazel_into_directory(version, directory):
    bazel_filename = determine_bazel_filename(version)
    (parsed_version, release_or_rc) = determine_release_or_rc(version)
    url = "https://releases.bazel.build/{}/{}/{}".format(
        parsed_version, release_or_rc, bazel_filename)
    destination_path = os.path.join(directory, bazel_filename)
    if not os.path.exists(destination_path):
        sys.stderr.write("Downloading {}...\n".format(url))
        with urllib.request.urlopen(url) as response, \
                open(destination_path, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
    os.chmod(destination_path, 0o755)
    return destination_path


def main(argv=None):
    if argv is None:
        argv = sys.argv

    bazelisk_directory = os.path.join(os.path.expanduser('~'), '.bazelisk')
    os.makedirs(bazelisk_directory, exist_ok=True)

    bazel_version = decide_which_bazel_version_to_use()
    bazel_version = resolve_version_label_to_number(bazelisk_directory,
                                                    bazel_version)

    bazel_directory = os.path.join(bazelisk_directory, 'bin')
    os.makedirs(bazel_directory, exist_ok=True)
    bazel_path = download_bazel_into_directory(bazel_version, bazel_directory)

    return subprocess.Popen([bazel_path] + argv[1:], close_fds=True).wait()


if __name__ == '__main__':
    sys.exit(main())
Python
0.00001
@@ -684,16 +684,26 @@ os.path%0A +import re%0A import s @@ -3426,712 +3426,390 @@ ine_ -release_or_rc(version):%0A parts = version.lower().split(%22rc%22)%0A if len(parts) == 1:%0A # e.g. (%220.20.0%22, %22release%22) for 0.20.0%0A return (version, %22release%22)%0A elif len(parts) == 2:%0A # e.g. (%220.20.0%22, %22rc2%22) for 0.20.0rc2%0A +url(version, bazel_filename):%0A # Split version into%0A (version, rc) = re.match(r'(%5Cd*%5C.%5Cd*(?:%5C.%5Cd*)?)(rc%5Cd)?', version).groups()%0A - return -(parts%5B0%5D, %22rc%22 + parts%5B1%5D)%0A else:%0A raise Exception(%22Invalid version: %7B%7D. %22%0A %22Versions must be in the form %3Cx%3E.%3Cy%3E.%3Cz%3E%5Brc%3Crc-number%3E%5D%22%0A .format(version))%0A%0A%0Adef download_bazel_into_directory(version, directory):%0A bazel_filename = determine_bazel_filename(version)%0A (parsed_version, release_or_rc) = determine_release_or_rc(version)%0A url = %22https://releases.bazel.build/%7B%7D/%7B%7D/%7B%7D%22.format(%0A parsed_version, release_or_rc +%22https://releases.bazel.build/%7B%7D/%7B%7D/%7B%7D%22.format(%0A version, rc if rc else %22release%22, bazel_filename)%0A%0A%0Adef download_bazel_into_directory(version, directory):%0A bazel_filename = determine_bazel_filename(version)%0A url = determine_url(version , ba
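Editor's note: a quick check of the regex this diff introduces in the new determine_url(); it splits a version label into a release part and an optional rc suffix in a single pass (the split_version helper name is ours, not the patch's):

import re

def split_version(version):
    # same pattern as the patched determine_url()
    return re.match(r'(\d*\.\d*(?:\.\d*)?)(rc\d)?', version).groups()

assert split_version('0.20.0') == ('0.20.0', None)
assert split_version('0.20.0rc2') == ('0.20.0', 'rc2')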
8b2827a87927e60cefb83f273b58d9aba9f9600d
improve error representation for doctests with cython
conftest.py
conftest.py
# -*- coding: utf-8 -*-
import os

import pytest
from sphinx.application import Sphinx

import imgui

PROJECT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

sphinx = None


def project_path(*paths):
    return os.path.join(PROJECT_ROOT_DIR, *paths)


class SphinxDoc(pytest.File):
    def __init__(self, path, parent):
        # yuck!
        global sphinx
        if sphinx is None:
            os.environ['SPHINX_DISABLE_RENDER'] = '1'
            sphinx = Sphinx(
                srcdir=project_path('doc', 'source'),
                confdir=project_path('doc', 'source'),
                outdir=project_path('doc', 'build', 'vistest'),
                doctreedir=project_path('doc', 'build', 'doctree'),
                buildername='vistest',
            )

        super(SphinxDoc, self).__init__(path, parent)

    def collect(self):
        # build only specified file
        sphinx.build(filenames=[self.fspath.relto(project_path())])
        return [
            DocItem(name, self, code)
            for (name, code) in sphinx.builder.snippets
        ]


class DocItem(pytest.Item):
    def __init__(self, name, parent, code):
        super(DocItem, self).__init__(name, parent)
        self.code = code

    def runtest(self):
        self.exec_snippet(self.code)

    @staticmethod
    def exec_snippet(source):
        code = compile(source, '<str>', 'exec')

        io = imgui.get_io()
        io.render_callback = lambda *args, **kwargs: None
        io.delta_time = 1.0 / 60.0
        io.display_size = 300, 300

        # setup default font
        io.fonts.get_tex_data_as_rgba32()
        io.fonts.add_font_default()
        io.fonts.texture_id = 0  # set any texture ID to avoid segfaults

        imgui.new_frame()
        exec(code, locals(), globals())
        imgui.render()

    def repr_failure(self, excinfo):
        """ called when self.runtest() raises an exception. """
        return "\n".join([
            "Documentation test execution for code:",
            self.code,
            "---",
            str(excinfo)
        ])

    def reportinfo(self):
        return self.fspath, 0, "usecase: %s" % self.name


def pytest_collect_file(parent, path):
    if path.ext == '.rst' and 'source' in path.dirname:
        return SphinxDoc(path, parent)
Python
0
@@ -26,16 +26,27 @@ mport os +%0Aimport sys %0A%0Aimport @@ -53,16 +53,63 @@ pytest%0A +from inspect import currentframe, getframeinfo%0A from sph @@ -1341,26 +1341,8 @@ e)%0A%0A - @staticmethod%0A @@ -1358,16 +1358,22 @@ snippet( +self, source): @@ -1420,16 +1420,65 @@ 'exec') +%0A frameinfo = getframeinfo(currentframe()) %0A%0A @@ -1835,16 +1835,34 @@ frame()%0A +%0A try:%0A @@ -1905,21 +1905,464 @@ -imgui.render( +except Exception as err:%0A # note: quick and dirty way to annotate sources with error marker%0A lines = source.split('%5Cn')%0A lines.insert(sys.exc_info()%5B2%5D.tb_next.tb_lineno, %22%5E%5E%5E%22)%0A self.code = %22%5Cn%22.join(lines)%0A raise%0A%0A imgui.render()%0A%0A @staticmethod%0A def indent(text, width=4):%0A return %22%5Cn%22.join(%0A %22%3E%22 + %22 %22 * width + line%0A for line in text.split('%5Cn')%0A )%0A%0A @@ -2481,71 +2481,72 @@ rn %22 -%5Cn%22.join(%5B%0A %22Documentation test execution for code:%22 +Visual example fail: %7B%7D%5Cn%5Cn%7B%7D%5Cn%5Cn%7B%7D%22.format(%0A excinfo ,%0A @@ -2547,32 +2547,44 @@ fo,%0A +self.indent( self.code,%0A @@ -2576,16 +2576,17 @@ elf.code +) ,%0A @@ -2595,38 +2595,52 @@ -%22---%22,%0A str(excinfo +excinfo.getrepr(funcargs=True, style='short' )%0A @@ -2645,17 +2645,16 @@ -%5D )%0A%0A d @@ -2710,11 +2710,12 @@ 0, %22 -use +test case @@ -2730,24 +2730,67 @@ self.name%0A%0A%0A +class ExecException(Exception):%0A pass%0A%0A%0A def pytest_c
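Editor's note: the "quick and dirty" marker trick from the hunk, reduced to a standalone sketch. The traceback of the exec'd snippet carries the failing line number inside the compiled '<str>' source, which tells you where to splice a "^^^" marker line:

import sys

source = "a = 1\nb = a / 0"
try:
    exec(compile(source, '<str>', 'exec'), {})
except Exception:
    lines = source.split('\n')
    # tb_next is the frame executing '<str>'; its 1-based lineno means the
    # insert lands directly after the failing line
    lines.insert(sys.exc_info()[2].tb_next.tb_lineno, "^^^")
    print("\n".join(lines))  # -> a = 1 / b = a / 0 / ^^^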
ed74c8f8cae53ee81d196ec85ede70597fd2d571
Troubleshoot diff_drv.py
diff_drv.py
diff_drv.py
# diff_drv.py
# Rover differential drive class
# Using Raspberry Pi GPIO software PWM
# Using two DRV8838 Single Brushed DC Motor Driver Carriers
# www.pololu.com/product/2990

import RPi.GPIO as GPIO

class diff_drv:
    """Rover differential drive class for two DRV8838 Motor Drivers"""

    # Signal Mnemonics
    LOW = 0
    HIGH = 1
    FORWARD = 1
    REVERSE = 0
    ENABLE = 1
    DISABLE = 0

    def __init__(self,l_en_pin,l_phase_pin,l_sln_pin,r_en_pin,r_phase_pin,r_sln_pin,freq):

        # GPIO numbering mode
        GPIO.setmode(GPIO.BOARD)

        # Assign arguments to local data
        self.l_en_pin = l_en_pin        # Enable / PWM pin
        self.l_en_freq = freq           # PWM cycle frequency
        self.l_phase_pin = l_phase_pin  # Phase pin
        self.l_sln_pin = l_sln_pin      # SLEEP NOT pin
        self.r_en_pin = r_en_pin        # Enable / PWM pin
        self.r_en_freq = freq           # PWM cycle frequency
        self.r_phase_pin = r_phase_pin  # Phase pin
        self.r_sln_pin = r_sln_pin      # !Sleep pin

        # Configure pins as outputs
        GPIO.setup(self.l_en_pin, GPIO.OUT)
        GPIO.setup(self.l_phase_pin, GPIO.OUT)
        GPIO.setup(self.l_sln_pin, GPIO.OUT)
        GPIO.setup(self.r_en_pin, GPIO.OUT)
        GPIO.setup(self.r_phase_pin, GPIO.OUT)
        GPIO.setup(self.r_sln_pin, GPIO.OUT)

        # Define/configure PWM pins
        self.l_en_pwm = GPIO.PWM(self.l_en_pin, self.l_en_freq)
        self.r_en_pwm = GPIO.PWM(self.r_en_pin, self.r_en_freq)

        # Set up default states - forward phase, coast drive mode
        self.l_phase = "FORWARD"
        GPIO.output(self.l_phase_pin, diff_drv.FORWARD)
        self.l_sln = "SLEEP"
        GPIO.output(self.l_sln_pin, diff_drv.DISABLE)
        self.r_phase = "FORWARD"
        GPIO.output(self.r_phase_pin, diff_drv.FORWARD)
        self.r_sln = "SLEEP"
        GPIO.output(self.r_sln_pin, diff_drv.DISABLE)
        self.l_en_pwm_cmd = 0
        self.r_en_pwm_cmd = 0

        # Start software PWMs at zero duty cycle
        self.l_en_pwm.start(0)
        self.r_en_pwm.start(0)

        # Enable forward and rotational speed control
        self.fwd_ctrl = diff_drv.ENABLE
        self.rot_ctrl = diff_drv.ENABLE

    def drive(self,fwd_dc, rot_dc=0, trim=0):

        # Mix speed, rotation, and trim
        # Speed is positive forward
        # Rotation is positive right per right hand rule

        # Add trim
        self.trim = trim
        rot_dc += self.trim

        # Handle control modes
        if self.fwd_ctrl & self.rot_ctrl:
            left_dc = fwd_dc - rot_dc/2
            right_dc = fwd_dc + rot_dc/2
        elif self.fwd_ctrl:
            left_dc = fwd_dc
            right_dc = fwd_dc
        elif self.rot_ctrl:
            left_dc = -rot_dc/2
            right_dc = rot_dc/2
        else:
            self.coast()
            return

        # Direction/phase discretes
        if left_dc < 0:
            self.l_phase = "REVERSE"
            GPIO.output(self.l_phase_pin, diff_drv.REVERSE)
        else:
            self.l_phase = "FORWARD"
            GPIO.output(self.l_phase_pin, diff_drv.FORWARD)
        if right_dc < 0:
            self.r_phase = "REVERSE"
            GPIO.output(self.r_phase_pin, diff_drv.REVERSE)
        else:
            self.r_phase = "FORWARD"
            GPIO.output(self.r_phase_pin, diff_drv.FORWARD)

        # Change PWM duty cycle
        self.l_en_pwm_cmd = min(100,abs(left_dc))
        self.l_en_pwm.ChangeDutyCycle(self.l_en_pwm_cmd)
        self.r_en_pwm_cmd = min(100,abs(right_dc))
        self.r_en_pwm.ChangeDutyCycle(self.r_en_pwm_cmd)

        # Ensure sleep is removed
        self.l_sln = "ENABLE"
        GPIO.output(self.l_sln_pin, diff_drv.ENABLE)
        self.l_sln = "ENABLE"
        GPIO.output(self.r_sln_pin, diff_drv.ENABLE)

    def spd_ctrl_enable(self):
        self.fwd_ctrl_enable = diff_drv.ENABLE

    def spd_ctrl_disable(self):
        self.fwd_ctrl_enable = diff_drv.DISABLE

    def rot_ctrl_enable(self):
        self.rot_ctrl_enable = diff_drv.ENABLE

    def rot_ctrl_disable(self):
        self.rot_ctrl_enable = diff_drv.DISABLE

    def coast(self):
        self.drive(0,0,self.trim)
        self.l_sln = "SLEEP"
        GPIO.output(self.l_sln_pin, diff_drv.DISABLE)
        self.r_sln = "SLEEP"
        GPIO.output(self.r_sln_pin, diff_drv.DISABLE)

    def stop(self):
        self.l_en_pwm.stop()
        self.r_en_pwm.stop()

    def cleanup(self):
        GPIO.cleanup()
Python
0.000004
@@ -2300,32 +2300,75 @@ iff_drv.ENABLE%0A%0A + # Zero Trim%0A self.trim = 0%0A%0A def drive(se
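Editor's note: the hunk initializes self.trim in __init__, so coast() (which passes self.trim back into drive()) no longer fails with AttributeError when called before any drive(). The differential mixing itself deserves a worked example: positive rotation trades left duty cycle for right.

# With fwd_ctrl and rot_ctrl both enabled, drive(50, 20) mixes to:
fwd_dc, rot_dc = 50, 20
left_dc = fwd_dc - rot_dc / 2    # 40.0 -> left wheel slows
right_dc = fwd_dc + rot_dc / 2   # 60.0 -> right wheel speeds up
assert (left_dc, right_dc) == (40.0, 60.0)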
f4498880ad8bd1b30f6439e82ac906034927b3df
Add local memcache config
microweb/settings.py
microweb/settings.py
import os

PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Do not change these! Override them in local_settings.py if necessary.
DEBUG = False
TEMPLATE_DEBUG = False

ADMINS = ()

MANAGERS = ADMINS

# Test runner gets unhappy if there's no database defined.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

TIME_ZONE = 'Europe/London'
LANGUAGE_CODE = 'en-gb'

# For Django sites framework, not used for anything in microcosm.
SITE_ID = 1

USE_I18N = True
USE_L10N = True
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
STATIC_ROOT = '/srv/www/django/microweb/static/'

# URL prefix for static files.
STATIC_URL = '/static/'

STATICFILES_DIRS = ()

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = '!ygb2(@l&h1+iy6z6jwiak3e**e3ljb=1fc5#i&1fk#0ve!+!&'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.core.context_processors.static',
)

MIDDLEWARE_CLASSES = (
    # Note: if using messages, enable the sessions middleware too
    'django.middleware.common.CommonMiddleware',
    #'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    #'django.contrib.auth.middleware.AuthenticationMiddleware',
    #'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',

    # convenience for request context like site, user account, etc.
    'microcosm.middleware.context.ContextMiddleware',

    # time all requests and report to riemann
    'microcosm.middleware.timing.TimingMiddleware',

    # push exceptions to riemann
    'microcosm.middleware.exception.ExceptionMiddleware',
)

ROOT_URLCONF = 'microweb.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'microweb.wsgi.application'

TEMPLATE_DIRS = ()

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'microcosm',
    'gunicorn',
)

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console':{
            'level': 'ERROR',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
        'file':{
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'formatter': 'verbose',
            'filename' : os.path.join(PROJECT_ROOT, 'application.log'),
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console','file'],
            'propagate': True,
            'level': 'INFO',
        },
        'django.request': {
            'handlers': ['console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'microcosm.views': {
            'handlers': ['console','file'],
            'level': 'ERROR',
            'propagate' : True,
        },
        'microcosm.middleware': {
            'handlers': ['console','file'],
            'level': 'ERROR',
            'propagate' : True,
        }
    }
}

# Populate the settings below in local_settings.py (see the README for example values).
CLIENT_ID = ''
CLIENT_SECRET = ''
API_SCHEME = ''
API_DOMAIN_NAME = ''
RIEMANN_ENABLED = False
RIEMANN_HOST = ''

PAGE_SIZE = 25

# Clobber any settings with those defined in local_settings.py
try:
    from local_settings import *
except ImportError:
    pass

if API_SCHEME == '' or API_DOMAIN_NAME == '' or API_PATH == '' or API_VERSION == '':
    raise AssertionError('Please define API settings in local_settings.py')
Python
0
@@ -4558,16 +4558,66 @@ OST = '' +%0AMEMCACHE_HOST = '127.0.0.1'%0AMEMCACHE_PORT = 11211 %0A%0APAGE_S
900de7c14607fbe2936fa682d03747916337f075
Fix the reactor_pytest fixture.
conftest.py
conftest.py
from pathlib import Path

import pytest


def _py_files(folder):
    return (str(p) for p in Path(folder).rglob('*.py'))


collect_ignore = [
    # not a test, but looks like a test
    "scrapy/utils/testsite.py",
    # contains scripts to be run by tests/test_crawler.py::CrawlerProcessSubprocess
    *_py_files("tests/CrawlerProcess")
]

for line in open('tests/ignores.txt'):
    file_path = line.strip()
    if file_path and file_path[0] != '#':
        collect_ignore.append(file_path)


@pytest.fixture()
def chdir(tmpdir):
    """Change to pytest-provided temporary directory"""
    tmpdir.chdir()


def pytest_collection_modifyitems(session, config, items):
    # Avoid executing tests when executing `--flake8` flag (pytest-flake8)
    try:
        from pytest_flake8 import Flake8Item
        if config.getoption('--flake8'):
            items[:] = [item for item in items if isinstance(item, Flake8Item)]
    except ImportError:
        pass


@pytest.fixture()
def reactor_pytest(request):
    request.cls.reactor_pytest = request.config.getoption("--reactor")
    return request.cls.reactor_pytest


@pytest.fixture(autouse=True)
def only_asyncio(request, reactor_pytest):
    if request.node.get_closest_marker('only_asyncio') and reactor_pytest != 'asyncio':
        pytest.skip('This test is only run with --reactor-asyncio')
Python
0
@@ -960,24 +960,37 @@ est.fixture( +scope='class' )%0Adef reacto @@ -1004,24 +1004,82 @@ t(request):%0A + if not request.cls:%0A # doctests%0A return%0A request.
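Editor's note: applying both hunks yields roughly this fixture. It gains class scope, and it returns early for doctest items, which have no request.cls and previously crashed on the attribute assignment:

import pytest

@pytest.fixture(scope='class')
def reactor_pytest(request):
    if not request.cls:
        # doctests
        return
    request.cls.reactor_pytest = request.config.getoption("--reactor")
    return request.cls.reactor_pytest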
8dddafe0ddfcca04a5293033dc8f81dae89e2e96
Update cli.py
src/flask_rq2/cli.py
src/flask_rq2/cli.py
# -*- coding: utf-8 -*-
"""
    flask_rq2.cli
    ~~~~~~~~~~~~~

    Support for the Click based Flask CLI via Flask-CLI.

"""
import operator
import os
from functools import update_wrapper

import click
from rq.cli import cli as rq_cli

try:
    from flask.cli import AppGroup, ScriptInfo
except ImportError:  # pragma: no cover
    try:
        from flask_cli import AppGroup, ScriptInfo
    except ImportError:
        raise RuntimeError('Cannot import Flask CLI. Is it installed?')

try:
    from rq_scheduler import Scheduler
    from rq_scheduler.utils import setup_loghandlers
except ImportError:  # pragma: no cover
    Scheduler = None

_commands = {}


def shared_options(rq):
    "Default class options to pass to the CLI commands."
    return {
        'url': rq.redis_url,
        'config': None,
        'worker_class': rq.worker_class,
        'job_class': rq.job_class,
        'queue_class': rq.queue_class,
        'connection_class': rq.connection_class,
    }


def rq_command(condition=True):
    def wrapper(func):
        """Marks a callback as wanting to receive the RQ object we've added
        to the context
        """
        @click.pass_context
        def new_func(ctx, *args, **kwargs):
            rq = ctx.obj.data.get('rq')
            return func(rq, ctx, *args, **kwargs)

        updated_wrapper = update_wrapper(new_func, func)
        if condition:
            _commands[updated_wrapper.__name__] = updated_wrapper
        return updated_wrapper
    return wrapper


@click.option('--all', '-a', is_flag=True, help='Empty all queues')
@click.argument('queues', nargs=-1)
@rq_command()
def empty(rq, ctx, all, queues):
    "Empty given queues."
    return ctx.invoke(
        rq_cli.empty,
        all=all,
        queues=queues or rq.queues,
        **shared_options(rq)
    )


@click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs')
@click.argument('job_ids', nargs=-1)
@rq_command()
def requeue(rq, ctx, all, job_ids):
    "Requeue failed jobs."
    return ctx.invoke(
        rq_cli.requeue,
        all=all,
        job_ids=job_ids,
        **shared_options(rq)
    )


@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--interval', '-i', type=float,
              help='Updates stats every N seconds (default: don\'t poll)')
@click.option('--raw', '-r', is_flag=True,
              help='Print only the raw numbers, no bar charts')
@click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info')
@click.option('--only-workers', '-W', is_flag=True,
              help='Show only worker info')
@click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue')
@click.argument('queues', nargs=-1)
@rq_command()
def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,
         queues):
    "RQ command-line monitor."
    return ctx.invoke(
        rq_cli.info,
        path=path,
        interval=interval,
        raw=raw,
        only_queues=only_queues,
        only_workers=only_workers,
        by_queue=by_queue,
        queues=queues or rq.queues,
        **shared_options(rq)
    )


@click.option('--burst', '-b', is_flag=True,
              help='Run in burst mode (quit after all work is done)')
@click.option('--logging_level', type=str, default="INFO",
              help='Set logging level')
@click.option('--name', '-n', help='Specify a different name')
@click.option('--path', '-P', default='.', help='Specify the import path.')
@click.option('--results-ttl', help='Default results timeout to be used')
@click.option('--worker-ttl', type=int,
              help='Default worker timeout to be used')
@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--quiet', '-q', is_flag=True, help='Show less output')
@click.option('--sentry-dsn', default=None, help='Sentry DSN address')
@click.option('--exception-handler', help='Exception handler(s) to use',
              multiple=True)
@click.option('--pid', help='Write the process ID number to a file at '
                            'the specified path')
@click.argument('queues', nargs=-1)
@rq_command()
def worker(rq, ctx, burst, logging_level, name, path, results_ttl, worker_ttl,
           verbose, quiet, sentry_dsn, exception_handler, pid, queues):
    "Starts an RQ worker."
    ctx.invoke(
        rq_cli.worker,
        burst=burst,
        logging_level=logging_level,
        name=name,
        path=path,
        results_ttl=results_ttl,
        worker_ttl=worker_ttl,
        verbose=verbose,
        quiet=quiet,
        sentry_dsn=sentry_dsn,
        exception_handler=exception_handler or rq._exception_handlers,
        pid=pid,
        queues=queues or rq.queues,
        **shared_options(rq)
    )


@rq_command()
@click.option('--duration', type=int,
              help='Seconds you want the workers to be suspended. '
                   'Default is forever.')
def suspend(rq, ctx, duration):
    "Suspends all workers."
    ctx.invoke(
        rq_cli.suspend,
        duration=duration,
        **shared_options(rq)
    )


@rq_command()
def resume(rq, ctx):
    "Resumes all workers."
    ctx.invoke(
        rq_cli.resume,
        **shared_options(rq)
    )


@click.option('--verbose', '-v', is_flag=True, help='Show more output')
@click.option('--burst', '-b', is_flag=True,
              help='Run in burst mode (quit after all work is done)')
@click.option('-q', '--queue', metavar='QUEUE',
              help='The name of the queue to run the scheduler with.')
@click.option('-i', '--interval', metavar='SECONDS', type=int,
              help='How often the scheduler checks for new jobs to add to '
                   'the queue (in seconds, can be floating-point for more '
                   'precision).')
@click.option('--pid', metavar='FILE', help='Write the process ID number '
                                            'to a file at the specified path')
@rq_command(Scheduler is not None)
def scheduler(rq, ctx, verbose, burst, queue, interval, pid):
    "Periodically checks for scheduled jobs."
    scheduler = rq.get_scheduler(interval=interval, queue=queue)
    if pid:
        with open(os.path.expanduser(pid), 'w') as fp:
            fp.write(str(os.getpid()))
    if verbose:
        level = 'DEBUG'
    else:
        level = 'INFO'
    setup_loghandlers(level)
    scheduler.run(burst=burst)


def add_commands(cli, rq):

    @click.group(cls=AppGroup, help='Runs RQ commands with app context.')
    @click.pass_context
    def rq_group(ctx):
        ctx.ensure_object(ScriptInfo).data['rq'] = rq

    sorted_commands = sorted(_commands.items(), key=operator.itemgetter(0))
    for name, func in sorted_commands:
        rq_group.command(name=name)(func)

    cli.add_command(rq_group, name='rq')
Python
0
@@ -3594,32 +3594,45 @@ -ttl', type=int, + default=420, %0A h @@ -3661,32 +3661,47 @@ meout to be used + (default: 420) ')%0A@click.option
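Editor's note: the change swaps a bare type=int for an explicit default, so --worker-ttl now reports 420 when omitted. The option in isolation, assuming click is installed:

import click

@click.command()
@click.option('--worker-ttl', default=420,
              help='Default worker timeout to be used (default: 420)')
def worker(worker_ttl):
    click.echo(worker_ttl)  # prints 420 unless overridden

if __name__ == '__main__':
    worker()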
bcb0d5b3c0a90db3eb1e70b27185c4f511e9ab57
Add relu6 as alias to clipped_relu
chainer/functions/activation/clipped_relu.py
chainer/functions/activation/clipped_relu.py
import numpy

import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check

if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    _mode = cuda.cuda.cudnn.CUDNN_ACTIVATION_CLIPPED_RELU  # type: ignore


class ClippedReLU(function_node.FunctionNode):

    """Clipped Rectifier Unit function.

    Clipped ReLU is written as
    :math:`ClippedReLU(x, z) = \\min(\\max(0, x), z)`,
    where :math:`z(>0)` is a parameter to cap return value of ReLU.

    """

    _use_cudnn = False

    def __init__(self, z):
        if not isinstance(z, float):
            raise TypeError('z must be float value')
        # z must be positive.
        assert z > 0
        self.cap = z

    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        x_type = in_types[0]
        type_check.expect(x_type.dtype.kind == 'f')

    def forward_cpu(self, inputs):
        self.retain_inputs((0,))
        x, = inputs
        return utils.force_array(
            numpy.minimum(numpy.maximum(0, x), self.cap), x.dtype),

    def forward_gpu(self, inputs):
        self.retain_inputs((0,))
        x, = inputs
        if chainer.should_use_cudnn('==always') and x.flags.c_contiguous:
            self._use_cudnn = True
            y = cudnn.activation_forward(x, _mode, self.cap)
            self.retain_outputs((0,))
        else:
            return cuda.elementwise(
                'T x, T cap', 'T y', 'y = min(max(x, (T)0), cap)',
                'clipped_relu_fwd')(x, self.cap),
        return y,

    def backward(self, indexes, grad_outputs):
        x, = self.get_retained_inputs()
        if chainer.should_use_cudnn('==always') and self._use_cudnn:
            y = self.get_retained_outputs()[0]
            return ClippedReLUGrad3(x.data, y.data, self.cap).apply(
                grad_outputs)
        else:
            return ClippedReLUGrad2(x.data, self.cap).apply(grad_outputs)


class ClippedReLUGrad2(function_node.FunctionNode):

    """Clipped Rectifier Unit gradient function."""

    def __init__(self, x, z):
        self.x = x
        self.cap = z

    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('gy',))
        type_check.expect(in_types[0].dtype.kind == 'f')

    def forward_cpu(self, inputs):
        gy, = inputs
        x = self.x
        return utils.force_array(
            gy * (0 < x) * (x < self.cap), x.dtype),

    def forward_gpu(self, inputs):
        gy, = inputs
        gx = cuda.elementwise(
            'T x, T gy, T z', 'T gx',
            'gx = ((x > 0) & (x < z)) ? gy : (T)0',
            'clipped_relu_bwd')(self.x, gy, self.cap)
        return gx,

    def backward(self, indexes, grad_outputs):
        return ClippedReLUGrad2(self.x, self.cap).apply(grad_outputs)


class ClippedReLUGrad3(function_node.FunctionNode):

    """Clipped Rectifier Unit gradient function."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.cap = z

    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('gy',))
        type_check.expect(in_types[0].dtype.kind == 'f')

    def forward_cpu(self, inputs):
        gy, = inputs
        return utils.force_array(
            gy * (0 < self.x) * (self.x < self.cap), self.x.dtype),

    def forward_gpu(self, inputs):
        assert chainer.should_use_cudnn('==always')
        return cudnn.activation_backward(self.x, self.y, inputs[0], _mode,
                                         self.cap),

    def backward(self, indexes, grad_outputs):
        return ClippedReLUGrad3(self.x, self.y, self.cap).apply(grad_outputs)


def clipped_relu(x, z=20.0):
    """Clipped Rectifier Unit function.

    For a clipping value :math:`z(>0)`, it computes

    .. math:: \\text{ClippedReLU}(x, z) = \\min(\\max(0, x), z).

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable. A :math:`(s_1, s_2, ..., s_n)`-shaped float array.
        z (float): Clipping value. (default = 20.0)

    Returns:
        ~chainer.Variable: Output variable. A
        :math:`(s_1, s_2, ..., s_n)`-shaped float array.

    .. admonition:: Example

        >>> x = np.random.uniform(-100, 100, (10, 20)).astype(np.float32)
        >>> z = 10.0
        >>> np.any(x < 0)
        True
        >>> np.any(x > z)
        True
        >>> y = F.clipped_relu(x, z=z)
        >>> np.any(y.array < 0)
        False
        >>> np.any(y.array > z)
        False

    """
    y, = ClippedReLU(z).apply((x,))
    return y
Python
0
@@ -4607,8 +4607,467 @@ eturn y%0A +%0Adef relu6(x):%0A %22%22%22Rectifier Unit function clipped at 6.%0A%0A It computes%0A%0A .. math:: %5C%5Ctext%7BReLU6%7D(x) = %5C%5Cmin(%5C%5Cmax(0, x), 6).%0A%0A Args:%0A x (:class:%60~chainer.Variable%60 or :ref:%60ndarray%60):%0A Input variable. A :math:%60(s_1, s_2, ..., s_n)%60-shaped float array.%0A%0A Returns:%0A ~chainer.Variable: Output variable. A%0A :math:%60(s_1, s_2, ..., s_n)%60-shaped float array.%0A%0A %22%22%22%0A y, = ClippedReLU(6.0).apply((x,))%0A return y%0A
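Editor's note: per the diff body, the new alias simply calls ClippedReLU(6.0), so relu6(x) equals clipped_relu(x, z=6.0), i.e. min(max(0, x), 6). A numpy-only check of that math:

import numpy as np

x = np.array([-1.0, 3.0, 9.0], dtype=np.float32)
relu6_expected = np.minimum(np.maximum(0, x), 6.0)
assert np.allclose(relu6_expected, [0.0, 3.0, 6.0])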
7059622c5787f06027ae2cf978beb69df4e5cabd
Send googler profiling data.
breakpad.py
breakpad.py
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Breakpad for Python.

Sends a notification when a process stops on an exception.

It is only enabled when all these conditions are met:
  1. hostname finishes with '.google.com'
  2. main module name doesn't contain the word 'test'
  3. no NO_BREAKPAD environment variable is defined
"""

import atexit
import getpass
import os
import urllib
import traceback
import socket
import sys


# Configure these values.
DEFAULT_URL = 'https://chromium-status.appspot.com/breakpad'

_REGISTERED = False


def FormatException(e):
  """Returns a human readable form of an exception.

  Adds the maximum number of interesting information in the safest way."""
  try:
    out = repr(e)
  except Exception:
    out = ''
  try:
    out = str(e)
    if isinstance(e, Exception):
      # urllib exceptions, usually the HTTP headers.
      if hasattr(e, 'headers'):
        out += '\nHeaders: %s' % e.headers
      if hasattr(e, 'url'):
        out += '\nUrl: %s' % e.url
      if hasattr(e, 'msg'):
        out += '\nMsg: %s' % e.msg
      # The web page in some urllib exceptions.
      if hasattr(e, 'read') and callable(e.read):
        out += '\nread(): %s' % e.read()
      if hasattr(e, 'info') and callable(e.info):
        out += '\ninfo(): %s' % e.info()
  except Exception:
    pass
  return out


def SendStack(last_tb, stack, url=None, maxlen=50):
  """Sends the stack trace to the breakpad server."""
  if not url:
    url = DEFAULT_URL
  print 'Sending crash report ...'
  try:
    params = {
        'args': sys.argv,
        'stack': stack[0:4096],
        'user': getpass.getuser(),
        'exception': FormatException(last_tb),
        'host': socket.getfqdn(),
        'cwd': os.getcwd(),
        'version': sys.version,
    }
    # pylint: disable=W0702
    print('\n'.join(' %s: %s' % (k, v[0:maxlen])
                    for k, v in params.iteritems()))
    request = urllib.urlopen(url, urllib.urlencode(params))
    print(request.read())
    request.close()
  except IOError:
    print('There was a failure while trying to send the stack trace. Too bad.')


def CheckForException():
  """Runs at exit. Look if there was an exception active."""
  last_value = getattr(sys, 'last_value', None)
  if last_value and not isinstance(last_value, KeyboardInterrupt):
    last_tb = getattr(sys, 'last_traceback', None)
    if last_tb:
      SendStack(last_value, ''.join(traceback.format_tb(last_tb)))


def Register():
  """Registers the callback at exit. Calling it multiple times is no-op."""
  global _REGISTERED
  if _REGISTERED:
    return
  _REGISTERED = True
  atexit.register(CheckForException)


# Skip unit tests and we don't want anything from non-googler.
if (not 'test' in sys.modules['__main__'].__file__ and
    not 'NO_BREAKPAD' in os.environ and
    (socket.getfqdn().endswith('.google.com') or
     socket.getfqdn().endswith('.chromium.org'))):
  Register()

# Uncomment this line if you want to test it out.
#Register()
Python
0
@@ -498,22 +498,45 @@ %0Aimport -urllib +socket%0Aimport sys%0Aimport time %0Aimport @@ -548,38 +548,38 @@ back%0Aimport -socket +urllib %0Aimport sys%0A%0A%0A# Conf @@ -566,19 +566,23 @@ %0Aimport -sys +urllib2 %0A%0A%0A# Con @@ -656,17 +656,8 @@ .com -/breakpad '%0A%0A_ @@ -676,16 +676,346 @@ False%0A%0A +_TIME_STARTED = time.time()%0A%0A%0Adef post(url, params):%0A %22%22%22HTTP POST with timeout when it's supported.%22%22%22%0A kwargs = %7B%7D%0A if (sys.version_info%5B0%5D * 10 + sys.version_info%5B1%5D) %3E= 26:%0A kwargs%5B'timeout'%5D = 4%0A request = urllib2.urlopen(url, urllib.urlencode(params), **kwargs)%0A out = request.read()%0A request.close()%0A return out%0A%0A %0Adef For @@ -1943,16 +1943,30 @@ AULT_URL + + '/breakpad' %0A print @@ -2394,108 +2394,31 @@ -request = urllib.urlopen(url, urllib.urlencode(params))%0A print(request.read())%0A request.close( +print(post(url, params) )%0A @@ -2511,24 +2511,306 @@ oo bad.')%0A%0A%0A +def SendProfiling(url=None):%0A try:%0A if not url:%0A url = DEFAULT_URL + '/profiling'%0A params = %7B%0A 'argv': ' '.join(sys.argv),%0A 'duration': time.time() - _TIME_STARTED,%0A 'platform': sys.platform,%0A %7D%0A post(url, params)%0A except IOError:%0A pass%0A%0A%0A def CheckFor @@ -2946,20 +2946,24 @@ st_value - and +:%0A if not isi @@ -3002,16 +3002,18 @@ rrupt):%0A + last @@ -3059,16 +3059,18 @@ ne)%0A + if last_ @@ -3073,16 +3073,18 @@ ast_tb:%0A + Se @@ -3142,16 +3142,44 @@ t_tb)))%0A + else:%0A SendProfiling()%0A %0A%0Adef Re
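Editor's note: the pattern behind the new _TIME_STARTED/SendProfiling pair, stripped of the HTTP plumbing: record a module-load timestamp and report elapsed wall time when the process ends. The actual patch reports from the existing exit hook rather than registering a new one; this sketch uses atexit directly for self-containment.

import atexit
import time

_TIME_STARTED = time.time()

def send_profiling():
    duration = time.time() - _TIME_STARTED
    print('duration: %.3fs' % duration)  # the real code POSTs this instead

atexit.register(send_profiling)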
01d4279b40eb9e3029f857bf9d81d66d0314532d
Bump version to 1.5.1
enlighten/__init__.py
enlighten/__init__.py
# -*- coding: utf-8 -*-
# Copyright 2017 - 2020 Avram Lubkin, All Rights Reserved

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""
**Enlighten Progress Bar**

Provides progress bars and counters which play nice in a TTY console
"""

from enlighten.counter import Counter, SubCounter
from enlighten._manager import Manager, get_manager

__version__ = '1.5.0'
__all__ = ('Counter', 'Manager', 'SubCounter', 'get_manager')
Python
0
@@ -505,17 +505,17 @@ = '1.5. -0 +1 '%0A__all_
5c27ebd8e69802cce4afe51b917df233dcf4d972
Add D3DCompiler_46.dll to ignore list

Review URL: https://codereview.chromium.org/12217044
site_config/config.py
site_config/config.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Declares a number of site-dependent variables for use by scripts.

A typical use of this module would be

  import chromium_config as config

  v8_url = config.Master.v8_url
"""

import os

from twisted.spread import banana

from config_bootstrap import config_private # pylint: disable=W0403,W0611
from config_bootstrap import Master # pylint: disable=W0403,W0611

# By default, the banana's string size limit is 640kb, which is unsufficient
# when passing diff's around. Raise it to 100megs. Do this here since the limit
# is enforced on both the server and the client so both need to raise the
# limit.
banana.SIZE_LIMIT = 100 * 1024 * 1024


def DatabaseSetup(buildmaster_config, require_dbconfig=False):
  if os.path.isfile('.dbconfig'):
    values = {}
    execfile('.dbconfig', values)
    if 'password' not in values:
      raise Exception('could not get db password')

    buildmaster_config['db_url'] = 'postgresql://%s:%s@%s/%s' % (
        values['username'], values['password'],
        values.get('hostname', 'localhost'), values['dbname'])
  else:
    assert(not require_dbconfig)


class Archive(config_private.Archive):
  """Build and data archival options."""

  # List of symbol files to save, but not to upload to the symbol server
  # (generally because they have no symbols and thus would produce an error).
  # We have to list all the previous names of icudt*.dll. Now that we
  # use icudt.dll, we don't need to update this file any more next time
  # we pull in a new version of ICU.
  symbols_to_skip_upload = [
      'icudt38.dll', 'icudt42.dll', 'icudt46.dll', 'icudt.dll', 'rlz.dll',
      'avcodec-53.dll', 'avcodec-54.dll', 'avformat-53.dll', 'avformat-54.dll',
      'avutil-51.dll', 'd3dx9_42.dll', 'd3dx9_43.dll', 'D3DCompiler_42.dll',
      'D3DCompiler_43.dll', 'xinput1_3.dll', 'FlashPlayerApp.exe',]

  if os.environ.get('CHROMIUM_BUILD', '') == '_google_chrome':
    exes_to_skip_entirely = []
  else:
    # Skip any filenames (exes, symbols, etc.) starting with these strings
    # entirely, typically because they're not built for this distribution.
    exes_to_skip_entirely = ['rlz']

  # Installer to archive.
  installer_exe = 'mini_installer.exe'

  # Test files to archive.
  tests_to_archive = ['reliability_tests.exe',
                      'test_shell.exe',
                      'automated_ui_tests.exe',
                      'ui_tests.exe',  # For syzygy (binary reorder) test bot
                      'icudt.dll',
                      'icudt38.dll',
                      'icudt42.dll',
                      'icudt46.dll',
                      'plugins\\npapi_layout_test_plugin.dll',
                     ]

  # Archive everything in these directories, using glob.
  test_dirs_to_archive = ['fonts']
  # Create these directories, initially empty, in the archive.
  test_dirs_to_create = ['plugins', 'fonts']

  # Directories in which to store built files, for dev, official, and full
  # builds.
  archive_host = config_private.Archive.archive_host
  www_dir_base = config_private.Archive.www_dir_base


class Distributed(config_private.Distributed):
  # File holding current version information.
  version_file = 'VERSION'
Python
0
@@ -1960,16 +1960,38 @@ 43.dll', + 'D3DCompiler_46.dll', 'xinput @@ -1999,16 +1999,22 @@ _3.dll', +%0A 'FlashP
46a6656ae1841841ec97c7e583f3595d737bef45
Update preprocessing.py (#1068)
autokeras/hypermodels/preprocessing.py
autokeras/hypermodels/preprocessing.py
import numpy as np
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.python.util import nest

from autokeras import adapters
from autokeras import keras_layers
from autokeras.engine import block as block_module


class Normalization(block_module.Block):
    """ Perform basic image transformation and augmentation.

    # Arguments
        axis: Integer or tuple of integers, the axis or axes that should be
            normalized (typically the features axis). We will normalize each
            element in the specified axis. The default is '-1' (the innermost
            axis); 0 (the batch axis) is not allowed.
    """

    def __init__(self, axis: int = -1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        return preprocessing.Normalization(axis=self.axis)(input_node)

    def get_config(self):
        config = super().get_config()
        config.update({'axis': self.axis})
        return config


class TextToIntSequence(block_module.Block):
    """Convert raw texts to sequences of word indices.

    # Arguments
        output_sequence_length: Int. The maximum length of a sentence. If
            unspecified, it would be tuned automatically.
        max_tokens: Int. The maximum size of the vocabulary. Defaults to 20000.
    """

    def __init__(self,
                 output_sequence_length=None,
                 max_tokens=20000,
                 **kwargs):
        super().__init__(**kwargs)
        self.output_sequence_length = output_sequence_length
        self.max_tokens = max_tokens

    def get_config(self):
        config = super().get_config()
        config.update({
            'output_sequence_length': self.output_sequence_length,
            'max_tokens': self.max_tokens,
        })
        return config

    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        if self.output_sequence_length is not None:
            output_sequence_length = self.output_sequence_length
        else:
            output_sequence_length = hp.Choice('output_sequence_length',
                                               [64, 128, 256, 512],
                                               default=64)
        output_node = preprocessing.TextVectorization(
            max_tokens=self.max_tokens,
            output_mode='int',
            output_sequence_length=output_sequence_length)(input_node)
        return output_node


class TextToNgramVector(block_module.Block):
    """Convert raw texts to n-gram vectors.

    # Arguments
        max_tokens: Int. The maximum size of the vocabulary. Defaults to 20000.
    """

    def __init__(self,
                 max_tokens=20000,
                 **kwargs):
        super().__init__(**kwargs)
        self.max_tokens = max_tokens

    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        return preprocessing.TextVectorization(
            max_tokens=self.max_tokens,
            output_mode='tf-idf')(input_node)

    def get_config(self):
        config = super().get_config()
        config.update({'max_tokens': self.max_tokens})
        return config


class ImageAugmentation(block_module.Block):
    """Collection of various image augmentation methods.

    # Arguments
        percentage: Float. The percentage of data to augment.
        rotation_range: Int. The value can only be 0, 90, or 180.
            Degree range for random rotations. Default to 180.
        random_crop: Boolean. Whether to crop the image randomly.
            Default to True.
        brightness_range: Positive float.
            Serve as 'max_delta' in tf.image.random_brightness. Default to 0.5.
            Equivalent to adjust brightness using a 'delta' randomly picked in
            the interval [-max_delta, max_delta).
        saturation_range: Positive float or Tuple.
            If given a positive float, _get_min_and_max() will automated
            generate a tuple for saturation range. If given a tuple directly,
            it will serve as a range for picking a saturation shift value from.
            Default to 0.5.
        contrast_range: Positive float or Tuple.
            If given a positive float, _get_min_and_max() will automated
            generate a tuple for contrast range. If given a tuple directly, it
            will serve as a range for picking a contrast shift value from.
            Default to 0.5.
        translation: Boolean. Whether to translate the image.
        horizontal_flip: Boolean. Whether to flip the image horizontally.
        vertical_flip: Boolean. Whether to flip the image vertically.
        gaussian_noise: Boolean. Whether to add gaussian noise to the image.
    """

    def __init__(self,
                 percentage=0.25,
                 rotation_range=180,
                 random_crop=True,
                 brightness_range=0.5,
                 saturation_range=0.5,
                 contrast_range=0.5,
                 translation=True,
                 horizontal_flip=True,
                 vertical_flip=True,
                 gaussian_noise=True,
                 **kwargs):
        super().__init__(**kwargs)
        self.percentage = percentage
        self.rotation_range = rotation_range
        self._rotate_choices = [0]
        if self.rotation_range == 90:
            self._rotate_choices = [0, 1, 3]
        elif self.rotation_range == 180:
            self._rotate_choices = [0, 1, 2, 3]
        self.random_crop = random_crop
        if self.random_crop:
            # Generate 20 crop settings, ranging from a 1% to 20% crop.
            self.scales = list(np.arange(0.8, 1.0, 0.01))
            self.boxes = np.zeros((len(self.scales), 4))
            for i, scale in enumerate(self.scales):
                x1 = y1 = 0.5 - (0.5 * scale)
                x2 = y2 = 0.5 + (0.5 * scale)
                self.boxes[i] = [x1, y1, x2, y2]
        self.brightness_range = brightness_range
        self.saturation_range = self._get_min_and_max(saturation_range,
                                                      'saturation_range')
        self.contrast_range = self._get_min_and_max(contrast_range,
                                                    'contrast_range')
        self.translation = translation
        self.horizontal_flip = horizontal_flip
        self.vertical_flip = vertical_flip
        self.gaussian_noise = gaussian_noise
        self.shape = None

    @staticmethod
    def _get_min_and_max(value, name):
        if isinstance(value, (tuple, list)) and len(value) == 2:
            min_value, max_value = value
            return min_value, max_value
        elif isinstance(value, (int, float)):
            min_value = 1. - value
            max_value = 1. + value
            return min_value, max_value
        elif value == 0:
            return None
        else:
            raise ValueError('Expected {name} to be either a float between 0 and 1, '
                             'or a tuple of 2 floats between 0 and 1, '
                             'but got {value}'.format(name=name, value=value))

    def build(self, hp, inputs=None):
        return inputs


class CategoricalToNumerical(block_module.Block):
    """Encode the categorical features to numerical features."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.column_types = None
        self.column_names = None

    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        encoding = []
        for column_name in self.column_names:
            column_type = self.column_types[column_name]
            if column_type == adapters.CATEGORICAL:
                # TODO: Search to use one-hot or int.
                encoding.append(keras_layers.INT)
            else:
                encoding.append(keras_layers.NONE)
        return keras_layers.CategoricalEncoding(encoding)(input_node)
Python
0
@@ -1,16 +1,45 @@ +from typing import Optional%0A%0A import numpy as @@ -1474,17 +1474,34 @@ e_length -= +: Optional%5Bint%5D = None,%0A
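Editor's note: the typing change in isolation. Optional[int] documents that the value may be absent until the hyperparameter search picks one; pick_length here is a hypothetical stand-in mirroring the fallback in build():

from typing import Optional

def pick_length(output_sequence_length: Optional[int] = None) -> int:
    # fall back to a searched value when unset (64 is one of hp.Choice's options)
    return output_sequence_length if output_sequence_length is not None else 64

assert pick_length() == 64
assert pick_length(128) == 128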
8ef11bf983705540973badb40f7daf5a14c1173a
Fix typo
astrobin/permissions.py
astrobin/permissions.py
# Django
from django.db.models import Q

# Third party apps
from pybb.permissions import DefaultPermissionHandler

# AstroBin apps
from astrobin_apps_groups.models import Group


class CustomForumPermissions(DefaultPermissionHandler):
    # Disable forum polls
    def may_create_poll(self, user):
        return False

    def may_view_forum(self, user, forum):
        may = super(CustomForumPermissions, self).may_view_forum(user, forum)

        try:
            if forum.group is not None:
                if user.is_authenticated():
                    return may and (
                        forum.group.public or \
                        user == forum.group.owner or \
                        user in forum.group.members.all())
                else:
                    return may and forum.group.public
        except Group.DoesNotExist:
            pass

        return may

    def filter_forums(self, user, qs):
        f = super(CustomForumPermissions, self).filter_forums(user, qs)
        if user.is_authenticated():
            f = f.filter(
                Q(group = None) |
                Q(group__public = True) |
                Q(group__owner = user) |
                Q(group__members = user)).distinct()
        else:
            f = f.filter(Q(group = None) | Q(group__public = True))
        return f

    def may_view_topic(self, user, topic):
        # Not using super because of:
        # https://github.com/hovel/pybbm/issues/241
        if user.is_superuser:
            return True
        if not user.is_staff and (topic.forum.hidden or topic.forum.category.hidden):
            return False  # only staff may see hidden forum / category

        may = True

        try:
            if topic.forum.group is not None:
                if user.is_authenticated():
                    may = topic.forum.group.public or \
                        user == topic.forum.grouop.owner or \
                        user in topic.forum.group.members.all()
                else:
                    may = topic.forum.group.public
        except Group.DoesNotExist:
            pass

        if topic.on_moderation:
            if user.is_authenticated():
                may = may and (user == topic.user or
                               user in topic.forum.moderators.all())

        return may

    def filter_topics(self, user, qs):
        f = super(CustomForumPermissions, self).filter_topics(user, qs)
        if user.is_authenticated():
            f = f.filter(
                Q(forum__group = None) |
                Q(forum__group__public = True) |
                Q(forum__group__owner = user) |
                Q(forum__group__members = user)).distinct()
        else:
            f = f.filter(Q(forum__group = None) | Q(forum__group__public = True))
        return f

    def may_create_topic(self, user, forum):
        may = super(CustomForumPermissions, self).may_create_topic(user, forum)

        try:
            if forum.group is not None:
                if forum.group.public:
                    return may

                return may and (
                    user == forum.group.owner or
                    user in forum.group.members.all())
        except Group.DoesNotExist:
            pass

        return may

    def may_create_post(self, user, topic):
        may = super(CustomForumPermissions, self).may_create_post(user, topic)
        return may and self.may_create_topic(user, topic.forum)
Python
0.999999
@@ -1901,17 +1901,16 @@ rum.grou -o p.owner
cf07d8d7dd4449cae983a6a038fa8fabaa990f2a
switch on/off alerts
ott/otp_client/trip_planner.py
ott/otp_client/trip_planner.py
import sys
import simplejson as json
import urllib
import logging
log = logging.getLogger(__file__)

from ott.utils import json_utils
from ott.utils import object_utils
from ott.utils import html_utils
from ott.otp_client import otp_to_ott
from ott.utils.parse import TripParamParser
from ott.geocoder.geosolr import GeoSolr
from ott.data.content import Adverts


class TripPlanner(object):
    def __init__(self, otp_url="http://localhost/prod", advert_url="http://localhost/adverts", advert_timeout=30, solr_instance=None, solr_url='http://localhost/solr'):
        self.otp_url = otp_url

        if solr_instance and isinstance(solr_instance, GeoSolr):
            self.geo = solr_instance
        elif isinstance(solr_url, str):
            self.geo = GeoSolr(solr_url)

        self.adverts = None
        if advert_url:
            self.adverts = Adverts(advert_url, advert_timeout)

    #import pdb; pdb.set_trace()
    def plan_trip(self, request=None, pretty=False):
        """ "powell%20blvd::45.49063653,-122.4822897" "45.433507,-122.559709"
        """
        # step 1: parse params
        param = TripParamParser(request)

        # step 2: parse params -- note, changes param object implicitly in the call
        msg = self.geocode(param)
        if msg:
            # TODO -- trip error or plan?
            pass

        # step 3: call the trip planner...
        url = "{0}?{1}".format(self.otp_url, param.otp_url_params())
        f = self.call_otp(url)
        j=json.loads(f)
        #print json.dumps(j, sort_keys=True, indent=4);

        # step 4: process any planner errors
        if j is None:
            pass  # TODO -- trip error or plan?

        # step 5: parse the OTP trip plan into OTT format
        ret_val = {}
        try:
            plan = otp_to_ott.Plan(j['plan'], param)
            ret_val['plan'] = plan
            if self.adverts:
                m = plan.dominant_transit_mode()
                l = html_utils.get_lang(request)
                ret_val['adverts'] = self.adverts.query(m, l)
        except:
            try:
                ret_val['error'] = otp_to_ott.Error(j['error'], param)
            except:
                pass

        ret_val = json_utils.json_repr(ret_val, pretty or param.pretty_output())
        return ret_val

    def call_otp(self, url):
        ret_val = None
        try:
            log.info(url)
            f = urllib.urlopen(url)
            ret_val = f.read()
        except Exception as e:
            log.warn(e)
        return ret_val

    def geocode(self, param):
        ''' TODO ... rethink this whole thing
            1) should geocoding be in param_parser
            2) we're going to need other parsers ... like for stops, etc...
               (where we only need to geocode 1 param, etc...)
            3) ....
        '''
        ret_val = None

        # step 2: get your origin
        f = param.get_from()
        if not param.has_valid_coord(f):
            # step 2b: geocode the origin if you don't have a valid ::LatLon
            f = param.strip_coord(f)
            f = self.geo.geostr(f)
            param.frm = f

        # step 3: get your destination
        t = param.get_to()
        if not param.has_valid_coord(t):
            # step 3b: geocode the destination if you don't have a valid ::LatLon
            t = param.strip_coord(t)
            t = self.geo.geostr(t)
            param.to = t

        return ret_val


def main(argv):
    pretty = 'pretty' in argv or 'p' in argv
    tp = TripPlanner()
    plan = tp.plan_trip(argv, pretty)
    print plan

if __name__ == '__main__':
    main(sys.argv)
Python
0.000002
@@ -458,34 +458,12 @@ url= -%22http://localhost/adverts%22 +None , ad
367e760bc49045dd3460122392557089f052d802
create mmap functin
word2vec/wordvectors.py
word2vec/wordvectors.py
import numpy as np

from word2vec.utils import unitvec


class WordVectors(object):

    def __init__(self, vocab, vectors=None, l2norm=None, save_memory=True):
        """
        Initialize a WordVectors class based on vocabulary and vectors

        This initializer precomputes the l2norm of the vectors

        Parameters
        ----------
        vocab : np.array
            1d array with the vocabulary
        vectors : np.array
            2d array with the vectors calculated by word2vec
        l2norm : np.array
            2d array with the calulated l2norm of the vectors
        save_memory : boolean
            wheter or not save the original vectors in `self.vectors`
        """
        if vectors is None and l2norm is None:
            raise Exception('Need vectors OR l2norm arguments')

        self.vocab = vocab

        if l2norm is None:
            if not save_memory:
                self.vectors = vectors
            self.l2norm = np.vstack(unitvec(vec) for vec in vectors)
        else:
            self.l2norm = l2norm

    def ix(self, word):
        """
        Returns the index on self.vocab and self.l2norm for `word`
        """
        temp = np.where(self.vocab == word)[0]
        if temp.size == 0:
            raise KeyError('Word not in vocabulary')
        else:
            return temp[0]

    def get_vector(self, word):
        """
        Returns the (l2norm) vector for `word` in the vocabulary
        """
        idx = self.ix(word)
        return self.l2norm[idx]

    def __getitem__(self, word):
        return self.get_vector(word)

    def generate_response(self, indexes, metric, exclude=''):
        """
        Generates a response as a list of tuples based on the indexes
        Each tuple is: (vocab[i], metric[i])
        """
        if isinstance(exclude, basestring):
            exclude = [exclude]
        return [(word, sim) for word, sim in zip(self.vocab[indexes], metric[indexes])
                if word not in exclude]

    def cosine(self, words, n=10):
        """
        Cosine similarity.

        metric = dot(l2norm_of_vectors, l2norm_of_target_vector)
        Uses a precomputed l2norm of the vectors

        Parameters
        ----------
        words : string or list of string
            word(s) in the vocabulary to calculate the vectors
        n : int, optional (default 10)
            number of neighbors to return

        Returns
        -------
        dict: of list of tuples

        Example
        -------
        >>> model.cosine('black', n=2)
        ```
        ```
        {'black': [('white', 0.94757425919916516),
                   ('yellow', 0.94640807944950878)]
        }
        """
        if isinstance(words, basestring):
            words = [words]

        targets = np.vstack((self.get_vector(word) for word in words))
        metrics = np.dot(self.l2norm, targets.T)

        ans = {}
        for col, word in enumerate(words):
            best = np.argsort(metrics[:, col])[::-1][:n + 1]
            best = self.generate_response(best, metrics[:, col], exclude=word)
            ans[word] = best

        return ans

    def _cosine(self, word, n=10):
        """
        Test method for cosine distance using `scipy.distance.cosine`

        Note: This method is **a lot** slower than `self.cosine`
        and results are the almost the same, you should be using `self.cosine`

        Requires: `__init__(..., save_memory=False)`

        Parameters
        ----------
        word : string
            word in the vocabulary to calculate the vectors
        n : int, optional (default 10)
            number of neighbors to return
        """
        from scipy.spatial import distance
        target_vec = self[word]
        metric = np.empty(self.vocab.shape)
        for idx, vector in enumerate(self.vectors):
            metric[idx] = distance.cosine(target_vec, vector)
        best = metric.argsort()[:n + 1]
        return self.generate_response(best, metric, exclude=word)

    def analogy(self, pos, neg, n=10):
        """
        Analogy similarity.

        Parameters
        ----------
        pos : list
        neg : list

        Returns
        -------
        List of tuples, each tuple is (word, similarity)

        Example
        -------
        `king - man + woman = queen` will be:
        `pos=['king', 'woman'], neg=['man']`
        """
        words = pos + neg

        pos = [(word, 1.0) for word in pos]
        neg = [(word, -1.0) for word in neg]

        mean = []
        for word, direction in pos + neg:
            mean.append(direction * unitvec(self.get_vector(word)))
        mean = np.array(mean).mean(axis=0)

        similarities = np.dot(self.l2norm, mean)
        best = similarities.argsort()[::-1][:n + len(words) - 1]
        return self.generate_response(best, similarities, exclude=words)

    @classmethod
    def from_binary(cls, fname, save_memory=True):
        """
        Create a WordVectors class based on a word2vec binary file

        Parameters
        ----------
        fname : path to file
        save_memory : boolean

        Returns
        -------
        WordVectors class
        """
        with open(fname) as fin:
            header = fin.readline()
            vocab_size, vector_size = map(int, header.split())
            vectors = np.empty((vocab_size, vector_size), dtype=np.float)
            binary_len = np.dtype(np.float32).itemsize * vector_size
            for line_number in xrange(vocab_size):
                # mixed text and binary: read text first, then binary
                word = ''
                while True:
                    ch = fin.read(1)
                    if ch == ' ':
                        break
                    word += ch
                vocab.append(word)

                vector = np.fromstring(fin.read(binary_len), np.float32)
                vectors[line_number] = vector
                fin.read(1)  # newline
            vocab = np.array(vocab)
        return cls(vocab=vocab, vectors=vectors, save_memory=save_memory)

    @classmethod
    def from_text(cls, fname, save_memory=True):
        """
        Create a WordVectors class based on a word2vec text file

        Parameters
        ----------
        fname : path to file
        save_memory : boolean

        Returns
        -------
        WordVectors class
        """
        with open(fname) as f:
            parts = f.readline().strip().split(' ')
            shape = int(parts[0]), int(parts[1])

        vocab = np.genfromtxt(fname, dtype=object, delimiter=' ', usecols=0, skip_header=1)
        cols = np.arange(1, shape[1] + 1)
        vectors = np.genfromtxt(fname, dtype=float, delimiter=' ', usecols=cols, skip_header=1)

        return cls(vocab=vocab, vectors=vectors, save_memory=save_memory)
Python
0.000002
@@ -6788,28 +6788,110 @@ s, save_memory=save_memory)%0A +%0A @classmethod%0A def from_mmap(cls, vocab_fname, l2norm_fname):%0A pass%0A
b54d7b8079bf414b1fe79061b33e41c6350707d6
use integer instead of string
mopidy_rotaryencoder/__init__.py
mopidy_rotaryencoder/__init__.py
from __future__ import unicode_literals

import logging
import os

from mopidy import config, ext


__version__ = '0.1.0'

logger = logging.getLogger(__name__)


class Extension(ext.Extension):

    dist_name = 'Mopidy-RotaryEncoder'
    ext_name = 'rotaryencoder'
    version = __version__

    def get_default_config(self):
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(conf_file)

    def get_config_schema(self):
        schema = super(Extension, self).get_config_schema()
        schema['datapin'] = config.String()
        schema['clkpin'] = config.String()
        schema['swpin'] = config.String()
        return schema

    def setup(self, registry):
        from .frontend import RotaryEncoderFrontend
        registry.add('frontend', RotaryEncoderFrontend)
Python
0.027486
@@ -548,38 +548,39 @@ apin'%5D = config. -String +Integer ()%0A schem @@ -592,38 +592,39 @@ kpin'%5D = config. -String +Integer ()%0A schem @@ -647,14 +647,15 @@ fig. -String +Integer ()%0A
c01c691761de3b4c976a68865905fd981ea4c165
handle range/xrange
autodiff/ast_context.py
autodiff/ast_context.py
import logging

import meta
import ast as ast_module

import numpy as np
import theano
import theano.tensor as T

logger = logging.getLogger('pyautodiff')

# XXX FIXME This will not do - seed must be exposed.
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
global_randomstreams = RandomStreams(seed=123)


def istensor(x):
    tensortypes = (theano.tensor.TensorConstant,
                   theano.tensor.TensorVariable)
    return isinstance(x, tensortypes)


def isvar(x):
    vartypes = (theano.tensor.sharedvar.SharedVariable,
                theano.tensor.TensorConstant,
                theano.tensor.TensorVariable)
    return isinstance(x, vartypes)


def get_ast(func, flags=0):
    func_def = meta.decompiler.decompile_func(func)
    if isinstance(func_def, ast_module.Lambda):
        func_def = ast_module.FunctionDef(
            name='<lambda>', args=func_def.args,
            body=[ast_module.Return(func_def.body)],
            decorator_list=[])
    assert isinstance(func_def, ast_module.FunctionDef)
    return func_def


def print_ast(ast):
    if hasattr(ast, 'func_code'):
        ast = get_ast(ast)
    meta.asttools.print_ast(ast)


def print_source(ast):
    if hasattr(ast, 'func_code'):
        ast = get_ast(ast)
    meta.asttools.python_source(ast)


def unshadow(x):
    if isvar(x):
        try:
            return x.eval()
        except:
            return x
    else:
        return x


class TheanoTransformer(ast_module.NodeTransformer):

    def __init__(self):
        super(TheanoTransformer, self).__init__()
        self.smap = dict()

    def ast_wrap(self, node, method_name):
        wrapped = ast_module.Call(
            args=[node],
            func=ast_module.Attribute(
                attr=method_name,
                ctx=ast_module.Load(),
                value=ast_module.Name(
                    ctx=ast_module.Load(), id='TT')),
            keywords=[],
            kwargs=None,
            starargs=None)
        return wrapped

    def getvar(self, var):
        return self.smap.get(id(var), var)

    def shadow(self, x):
        if not isinstance(x, (int, float, np.ndarray)):
            return x

        # take special care with small ints, because CPYthon caches them.
        # This makes it impossible to tell one from the other.
        if isinstance(x, int) and -5 <= x <= 256:
            x = np.int_(x)
        elif isinstance(x, float):
            x = np.float_(x)

        if getattr(x, 'dtype', None) == bool:
            logger.info('Warning: Theano has no bool type; upgrading to int8.')
            x = x.astype('int8')

        sym_x = theano.shared(x)

        return self.smap.setdefault(id(x), sym_x)

    def handle_functions(self, func):
        # if the function has a _theano_fn attribute, return that fn
        if hasattr(func, '_theano_fn'):
            func = func._theano_fn

        # if it is a numpy function, try to get the theano version
        elif ((getattr(func, '__module__', None)
               and func.__module__.startswith('numpy'))
              or isinstance(func, np.ufunc)):
            func = getattr(T, func.__name__, func)

        # handle random numbers
        elif ('method random of mtrand.RandomState' in str(func)
              or 'method random_sample of mtrand.RandomState' in str(func)):
            def rand_u(shape):
                return global_randomstreams.uniform(
                    low=0, high=1, size=shape)
            return rand_u

        return func

    def visit_Num(self, node):
        # return self.ast_wrap(node, 'shadow')
        # don't make changes because these are typically function arguments
        return node

    def visit_Name(self, node):
        self.generic_visit(node)
        if isinstance(node.ctx, ast_module.Load):
            node = self.ast_wrap(node, 'shadow')
        return node

    def visit_Call(self, node):
        self.generic_visit(node)
        node.func = self.ast_wrap(node.func, 'handle_functions')
        return node

    def transform(self, f):
        self.smap.clear()
        ast = self.visit(get_ast(f))
        ast = ast_module.fix_missing_locations(ast)
        new_globals = globals()
        new_globals.update({'TT' : self})
        new_f = meta.decompiler.compile_func(
            ast, '<TheanoTransformer-AST>', new_globals)
        return new_f
Python
0.000005
@@ -2885,16 +2885,137 @@ ano_fn%0A%0A + elif func.__name__ in ('range', 'xrange'):%0A return lambda *args : func(*(unshadow(a) for a in args))%0A%0A
c2a79d8cbbb174530991d8b59578169ee9b2be44
use absolute paths for external scripts in Spidermonkey wrapper
wrapper_spidermonkey.py
wrapper_spidermonkey.py
#!/usr/bin/env python

"""
wrapper for JSLint

requires Spidermonkey

Usage:
  $ wrapper_spidermonkey.py <filepath>

TODO:
* support for JSLint options
"""

import sys

import spidermonkey
from simplejson import loads as json


lint_path = "fulljslint.js"
json_path = "json2.js"


def main(args=None):
    filepath = args[1]
    status, errors = lint(filepath)
    print format(errors, filepath)
    return status


def lint(filepath):
    rt = spidermonkey.Runtime()
    cx = rt.new_context()

    options = {} # TODO: read from argument
    cx.add_global("options", options)
    cx.add_global("getFileContents", get_file_contents)

    # load JavaScript code
    for path in (lint_path, json_path):
        cx.execute('eval(getFileContents("%s"));' % path)
    cx.execute('var code = getFileContents("%s");' % filepath)

    # lint code
    status = cx.execute("JSLINT(code, options);") # True if clean, False otherwise
    errors = cx.execute("JSON.stringify(JSLINT.errors);"); # XXX: errors incomplete (e.g. not reporting missing var)!?

    return status, errors


def format(errors, file):
    """
    convert JSLint errors object into report using standard error format
    <filepath>:<line>:<column>:<message>
    """
    lines = [":".join([
        file,
        str(error["line"] + 1),
        str(error["character"] + 1),
        error["reason"]
    ]) for error in json(errors)] # XXX: don't use generator expression!?
    # XXX: ignoring members id, evidence, raw, a, b, c, d

    return "\n".join(lines)


def get_file_contents(filepath):
    return open(filepath).read()


if __name__ == "__main__":
    status = not main(sys.argv)
    sys.exit(status)
Python
0
@@ -159,16 +159,26 @@ port sys +%0Aimport os %0A%0Aimport @@ -235,19 +235,55 @@ n%0A%0A%0A -lint_path = +cwd = sys.path%5B0%5D%0Alint_path = os.path.join(cwd, %22fu @@ -294,16 +294,17 @@ lint.js%22 +) %0Ajson_pa @@ -312,18 +312,75 @@ h = -%22json2.js%22 +os.path.join(cwd, %22json2.js%22) # XXX: built in from Spidermonkey 1.8 %0A%0A%0Ad
2271131d5c2794eeba256a9d9547fa925f7bdf73
bump __version__
matplotlib2tikz/__init__.py
matplotlib2tikz/__init__.py
# -*- coding: utf-8 -*-
#
'''Script to convert Matplotlib generated figures into TikZ/PGFPlots figures.
'''

__author__ = 'Nico Schlömer'
__email__ = 'nico.schloemer@gmail.com'
__copyright__ = 'Copyright (c) 2010-2016, %s <%s>' % (__author__, __email__)
__credits__ = []
__license__ = 'MIT License'
__version__ = '0.5.6'
__maintainer__ = 'Nico Schlömer'
__status__ = 'Production'

from matplotlib2tikz.save import save
Python
0.000017
@@ -311,17 +311,17 @@ = '0.5. -6 +7 '%0A__main
35d3284a1242bdeb6ea3aec128deb92b3138106b
Add the ability to specify the Cakefile parent directory.
flask_cake/cake.py
flask_cake/cake.py
from __future__ import absolute_import

import os
import subprocess

from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler


class Cake(object):
    def __init__(self, app=None, tasks=["build"]):
        """Initalize a new instance of Flask-Cake.

        :param app: The Flask app
        :param tasks: A string containing a cake "task" to execute or a list
                      of multiple cake tasks to run. By default, this will run
                      ``cake build``.

        """
        self.init_app(app, tasks)

    def init_app(self, app, tasks):
        """Initalize a new instance of Flask-Cake.

        :param app: The Flask app
        :param tasks: A string containing a cake "task" to execute or a list
                      of multiple cake tasks to run. By default, this will run
                      ``cake build``.

        """
        self.app = app
        self.tasks = tasks

        self._watchdog()

    def _watchdog(self):
        """Runs Watchdog to listen to filesystem events.

        The directory currently requires the CoffeeScript files to be located
        in `static/coffee`. This directory should contain the `Cakefile`. When
        first run, it touches the `Cakefile` to trigger the initial build.

        """
        if not hasattr(self.app, 'static_url_path'):
            from warnings import warn
            warn(
                DeprecationWarning('static_path is called static_url_path since Flask 0.7'),
                stacklevel=2
            )
            static_url_path = self.app.static_path
        else:
            static_url_path = self.app.static_url_path

        static_dir = self.app.root_path + static_url_path
        cakedir = os.path.join(static_dir, "coffee")

        # Setup Watchdog
        handler = Events(cakedir=cakedir, tasks=self.tasks)
        observer = Observer(timeout=5000)
        observer.schedule(handler, path=cakedir, recursive=True)
        observer.start()

        # "Touch" the Cakefile to signal the initial build
        cakefile = os.path.join(cakedir, "Cakefile")
        with file(cakefile, 'a'):
            os.utime(cakefile, None)


class Events(FileSystemEventHandler):
    """Handler for all filesystem events."""
    def __init__(self, cakedir, tasks):
        super(Events, self).__init__()

        self._cakedir = cakedir
        self._tasks = tasks

    def on_any_event(self, event):
        nullfh = open(os.devnull, "w")

        # Check to see if the tasks are specified as a single task or multiple
        # tasks.
        if isinstance(self._tasks, basestring):
            tasks = [self._tasks]
        else:
            tasks = self._tasks

        # Run `cake build` and send all stdout to `/dev/null`.
        p = subprocess.Popen(["cake"] + tasks,
                             cwd=self._cakedir,
                             stdout=nullfh)
        p.wait()

        nullfh.close()
Python
0
@@ -222,16 +222,37 @@ %22build%22%5D +, cakeparent=%22coffee%22 ):%0A @@ -517,32 +517,337 @@ %60%60cake build%60%60. +%0A :param str cakeparent: The directory where the Cakefile is located%0A relative to Flask's %60static_path%60. By default,%0A this is %60coffee/%60, meaning that the Cakefile is%0A located at %60static_path/coffee/Cakefile%60. %0A%0A %22%22%22%0A @@ -876,16 +876,28 @@ p, tasks +, cakeparent )%0A%0A d @@ -920,24 +920,36 @@ , app, tasks +, cakeparent ):%0A %22 @@ -1218,16 +1218,321 @@ build%60%60. +%0A :param str cakeparent: The directory where the Cakefile is located%0A relative to Flask's %60static_path%60. By default,%0A this is %60coffee/%60, meaning that the Cakefile is%0A located at %60static_path/coffee/Cakefile%60. %0A%0A @@ -1582,24 +1582,61 @@ asks = tasks +%0A self.cakeparent = cakeparent %0A%0A se @@ -1746,201 +1746,49 @@ -The directory currently requires the CoffeeScript files to be located%0A in %60static/coffee%60. This directory should contain the %60Cakefile%60. When%0A first run, it touches the %60Cakefile%60 +When first run, the %60Cakefile%60 is touched to @@ -1794,24 +1794,32 @@ trigger the +%0A initial bui @@ -2304,24 +2304,31 @@ ic_dir, -%22coffee%22 +self.cakeparent )%0A%0A
cd25fd1bd40a98886b92f5e3b357ee0ab2796c7b
add /query route, with plain text for mongo
flaskr/__init__.py
flaskr/__init__.py
#!/usr/bin/python3
# -*- coding: latin-1 -*-

import os
import sys
# import psycopg2
import json
from bson import json_util
from pymongo import MongoClient
from flask import Flask, request, session, g, redirect, url_for, abort, \
    render_template, flash


def create_app():
    app = Flask(__name__)
    return app

app = create_app()

# REPLACE WITH YOUR DATABASE NAME
MONGODATABASE = "dbEscuchas"
MONGOSERVER = "localhost"
MONGOPORT = 27017
client = MongoClient(MONGOSERVER, MONGOPORT)
mongodb = client[MONGODATABASE]

'''
# Uncomment for postgres connection
# REPLACE WITH YOUR DATABASE NAME, USER AND PASS
POSTGRESDATABASE = "mydatabase"
POSTGRESUSER = "myuser"
POSTGRESPASS = "mypass"
postgresdb = psycopg2.connect(
    database=POSTGRESDATABASE,
    user=POSTGRESUSER,
    password=POSTGRESPASS)
'''

#Cambiar por Path Absoluto en el servidor
QUERIES_FILENAME = '/var/www/FlaskApp/queries'


@app.route("/")
def home():
    with open(QUERIES_FILENAME, 'r', encoding='utf-8') as queries_file:
        json_file = json.load(queries_file)
        pairs = [(x["name"],
                  x["database"],
                  x["description"],
                  x["query"]) for x in json_file]
        return render_template('file.html', results=pairs)


@app.route("/mongo")
def mongo():
    query = request.args.get("query")
    if not query is None:
        results = eval('mongodb.'+query)
        results = json_util.dumps(results, sort_keys=True, indent=4)
        if "find" in query:
            return render_template('mongo.html', results=results)
    else:
        return "no query"


@app.route("/postgres")
def postgres():
    return "Postgres API is not available"
    query = request.args.get("query")
    if not query is None:
        cursor = postgresdb.cursor()
        cursor.execute(query)
        results = [[a for a in result] for result in cursor]
        print(results)
        return render_template('postgres.html', results=results)
    else:
        return "no query"


@app.route("/example")
def example():
    return render_template('example.html')


if __name__ == "__main__":
    app.run()
Python
0
@@ -1578,32 +1578,416 @@ urn %22no query%22%0A%0A +@app.route(%22/query%22)%0Adef ruta_query():%0A query = request.args.get(%22query%22)%0A if not query is None:%0A results = eval('mongodb.'+query)%0A results = json_util.dumps(results, sort_keys=True, indent=4)%0A if %22find%22 in query:%0A # return render_template('mongo.html', results=results)%0A return str(results);%0A else:%0A return %22%7B%7D%22 # No query%0A%0A %0A@app.route(%22/po
af4b53a85aec95c9ec7bf20b1c019ec0f397eacb
Bump version to 0.2.2
flavio/_version.py
flavio/_version.py
__version__='0.2.1'
Python
0.000002
@@ -14,8 +14,8 @@ 0.2. -1 +2 ' %0A
3d90dcd652fcfa47a5350a1ddb8cc562b0f3d676
Rename a base class
pathvalidate/_base.py
pathvalidate/_base.py
""" .. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com> """ import abc import itertools import os import re from typing import Any, List, Optional, Tuple, cast from ._common import PathType, Platform, PlatformType, normalize_platform, unprintable_ascii_chars from ._const import _NTFS_RESERVED_FILE_NAMES from .error import ReservedNameError, ValidationError class Base: _INVALID_PATH_CHARS = "".join(unprintable_ascii_chars) _INVALID_FILENAME_CHARS = _INVALID_PATH_CHARS + "/" _INVALID_WIN_PATH_CHARS = _INVALID_PATH_CHARS + ':*?"<>|\t\n\r\x0b\x0c' _INVALID_WIN_FILENAME_CHARS = _INVALID_FILENAME_CHARS + _INVALID_WIN_PATH_CHARS + "\\" _WINDOWS_RESERVED_FILE_NAMES = ("CON", "PRN", "AUX", "CLOCK$", "NUL") + tuple( "{:s}{:d}".format(name, num) for name, num in itertools.product(("COM", "LPT"), range(1, 10)) ) _RE_INVALID_FILENAME = re.compile( "[{:s}]".format(re.escape(_INVALID_FILENAME_CHARS)), re.UNICODE ) _RE_INVALID_WIN_FILENAME = re.compile( "[{:s}]".format(re.escape(_INVALID_WIN_FILENAME_CHARS)), re.UNICODE ) _RE_INVALID_PATH = re.compile("[{:s}]".format(re.escape(_INVALID_PATH_CHARS)), re.UNICODE) _RE_INVALID_WIN_PATH = re.compile( "[{:s}]".format(re.escape(_INVALID_WIN_PATH_CHARS)), re.UNICODE ) _RE_NTFS_RESERVED = re.compile( "|".join("^/{}$".format(re.escape(pattern)) for pattern in _NTFS_RESERVED_FILE_NAMES), re.IGNORECASE, ) _ERROR_MSG_TEMPLATE = "invalid char found: invalids=({invalid}), value={value}" @property def platform(self) -> Platform: return self.__platform @property def reserved_keywords(self) -> Tuple[str, ...]: return (".", "..") @property def min_len(self) -> int: return self._min_len @property def max_len(self) -> int: return self._max_len def __init__( self, min_len: Optional[int], max_len: Optional[int], platform: PlatformType = None ) -> None: self.__platform = normalize_platform(platform) if min_len is None: min_len = 1 self._min_len = max(min_len, 1) if max_len in [None, -1]: self._max_len = self._get_default_max_path_len() else: self._max_len = cast(int, max_len) def _is_universal(self) -> bool: return self.platform == Platform.UNIVERSAL def _is_linux(self) -> bool: return self.platform == Platform.LINUX def _is_windows(self) -> bool: return self.platform == Platform.WINDOWS def _is_macos(self) -> bool: return self.platform == Platform.MACOS def _validate_max_len(self) -> None: if self.max_len < 1: raise ValueError("max_len must be greater or equals to one") if self.min_len > self.max_len: raise ValueError("min_len must be lower than max_len") def _get_default_max_path_len(self) -> int: if self._is_linux(): return 4096 if self._is_windows(): return 260 if self._is_macos(): return 1024 return 260 # universal @staticmethod def _findall_to_str(match: List[Any]) -> str: return ", ".join([repr(text) for text in match]) class AbstractValidator(Base, metaclass=abc.ABCMeta): @abc.abstractmethod def validate(self, value: PathType) -> None: # pragma: no cover pass def is_valid(self, value: PathType) -> bool: try: self.validate(value) except (TypeError, ValidationError): return False return True def _is_reserved_keyword(self, value: str) -> bool: return value in self.reserved_keywords class AbstractSanitizer(Base, metaclass=abc.ABCMeta): @abc.abstractmethod def sanitize(self, value: PathType, replacement_text: str = "") -> PathType: # pragma: no cover pass class BaseValidator(AbstractValidator): def _validate_reserved_keywords(self, name: str) -> None: root_name = self.__extract_root_name(name) if self._is_reserved_keyword(root_name.upper()): raise 
ReservedNameError( "'{}' is a reserved name".format(root_name), reusable_name=False, reserved_name=root_name, platform=self.platform, ) @staticmethod def __extract_root_name(path: str) -> str: return os.path.splitext(os.path.basename(path))[0]
Python
0.000943
@@ -378,16 +378,20 @@ ass Base +File :%0A _I @@ -3306,24 +3306,28 @@ lidator(Base +File , metaclass= @@ -3767,16 +3767,20 @@ zer(Base +File , metacl
94bbae75b2ef5147fdb55c3a4b62910552e5fb5d
update instances of iterations to reps in stratified_permutationtest
permute/stratified.py
permute/stratified.py
# -*- coding: utf-8 -*-

"""
Stratified permutation tests.
"""

from __future__ import division, print_function, absolute_import

import numpy as np

from .utils import get_prng, permute_within_groups


def corrcoef(x, y, group):
    """
    Calculates sum of Spearman correlations between x and y,
    computed separately in each group.

    Parameters
    ----------
    x : array-like
        Variable 1
    y : array-like
        Variable 2, of the same length as x
    group : array-like
        Group memberships, of the same length as x

    Returns
    -------
    float
        The sum of Spearman correlations
    """
    tst = 0.0
    for g in np.unique(group):
        gg = group == g
        tst += np.corrcoef(x[gg], y[gg])[0, 1]
    return tst


def sim_corr(x, y, group, reps=10**4, seed=None):
    """
    Simulate permutation p-value of stratified Spearman correlation test.

    Parameters
    ----------
    x : array-like
        Variable 1
    y : array-like
        Variable 2, of the same length as x
    group : array-like
        Group memberships, of the same length as x
    reps : int
        Number of repetitions
    seed : RandomState instance or {None, int, RandomState instance}
        If None, the pseudorandom number generator is the RandomState
        instance used by `np.random`;
        If int, seed is the seed used by the random number generator;
        If RandomState instance, seed is the pseudorandom number generator.

    Returns
    -------
    float
        the left (lower) p-value
    float
        the right (upper) p-value
    float
        the two-sided p-value
    float
        the observed test statistic
    list
        the null distribution
    """
    prng = get_prng(seed)
    tst = corrcoef(x, y, group)
    sims = [corrcoef(permute_within_groups(x, group, prng), y, group)
            for i in range(reps)]
    left_pv = np.sum(sims <= tst)/reps
    right_pv = np.sum(sims >= tst)/reps
    two_sided_pv = np.sum(np.abs(sims) >= np.abs(tst))/reps
    return tst, left_pv, right_pv, two_sided_pv, sims


def stratified_permutationtest_mean(group, condition, response, groups=np.unique(group),
                                    conditions=np.unique(condition)):
    """
    Calculates variability in sample means between treatment conditions,
    within groups.

    If there are two treatment conditions, the test statistic is the
    difference in means, aggregated across groups.
    If there are more than two treatment conditions, the test statistic
    is the standard deviation of the means, aggregated across groups.

    Parameters
    ----------
    group : array-like
        Group memberships
    condition : array-like
        Treatment conditions, of the same length as group
    response : array-like
        Responses, of the same length as group
    groups : array-like
        Group labels. By default, it is the unique values of group
    conditions : array-like
        Condition labels. By default, it is the unique values of condition

    Returns
    -------
    tst : float
        The observed test statistic
    """
    tst = 0.0
    if len(groups) < 2:
        raise ValueError('Number of groups must be at least 2.')
    elif len(groups) == 2:
        stat = lambda u: u[0] - u[1]
    elif len(groups) > 2:
        stat = np.std
    for g in groups:
        gg = group == g
        x = [gg & (condition == c) for c in conditions]
        tst += stat([response[x[j]].mean() for j in range(len(x))])
    return tst


def stratified_permutationtest(group, condition, response, reps=10**5,
                               testStatistic=stratified_permutationtest_mean,
                               seed=None):
    """
    Stratified permutation test based on differences in means.

    The test statistic is

    .. math:: \sum_{g \in \\text{groups}} [
                 f(mean(\\text{response for cases in group $g$
                 assigned to each condition}))].

    The function f is the difference if there are two conditions, and
    the standard deviation if there are more than two conditions.

    There should be at least one group and at least two conditions.
    Under the null hypothesis, all assignments to the two conditions that
    preserve the number of cases assigned to the conditions are equally
    likely.

    Groups in which all cases are assigned to the same condition are
    skipped; they do not contribute to the p-value since all randomizations
    give the same contribution to the difference in means.

    Parameters
    ----------
    group : array-like
        Group memberships
    condition : array-like
        Treatment conditions, of the same length as group
    response : array-like
        Responses, of the same length as group
    reps : int
        Number of repetitions
    testStatistic : function
        Function to compute test statistic. By default,
        stratified_permutationtest_mean
    seed : RandomState instance or {None, int, RandomState instance}
        If None, the pseudorandom number generator is the RandomState
        instance used by `np.random`;
        If int, seed is the seed used by the random number generator;
        If RandomState instance, seed is the pseudorandom number generator

    Returns
    -------
    float
        the left (lower) p-value
    float
        the right (upper) p-value
    float
        the two-sided p-value
    float
        the observed test statistic
    list
        the null distribution
    """
    prng = get_prng(seed)
    groups = np.unique(group)
    conditions = np.unique(condition)
    if len(conditions) < 2:
        return 1.0, 1.0, 1.0, np.nan, None
    else:
        tst = testStatistic(group, condition, response, groups, conditions)
        dist = np.zeros(iterations)
        for i in range(int(iterations)):
            dist[i] = testStatistic(group,
                                    permute_within_groups(
                                        condition, group, prng),
                                    response, groups, conditions)

        conds = [dist <= tst, dist >= tst, abs(dist) >= abs(tst)]
        left_pv, right_pv, two_sided_pv = [np.count_nonzero(c)/iterations
                                           for c in conds]
        return left_pv, right_pv, two_sided_pv, tst, dist
Python
0
@@ -2164,94 +2164,29 @@ ups= -np.unique(group), %0A conditions=np.unique(condition) +None, conditions=None ):%0A @@ -3061,32 +3061,165 @@ atistic%0A %22%22%22%0A + if(groups is None):%0A groups = np.unique(group)%0A if(conditions is None):%0A conditions = np.unique(condition)%0A %0A tst = 0.0%0A @@ -5865,25 +5865,19 @@ p.zeros( -iteration +rep s)%0A @@ -5898,25 +5898,19 @@ nge(int( -iteration +rep s)):%0A @@ -6273,17 +6273,11 @@ (c)/ -iteration +rep s fo
b2b5e91649cdfadda63e11dcfe5ef5c105d28f23
Add timeout cli arg
pg_bawler/listener.py
pg_bawler/listener.py
#!/usr/bin/env python
'''
Listen on given channel for notification.

$ python -m pg_bawler.listener mychannel

If you installed notification trigger with ``pg_bawler.gen_sql`` then
channel is the same as ``tablename`` argument.
'''
import argparse
import asyncio
import importlib
import logging
import sys

import pg_bawler.core


LOGGER = logging.getLogger('pg_bawler.listener')


class DefaultHandler:

    def __init__(self):
        self.count = 0

    async def handle_notification(self, notification):
        self.count += 1
        notification_number = self.count
        LOGGER.info(
            'Received notification #%s pid %s from channel %s: %s',
            notification_number, notification.pid,
            notification.channel, notification.payload)


def get_default_cli_args_parser():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--log-level',
        metavar='LOG_LEVEL', default='info',
        help='Log level. One of: fatal, cirtical, error, warning, info, debug')
    parser.add_argument(
        '--dsn',
        metavar='DSN',
        help='Connection string. e.g. `dbname=test user=postgres`')
    parser.add_argument(
        '--handler',
        metavar='HANDLER', default='pg_bawler.listener:default_handler',
        help=(
            'Module and name of python callable.'
            ' e.g. `pg_bawler.listener:default_handler`'))
    parser.add_argument(
        'channel',
        metavar='CHANNEL', type=str,
        help='Name of Notify/Listen channel to listen on.')
    return parser


def resolve_handler(handler_str):
    module_name, callable_name = handler_str.split(':')
    return getattr(importlib.import_module(module_name), callable_name)


default_handler = DefaultHandler().handle_notification


class NotificationListener(
    pg_bawler.core.BawlerBase,
    pg_bawler.core.ListenerMixin
):
    pass


def main(*argv):
    args = get_default_cli_args_parser(argv or sys.argv[1:]).parse_args()
    try:
        logging.basicConfig(
            format='[%(asctime)s][%(name)s][%(levelname)s]: %(message)s',
            level=args.log_level)
    except TypeError:
        sys.exit('Worng log level. --help for more info.')
    LOGGER.info('Starting pg_bawler listener for channel: %s', args.channel)
    loop = asyncio.get_event_loop()
    listener = NotificationListener(connection_params={'dsn': args.dsn})
    listener.listen_timeout = 5
    listener.register_handler(resolve_handler(args.handler))
    loop.run_until_complete(listener.register_channel(args.channel))
    loop.run_until_complete(listener.listen())


if __name__ == '__main__':
    sys.exit(main())
Python
0.000001
@@ -1026,20 +1026,20 @@ efault=' -info +INFO ',%0A @@ -1070,52 +1070,52 @@ of: -fatal, cirtical, error, warning, info, debug +FATAL, CIRTICAL, ERROR, WARNING, INFO, DEBUG ')%0A @@ -1178,16 +1178,39 @@ ='DSN',%0A + required=True,%0A @@ -1286,32 +1286,287 @@ r.add_argument(%0A + '--timeout',%0A metavar='TIMEOUT', default=5, type=int,%0A help=(%0A 'Timeout for getting notification.'%0A ' If this timeout passes pg_bawler checks'%0A ' connection if it%5C's alive'))%0A parser.add_argument(%0A '--handl @@ -2311,16 +2311,29 @@ _parser( +).parse_args( argv or @@ -2349,21 +2349,8 @@ 1:%5D) -.parse_args() %0A @@ -2490,16 +2490,24 @@ og_level +.upper() )%0A ex @@ -2797,17 +2797,28 @@ meout = -5 +args.timeout %0A lis
8602d984a9caf73dc40168e0e7937c9e930d035b
Stop $PYTHONPATH from messing up the search path for DLLs. CURA-3418 Cura build on Win 64 fails due to $PYTHONPATH
cura_app.py
cura_app.py
#!/usr/bin/env python3

# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

import os
import sys
import platform

from UM.Platform import Platform

#WORKAROUND: GITHUB-88 GITHUB-385 GITHUB-612
if Platform.isLinux(): # Needed for platform.linux_distribution, which is not available on Windows and OSX
    # For Ubuntu: https://bugs.launchpad.net/ubuntu/+source/python-qt4/+bug/941826
    if platform.linux_distribution()[0] in ("debian", "Ubuntu", "LinuxMint"): # TODO: Needs a "if X11_GFX == 'nvidia'" here. The workaround is only needed on Ubuntu+NVidia drivers. Other drivers are not affected, but fine with this fix.
        import ctypes
        from ctypes.util import find_library
        libGL = find_library("GL")
        ctypes.CDLL(libGL, ctypes.RTLD_GLOBAL)

#WORKAROUND: GITHUB-704 GITHUB-708
# It looks like setuptools creates a .pth file in
# the default /usr/lib which causes the default site-packages
# to be inserted into sys.path before PYTHONPATH.
# This can cause issues such as having libsip loaded from
# the system instead of the one provided with Cura, which causes
# incompatibility issues with libArcus
if "PYTHONPATH" in os.environ.keys():  # If PYTHONPATH is used
    PYTHONPATH = os.environ["PYTHONPATH"].split(os.pathsep)  # Get the value, split it..
    PYTHONPATH.reverse()  # and reverse it, because we always insert at 1
    for PATH in PYTHONPATH:  # Now beginning with the last PATH
        PATH_real = os.path.realpath(PATH)  # Making the the path "real"
        if PATH_real in sys.path:  # This should always work, but keep it to be sure..
            sys.path.remove(PATH_real)
        sys.path.insert(1, PATH_real)  # Insert it at 1 after os.curdir, which is 0.

def exceptHook(hook_type, value, traceback):
    import cura.CrashHandler
    cura.CrashHandler.show(hook_type, value, traceback)

sys.excepthook = exceptHook

# Workaround for a race condition on certain systems where there
# is a race condition between Arcus and PyQt. Importing Arcus
# first seems to prevent Sip from going into a state where it
# tries to create PyQt objects on a non-main thread.
import Arcus #@UnusedImport

from UM.Platform import Platform
import cura.CuraApplication
import cura.Settings.CuraContainerRegistry

if Platform.isWindows() and hasattr(sys, "frozen"):
    dirpath = os.path.expanduser("~/AppData/Local/cura/")
    os.makedirs(dirpath, exist_ok = True)
    sys.stdout = open(os.path.join(dirpath, "stdout.log"), "w")
    sys.stderr = open(os.path.join(dirpath, "stderr.log"), "w")

# Force an instance of CuraContainerRegistry to be created and reused later.
cura.Settings.CuraContainerRegistry.CuraContainerRegistry.getInstance()

# This prestart up check is needed to determine if we should start the application at all.
if not cura.CuraApplication.CuraApplication.preStartUp():
    sys.exit(0)

app = cura.CuraApplication.CuraApplication.getInstance()
app.run()
Python
0
@@ -807,16 +807,235 @@ LOBAL)%0A%0A +# When frozen, i.e. installer version, don't let PYTHONPATH mess up the search path for DLLs.%0Aif Platform.isWindows() and hasattr(sys, %22frozen%22):%0A try:%0A del os.environ%5B%22PYTHONPATH%22%5D%0A except KeyError: pass%0A%0A #WORKARO
90571c86f39fee14fafcc9c030de66d4255c5d82
Change naming style
lexos/interfaces/statistics_interface.py
lexos/interfaces/statistics_interface.py
from flask import request, session, render_template, Blueprint

from lexos.helpers import constants as constants
from lexos.managers import utility, session_manager as session_manager
from lexos.interfaces.base_interface import detect_active_docs

# this is a flask blue print
# it helps us to manage groups of views
# see here for more detail:
# http://exploreflask.com/en/latest/blueprints.html
# http://flask.pocoo.org/docs/0.12/blueprints/
stats_view = Blueprint('statistics', __name__)


# Tells Flask to load this function when someone is at '/statsgenerator'
@stats_view.route("/statistics", methods=["GET", "POST"])
def statistics():
    """ Handles the functionality on the Statistics page ...

    Note: Returns a response object (often a render_template call) to flask
    and eventually to the browser.
    """
    # Detect the number of active documents.
    num_active_docs = detect_active_docs()
    file_manager = utility.load_file_manager()
    labels = file_manager.get_active_labels()
    if request.method == "GET":
        # "GET" request occurs when the page is first loaded.
        if 'analyoption' not in session:
            session['analyoption'] = constants.DEFAULT_ANALYZE_OPTIONS
        if 'statisticoption' not in session:
            session['statisticoption'] = {'segmentlist': list(
                map(str, list(file_manager.files.keys())))}  # default is all on
        return render_template(
            'statistics.html',
            labels=labels,
            labels2=labels,
            itm="statistics",
            numActiveDocs=num_active_docs)
    if request.method == "POST":
        token = request.form['tokenType']
        file_info_dict, corpus_info_dict = utility.generate_statistics(
            file_manager)
        session_manager.cache_analysis_option()
        session_manager.cache_statistic_option()
        # DO NOT save fileManager!
        return render_template(
            'statistics.html',
            labels=labels,
            FileInfoDict=file_info_dict,
            corpusInfoDict=corpus_info_dict,
            token=token,
            itm="statistics",
            numActiveDocs=num_active_docs)
Python
0.000002
@@ -121,130 +121,130 @@ xos. -managers import utility, session_manager as session_manager%0Afrom lexos.interfaces.base_interface import detect_active_docs +interfaces.base_interface import detect_active_docs%0Afrom lexos.managers import utility, session_manager as session_manager %0A%0A# @@ -1704,19 +1704,19 @@ le_info_ -dic +lis t, corpu @@ -1721,21 +1721,16 @@ pus_info -_dict = utili @@ -2022,19 +2022,19 @@ FileInfo -Dic +Lis t=file_i @@ -2033,27 +2033,27 @@ t=file_info_ -dic +lis t,%0A @@ -2069,12 +2069,8 @@ Info -Dict =cor @@ -2081,13 +2081,8 @@ info -_dict ,%0A
009cdf804f0f730ed081c6003eedb1015283948f
update to test for non categorized event publishing
lg_replay/test/offline/test_lg_replay.py
lg_replay/test/offline/test_lg_replay.py
#!/usr/bin/env python

PKG = 'lg_replay'
NAME = 'test_lg_replay'

import rospy
import unittest
import json

from evdev import InputEvent
from lg_replay import DeviceReplay
from interactivespaces_msgs.msg import GenericMessage


class MockDevice:
    def __init__(self):
        self.events = [
            InputEvent(1441716733L, 879280L, 3, 0, 9888L),
            InputEvent(1441716733L, 879280L, 3, 1, 15600L),
            InputEvent(1441716733L, 879280L, 0, 0, 0L),
            InputEvent(1441716733L, 981276L, 3, 53, 9872L),
            InputEvent(1441716733L, 981276L, 3, 54, 15664L),
            InputEvent(1441716733L, 981276L, 3, 0, 9872L),
            InputEvent(1441716733L, 981276L, 3, 1, 15664L),
            InputEvent(1441716733L, 981276L, 0, 0, 0L),
            InputEvent(1441716733L, 982263L, 3, 57, -1L),
            InputEvent(1441716733L, 982263L, 1, 330, 0L)  # < this event gets tested
        ]

    def read_loop(self):
        return self.events


class MockPublisher:
    def __init__(self):
        self.published_messages = []

    def get_published_messages(self):
        return self.published_messages

    def publish_event(self, message):
        self.published_messages.append(message)


class TestReplay(unittest.TestCase):
    def setUp(self):
        self.mock_device = MockDevice()
        self.mock_publisher = MockPublisher()
        self.replay = DeviceReplay(self.mock_publisher, 'blah', event_ecode='EV_KEY', device=self.mock_device)

    def test_events_get_filtered_and_published(self):
        self.replay.run()
        self.assertEqual(type(self.mock_publisher.get_published_messages()), list)
        self.assertEqual(len(self.mock_publisher.get_published_messages()), 1)
        self.assertEqual(type(self.mock_publisher.get_published_messages()[0]), dict)
        message = self.mock_publisher.get_published_messages()[0]
        self.assertEqual(message['scancode'], 330)
        self.assertEqual(message['keystate'], 0)
        self.assertEqual(message['keycode'], 'BTN_TOUCH')


if __name__ == '__main__':
    import rostest
    rostest.rosrun(PKG, NAME, TestReplay)
Python
0
@@ -1904,12 +1904,8 @@ ge%5B' -scan code @@ -1951,79 +1951,18 @@ ge%5B' -keystate'%5D, 0)%0A self.assertEqual(message%5B'keycode'%5D, 'BTN_TOUCH' +value'%5D, 0 )%0A%0Ai
41548ba9efb1d47c823696adeb13560bdbb73878
allow update of IP to timeout without quitting loop
dyndnsc/updater/base.py
dyndnsc/updater/base.py
# -*- coding: utf-8 -*-
import logging

import requests

from ..common.subject import Subject
from ..common.events import IP_UPDATE_SUCCESS, IP_UPDATE_ERROR

log = logging.getLogger(__name__)


class UpdateProtocol(Subject):
    """
    base class for all update protocols that use the dyndns2 update protocol
    """

    _updateurl = None
    theip = None
    hostname = None # this holds the desired dns hostname
    status = 0

    def __init__(self):
        self.updateurl = self._updateurl
        super(UpdateProtocol, self).__init__()

    def updateUrl(self):
        return self.updateurl

    def success(self):
        self.status = 0
        self.notify_observers(IP_UPDATE_SUCCESS, "Updated IP address of '%s' to %s" % (self.hostname, self.theip))

    def abuse(self):
        self.status = 1
        self.notify_observers(IP_UPDATE_ERROR, "This client is considered to be abusive for hostname '%s'" % (self.hostname))

    def nochg(self):
        self.status = 0

    def nohost(self):
        self.status = 1
        self.notify_observers(IP_UPDATE_ERROR, "Invalid/non-existant hostname: [%s]" % (self.hostname))

    def failure(self):
        self.status = 1
        self.notify_observers(IP_UPDATE_ERROR, "Service is failing")

    def notfqdn(self):
        self.status = 1
        self.notify_observers(IP_UPDATE_ERROR, "The provided hostname '%s' is not a valid hostname!" % (self.hostname))

    def protocol(self):
        params = {'myip': self.theip, 'hostname': self.hostname}
        r = requests.get(self.updateUrl(), params=params,
                         auth=(self.userid, self.password), timeout=60)
        r.close()
        log.debug("status %i, %s", r.status_code, r.text)
        if r.status_code == 200:
            if r.text.startswith("good "):
                self.success()
                return self.theip
            elif r.text.startswith('nochg'):
                self.nochg()
                return self.theip
            elif r.text == 'nohost':
                self.nohost()
                return 'nohost'
            elif r.text == 'abuse':
                self.abuse()
                return 'abuse'
            elif r.text == '911':
                self.failure()
                return '911'
            elif r.text == 'notfqdn':
                self.notfqdn()
                return 'notfqdn'
            else:
                self.status = 1
                self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.text))
                return r.text
        else:
            self.status = 1
            self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.status_code))
            return 'invalid http status code: %s' % r.status_code
Python
0
@@ -1429,32 +1429,53 @@ protocol(self):%0A + timeout = 60%0A params = @@ -1523,16 +1523,33 @@ stname%7D%0A + try:%0A @@ -1597,16 +1597,45 @@ =params, +%0A auth=(s @@ -1674,12 +1674,256 @@ out= -60)%0A +timeout)%0A except requests.exceptions.Timeout as exc:%0A log.warning(%22HTTP timeout(%25i) occurred while updating IP at '%25s'%22,%0A timeout, self.updateUrl(), exc_info=exc)%0A return False%0A finally:%0A
25f57a023f978fca94bbeb9655a4d90f0b2d95f0
Fix typo
pints/toy/__init__.py
pints/toy/__init__.py
#
# Root of the toy module.
# Provides a number of toy models and logpdfs for tests of Pints' functions.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals

from ._toy_classes import ToyLogPDF, ToyModel, ToyODEModel

from ._annulus import AnnulusLogPDF
from ._beeler_reuter_model import ActionPotentialModel
from ._cone import ConeLogPDF
from ._constant_model import ConstantModel
from ._eight_schools import EightSchoolsLogPDF
from ._fitzhugh_nagumo_model import FitzhughNagumoModel
from ._gaussian import GaussianLogPDF
from ._german_credit import GermanCreditLogPDF
from ._german_credit_hierarchical import GermanCreditHierarchicalLogPDF
from ._goodwin_oscillator_model import GoodwinOscillatorModel
from ._hes1_michaelis_menten import Hes1Model
from ._hh_ik_model import HodgkinHuxleyIKModel
from ._high_dimensional_gaussian import HighDimensionalGaussianLogPDF
from ._logistic_model import LogisticModel
from ._lotka_volterra_model import LotkaVolterraModel
from ._multimodal_gaussian import MultimodalGaussianLogPDF
from ._neals_funnel import NealsFunnelLogPDF
from ._parabola import ParabolicError
from ._repressilator_model import RepressilatorModel
from ._rosenbrock import RosenbrockError, RosenbrockLogPDF
from ._sho_model import SimpleHarmonicOscillatorModel
from ._simple_egg_box import SimpleEggBoxLogPDF
from ._sir_model import SIRModel
from ._twisted_gaussian_banana import TwistedGaussianLogPDF
from ._stochastic_degradation_model import StochasticDegradationModel
from ._stochastic_logistic model import StochasticLogisticModel
Python
0.999999
@@ -1756,17 +1756,17 @@ logistic - +_ model im
e0ea0d4df4d56da14f8f65b75122867d39d1bd45
Remove flake comment and register the product views
pitchfork/__init__.py
pitchfork/__init__.py
# Copyright 2014 Dave Kludt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from flask import Flask, render_template, g, request
from happymongo import HapPyMongo
from config import config

import re
import json

from adminbp import bp as admin_bp
from manage_globals import bp as manage_bp
from engine import bp as engine_bp

app = Flask(__name__)
app.config.from_object(config)

app.register_blueprint(admin_bp, url_prefix='/admin')
app.register_blueprint(manage_bp, url_prefix='/manage')
app.register_blueprint(engine_bp, url_prefix='/engine')

# Setup DB based on the app name
mongo, db = HapPyMongo(config)

from global_helper import front_page_most_accessed, search_for_calls
from global_helper import gather_history

import product_views # noqa


@app.template_filter()
def nl2br(value):
    if value:
        _newline_re = re.compile(r'(?:\r\n|\r|\n)')
        return _newline_re.sub('<br>', value)


@app.template_filter()
def tab2spaces(value):
    if value:
        text = re.sub('\t', '&nbsp;' * 4, value)
        return text


@app.template_filter()
def unslug(value):
    text = re.sub('_', ' ', value)
    return text


@app.template_filter()
def slug(value):
    text = re.sub('\s+', '_', value)
    return text


@app.template_filter()
def check_regex(value):
    if re.match('variable', value):
        return True
    else:
        return False


@app.template_filter()
def pretty_print_json(string):
    return json.dumps(
        string,
        sort_keys=False,
        indent=4,
        separators=(',', ':')
    )


@app.template_filter()
def remove_slash(string):
    if string:
        return re.sub('\/', '', string)


@app.context_processor
def utility_processor():
    def unslug(string):
        return re.sub('_', ' ', string)

    def parse_field_data(value):
        choices = re.sub('\r\n', ',', value)
        return choices.split(',')

    def slugify(data):
        temp_string = re.sub(' +', ' ', str(data.strip()))
        return re.sub(' ', '_', temp_string)

    return dict(
        parse_field_data=parse_field_data,
        unslug=unslug,
        slugify=slugify
    )


@app.before_request
def before_request():
    g.db = db


@app.route('/')
def index():
    active_products, data_centers, = [], []
    api_settings = db.api_settings.find_one()
    if api_settings:
        active_products = api_settings.get('active_products')

    most_accessed = front_page_most_accessed(active_products)
    if api_settings:
        data_centers = api_settings.get('dcs')

    return render_template(
        'index.html',
        api_settings=api_settings,
        active_products=active_products,
        most_accessed=most_accessed,
        data_centers=data_centers
    )


@app.route('/search', methods=['POST'])
def search():
    search_string = request.json.get('search_string')
    api_results = search_for_calls(search_string)
    return render_template(
        '_api_call_template.html',
        call_loop=api_results
    )


@app.route('/history')
def history():
    active_products = None
    api_settings = g.db.api_settings.find_one()
    if api_settings:
        active_products = api_settings.get('active_products')

    history = gather_history()
    return render_template(
        'history.html',
        history=history,
        api_settings=api_settings,
        active_products=active_products
    )
Python
0
@@ -1245,16 +1245,49 @@ iews - # noqa +%0Aproduct_views.ProductsView.register(app) %0A%0A%0A@
5b120e5b89c06a0a5c01f8c710f85a4a179f56f7
Change HTML theme to match BIND ARM, add copyright, EPUB info
doc/conf.py
doc/conf.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = 'ISC DNSSEC Guide'
copyright = '2020, Internet Systems Consortium'
author = 'Internet Systems Consortium'

# The full version, including alpha/beta/rc tags
release = '2020'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The master toctree document.
master_doc = 'index'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
Python
0
@@ -1,8 +1,537 @@ +############################################################################%0A# Copyright (C) Internet Systems Consortium, Inc. (%22ISC%22)%0A#%0A# This Source Code Form is subject to the terms of the Mozilla Public%0A# License, v. 2.0. If a copy of the MPL was not distributed with this%0A# file, you can obtain one at https://mozilla.org/MPL/2.0/.%0A#%0A# See the COPYRIGHT file distributed with this work for additional%0A# information regarding copyright ownership.%0A############################################################################%0A%0A # Config @@ -581,16 +581,16 @@ uilder.%0A - #%0A# This @@ -1346,76 +1346,8 @@ m'%0A%0A -# The full version, including alpha/beta/rc tags%0Arelease = '2020'%0A%0A%0A # -- @@ -2200,17 +2200,24 @@ = ' -alabaster +sphinx_rtd_theme '%0A%0A# @@ -2445,16 +2445,16 @@ t.css%22.%0A - html_sta @@ -2476,8 +2476,119 @@ tatic'%5D%0A +%0A# -- Options for EPUB output -------------------------------------------------%0A%0Aepub_basename = 'DNSSECGuide'%0A
807e5069c269e915dcecb66dab50881c3b87846f
Add extra path to documentation configuration.
doc/conf.py
doc/conf.py
import sys
import os
sys.path.append(os.path.abspath('../../music_essentials/'))

# -*- coding: utf-8 -*-
#
# music_essentials documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 7 10:47:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'music_essentials'
copyright = u'2017, Charlotte Pierce'
author = u'Charlotte Pierce'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'music_essentialsdoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'music_essentials.tex', u'music\\_essentials Documentation',
     u'Charlotte Pierce', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'music_essentials', u'music_essentials Documentation',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'music_essentials', u'music_essentials Documentation',
     author, 'music_essentials', 'One line description of project.',
     'Miscellaneous'),
]


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
Python
0
@@ -74,16 +74,61 @@ ials/')) +%0Asys.path.insert(0, os.path.abspath('../..')) %0A%0A# -*-
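Note: decoding the escape sequences in the diff above (%0A is a newline), the hunk appears to insert a second path entry directly after the existing append, so both the package directory and the repository root become importable. A sketch of the resulting preamble, reconstructed by hand rather than taken from a stored field:

import sys
import os
sys.path.append(os.path.abspath('../../music_essentials/'))
sys.path.insert(0, os.path.abspath('../..'))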
6366fe6da78cd0e910b52352b918ff18d89f25c4
update tests_forms
myideas/core/tests/test_forms.py
myideas/core/tests/test_forms.py
from django.test import TestCase
from django.shortcuts import resolve_url as r
from registration.forms import RegistrationForm
from myideas.core.forms import IdeasForm, IdeasFormUpdate


class IdeasFormTest(TestCase):

    def setUp(self):
        self.form = IdeasForm()

    def test_form_has_fields(self):
        """IdeasForm must have 3 fields"""
        expected = ('title', 'description', 'tags')
        self.assertSequenceEqual(expected, list(self.form.fields))

    def test_all_required_form_fields(self):
        """Test Ideasform field is required."""
        form = IdeasForm({
            'title': '',
            'description': '',
        })
        self.assertFalse(form.is_valid())
        self.assertIn('title', form.errors)
        self.assertIn('description', form.errors)

    def test_fields_not_present(self):
        """Test Ideasform field is not present."""
        self.assertFalse(self.form.fields.get('created_at'))
        self.assertFalse(self.form.fields.get('slug'))
        self.assertFalse(self.form.fields.get('user'))


class IdeasFormUpdateTest(TestCase):

    def setUp(self):
        self.form = IdeasFormUpdate()

    def test_form_has_fields(self):
        """UpdateForm must have 2 fields"""
        expected = ('title', 'description')
        self.assertSequenceEqual(expected, list(self.form.fields))

    def test_all_required_form_fields(self):
        """Test Updateform field is required."""
        form = IdeasFormUpdate({
            'title': '',
            'description': '',
        })
        self.assertFalse(form.is_valid())
        self.assertIn('title', form.errors)
        self.assertIn('description', form.errors)

    def test_fields_not_present(self):
        """Test Updateform field is not present."""
        self.assertFalse(self.form.fields.get('user'))
        self.assertFalse(self.form.fields.get('slug'))
        self.assertFalse(self.form.fields.get('created_at'))
        self.assertFalse(self.form.fields.get('tags'))


class RegisterIdea(TestCase):

    def test_registration_get(self):
        resp = self.client.get(r('registration_register'))
        self.failUnless(isinstance(resp.context['form'], RegistrationForm))
Python
0.000001
@@ -204,33 +204,32 @@ Test(TestCase):%0A -%0A def setUp(se @@ -1081,33 +1081,32 @@ Test(TestCase):%0A -%0A def setUp(se
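Note: both hunks above delete a single escaped newline (%0A), i.e. the blank line separating each test class header from its setUp method. Assuming standard 4-space indentation, the patched classes appear to begin like this (my reading of the hunks, not a dataset field):

class IdeasFormTest(TestCase):
    def setUp(self):
        self.form = IdeasForm()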
82b87b05068a8fb56f2983714c08c0b822b5dde5
Remove settings leftover from sphinx-releases
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*-
import os
import sys

import alagitpull

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))

# package data
about = {}
with open("../unihan_db/__about__.py") as fp:
    exec(fp.read(), about)

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.napoleon',
    'alagitpull',
    'sphinx_issues',
]

releases_unstable_prehistory = True
releases_document_name = ["history"]
releases_issue_uri = "https://github.com/cihai/unihan-db/issues/%s"
releases_release_uri = "https://github.com/cihai/unihan-db/tree/v%s"

issues_github_path = about['__github__']

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = about['__title__']
copyright = about['__copyright__']
version = '%s' % ('.'.join(about['__version__'].split('.'))[:2])
release = '%s' % (about['__version__'])

exclude_patterns = ['_build']

pygments_style = 'sphinx'

html_theme_path = [alagitpull.get_path()]
html_static_path = ['_static']
html_favicon = 'favicon.ico'
html_theme = 'alagitpull'
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'more.html',
        'searchbox.html',
    ]
}
html_theme_options = {
    'logo': 'img/cihai.svg',
    'github_user': 'cihai',
    'github_repo': 'unihan-db',
    'github_type': 'star',
    'github_banner': True,
    'projects': alagitpull.projects,
    'project_name': 'db',
}
alagitpull_internal_hosts = ['unihan-db.git-pull.com', '0.0.0.0']
alagitpull_external_hosts_new_window = True

htmlhelp_basename = '%sdoc' % about['__title__']

latex_documents = [
    (
        'index',
        '{0}.tex'.format(about['__package_name__']),
        '{0} Documentation'.format(about['__title__']),
        about['__author__'],
        'manual',
    )
]

man_pages = [
    (
        'index',
        about['__package_name__'],
        '{0} Documentation'.format(about['__title__']),
        about['__author__'],
        1,
    )
]

texinfo_documents = [
    (
        'index',
        '{0}'.format(about['__package_name__']),
        '{0} Documentation'.format(about['__title__']),
        about['__author__'],
        about['__package_name__'],
        about['__description__'],
        'Miscellaneous',
    )
]

intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None),
}
Python
0
@@ -558,219 +558,8 @@ %0A%5D%0A%0A -releases_unstable_prehistory = True%0Areleases_document_name = %5B%22history%22%5D%0Areleases_issue_uri = %22https://github.com/cihai/unihan-db/issues/%25s%22%0Areleases_release_uri = %22https://github.com/cihai/unihan-db/tree/v%25s%22%0A%0A issu
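Note: the hunk above deletes the four releases_* assignments (decoding %0A as a newline and %25 as a literal percent sign), which are the sphinx-releases leftovers named in the subject. A sketch of the surviving region after the patch, reconstructed by hand rather than stored in the row:

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.napoleon',
    'alagitpull',
    'sphinx_issues',
]

issues_github_path = about['__github__']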
3c2fa8716d82e6f7bb7ed2b381a58c2a5a6f6440
Fix releases_document_name
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*-
import inspect
import os
import sys
from os.path import dirname, relpath

import alagitpull
import unihan_etl

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))

# package data
about = {}
with open("../unihan_etl/__about__.py") as fp:
    exec(fp.read(), about)

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.napoleon',
    'sphinx.ext.linkcode',
    'alagitpull',
    'sphinxarg.ext',  # sphinx-argparse
    'sphinx_issues',
    'changelog',  # _ext/changelog.py
]

releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/cihai/unihan-etl/issues/%s"
releases_release_uri = "https://github.com/cihai/unihan-etl/tree/v%s"

issues_github_path = about['__github__']

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = about['__title__']
copyright = about['__copyright__']
version = '%s' % ('.'.join(about['__version__'].split('.'))[:2])
release = '%s' % (about['__version__'])

exclude_patterns = ['_build']

pygments_style = 'sphinx'

html_theme_path = [alagitpull.get_path()]
html_static_path = ['_static']
html_favicon = 'favicon.ico'
html_theme = 'alagitpull'
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'more.html',
        'searchbox.html',
    ]
}
html_theme_options = {
    'logo': 'img/cihai.svg',
    'github_user': 'cihai',
    'github_repo': 'unihan-etl',
    'github_type': 'star',
    'github_banner': True,
    'projects': alagitpull.projects,
    'project_name': about['__title__'],
}
alagitpull_internal_hosts = ['libtmux.git-pull.com', '0.0.0.0']
alagitpull_external_hosts_new_window = True

htmlhelp_basename = '%sdoc' % about['__title__']

latex_documents = [
    (
        'index',
        '{0}.tex'.format(about['__package_name__']),
        '{0} Documentation'.format(about['__title__']),
        about['__author__'],
        'manual',
    )
]

man_pages = [
    (
        'index',
        about['__package_name__'],
        '{0} Documentation'.format(about['__title__']),
        about['__author__'],
        1,
    )
]

texinfo_documents = [
    (
        'index',
        '{0}'.format(about['__package_name__']),
        '{0} Documentation'.format(about['__title__']),
        about['__author__'],
        about['__package_name__'],
        about['__description__'],
        'Miscellaneous',
    )
]

intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
    'sqlalchemy': ('http://sqlalchemy.readthedocs.org/en/latest/', None),
}


def linkcode_resolve(domain, info):  # NOQA: C901
    """
    Determine the URL corresponding to Python object

    Notes
    -----
    From https://github.com/numpy/numpy/blob/v1.15.1/doc/source/conf.py,
    7c49cfa on Jul 31. License BSD-3.
    https://github.com/numpy/numpy/blob/v1.15.1/LICENSE.txt
    """
    if domain != 'py':
        return None

    modname = info['module']
    fullname = info['fullname']

    submod = sys.modules.get(modname)
    if submod is None:
        return None

    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except Exception:
            return None

    # strip decorators, which would resolve to the source of the decorator
    # possibly an upstream bug in getsourcefile, bpo-1764286
    try:
        unwrap = inspect.unwrap
    except AttributeError:
        pass
    else:
        obj = unwrap(obj)

    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        return None

    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None

    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""

    fn = relpath(fn, start=dirname(unihan_etl.__file__))

    if 'dev' in about['__version__']:
        return "%s/blob/master/%s/%s%s" % (
            about['__github__'],
            about['__package_name__'],
            fn,
            linespec,
        )
    else:
        return "%s/blob/v%s/%s/%s%s" % (
            about['__github__'],
            about['__version__'],
            about['__package_name__'],
            fn,
            linespec,
        )
Python
0.000005
@@ -791,16 +791,17 @@ _name = +%5B %22history @@ -801,16 +801,17 @@ history%22 +%5D %0Arelease
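Note: the two one-character insertions above (%5B and %5D decode to square brackets) wrap the string in a list, matching the list form already used by the sibling unihan-db conf.py earlier in this dump. The patched line should read:

releases_document_name = ["history"]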
b3dfb211d0d81210dcaa317a0d6f79b6ad249816
Update netlogo_example.py
ema_workbench/examples/netlogo_example.py
ema_workbench/examples/netlogo_example.py
'''

This example is a proof of principle for how NetLogo models can be
controlled using pyNetLogo and the ema_workbench. Note that this
example uses the NetLogo 6 version of the predator prey model that
comes with NetLogo. If you are using NetLogo 5, replace the model file
with the one that comes with NetLogo.

'''
from ema_workbench import (RealParameter, ema_logging,
                           TimeSeriesOutcome, MultiprocessingEvaluator)
from ema_workbench.connectors.netlogo import NetLogoModel

# Created on 20 mrt. 2013
#
# .. codeauthor:: jhkwakkel

if __name__ == '__main__':
    # turn on logging
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = NetLogoModel('predprey',
                         wd="./models/predatorPreyNetlogo",
                         model_file="Wolf Sheep Predation.nlogo")
    model.run_length = 100
    model.replications = 10

    model.uncertainties = [RealParameter("grass-regrowth-time", 1, 99),
                           RealParameter("initial-number-sheep", 50, 100),
                           RealParameter("initial-number-wolves", 50, 100),
                           RealParameter("sheep-reproduce", 5, 10),
                           RealParameter("wolf-reproduce", 5, 10),
                           ]

    model.outcomes = [TimeSeriesOutcome('sheep'),
                      TimeSeriesOutcome('wolves'),
                      TimeSeriesOutcome('grass')]

    # perform experiments
    n = 10

    with MultiprocessingEvaluator(model, n_processes=2,
                                  maxtasksperchild=4) as evaluator:
        results = evaluator.perform_experiments(n)
Python
0.000001
@@ -1,11 +1,11 @@ -''' +%22%22%22 %0A%0AThis e @@ -311,11 +311,31 @@ o.%0A%0A -''' +%22%22%22%0Aimport numpy as np%0A %0Afro @@ -385,16 +385,31 @@ logging, + ScalarOutcome, %0A @@ -1318,26 +1318,22 @@ omes = %5B -TimeSeries +Scalar Outcome( @@ -1339,16 +1339,99 @@ ('sheep' +, variable_name='count sheep',%0A function=np.mean ),%0A @@ -1740,8 +1740,20 @@ ents(n)%0A +%0A print()
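Note: decoding the hunks, this patch appears to switch the module docstring to double quotes, add numpy and ScalarOutcome imports, replace the 'sheep' time series with a scalar outcome aggregated by np.mean, and append a print() call. A sketch of the key patched lines; the line wrapping is my guess:

import numpy as np

from ema_workbench import (RealParameter, ema_logging, ScalarOutcome,
                           TimeSeriesOutcome, MultiprocessingEvaluator)

# ... later, inside __main__ ...
    model.outcomes = [ScalarOutcome('sheep', variable_name='count sheep',
                                    function=np.mean),
                      TimeSeriesOutcome('wolves'),
                      TimeSeriesOutcome('grass')]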
8c6f178782b6470b98536a2384391970e0cbafb9
Update config file
pelicanconf.py
pelicanconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

AUTHOR = 'Edwin Khoo'
SITENAME = 'Edwin Khoo'
SITEURL = ''

PATH = 'content'

TIMEZONE = 'America/New_York'

DEFAULT_LANG = 'en'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

# Blogroll
LINKS = None

# Social widget
SOCIAL = (('Twitter', 'https://twitter.com/edwinksl'),
          ('GitHub', 'https://github.com/edwinksl'))

DEFAULT_PAGINATION = 10

# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True

THEME = '/home/edwinksl/Git/pelican-bootstrap3'
Python
0.000001
@@ -444,16 +444,189 @@ CIAL = ( +('GitHub', 'https://github.com/edwinksl'),%0A ('Bitbucket', 'https://bitbucket.org/edwinksl'),%0A ('Facebook', 'https://www.facebook.com/edwinksl'),%0A ('Twitte @@ -674,22 +674,24 @@ (' -GitHub +LinkedIn ', 'http @@ -690,35 +690,44 @@ ', 'https:// -github +www.linkedin .com/ +in/ edwinksl'))%0A
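Note: decoding the three hunks, the SOCIAL tuple gains GitHub, Bitbucket and Facebook entries ahead of the existing Twitter entry, and the old GitHub line is rewritten as LinkedIn. The resulting widget, with indentation guessed by me rather than recorded in the row:

SOCIAL = (
    ('GitHub', 'https://github.com/edwinksl'),
    ('Bitbucket', 'https://bitbucket.org/edwinksl'),
    ('Facebook', 'https://www.facebook.com/edwinksl'),
    ('Twitter', 'https://twitter.com/edwinksl'),
    ('LinkedIn', 'https://www.linkedin.com/in/edwinksl'))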
5bfdeb94d64ffe7cbcb750dda2edc48f5a1d23b2
add a link to python course 2018
pelicanconf.py
pelicanconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
from pathlib import Path

AUTHOR = u'Python Group'
SITENAME = u'Python Group UEA'
SITEURL = ''

PATH = 'content'
STATIC_PATHS = ['extra', 'extra/robots.txt', 'pdfs', 'figures',
                'extra/favicon.ico', 'extra/custom.css']
EXTRA_PATH_METADATA = {
    'extra/favicon.ico': {'path': 'favicon.ico'},
    'extra/custom.css': {'path': 'extra/custom.css'},
    'extra/robots.txt': {'path': 'robots.txt'}
}
CUSTOM_CSS = 'extra/custom.css'

THEME = 'theme'
JINJA_ENVIRONMENT = {'extensions': ['jinja2.ext.i18n']}
BOOTSTRAP_THEME = 'cosmo'
PYGMENTS_STYLE = 'default'
OVERWRITE_NB_HEADER = True
if not Path('_nb_header.html').exists():
    Path('_nb_header.html').touch()
EXTRA_HEADER = open('_nb_header.html').read()

TIMEZONE = 'Europe/London'

DEFAULT_LANG = u'en'

PLUGIN_PATHS = ['../pelican-plugins']
PLUGINS = ['tag_cloud', 'summary', 'i18n_subsites', 'liquid_tags.img',
           'liquid_tags.video', 'liquid_tags.youtube',
           'liquid_tags.vimeo', 'liquid_tags.include_code',
           'liquid_tags.notebook']

NOTEBOOK_DIR = 'notebooks'

DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = True
SHOW_ARTICLE_CATEGORY = False
SHOW_ARTICLE_AUTHOR = True
DISPLAY_PAGES_ON_MENU = True
DISPLAY_CATEGORIES_ON_MENU = False

ARCHIVES_SAVE_AS = 'archives.html'
# DIRECT_TEMPLATES = ['index', 'categories', 'authors', 'archives']

#MENUITEMS = [
#    ('Archives', '/archives.html')
#    ('Python Course 2016',
#     'https://ueapy.github.io/enveast_python_course/'),
#    ('Meetings calendar', 'https://ueapy.github.io/meetings-calendar.html'),
#    ('Ideas for meetings', 'https://ueapy.github.io/meetings-ideas.html'),
#    ]

# SOCIAL = (('github', 'http://github.com/ueapy'))

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

# Blogroll
LINKS = (('Python Course 2016',
          'https://ueapy.github.io/enveast_python_course/'),
         ('Learn Python online', 'http://bafflednerd.com/learn-python-online'),
         ('Python Videos', 'http://pyvideo.org/'),
         ('From Python to Numpy',
          'http://www.labri.fr/perso/nrougier/from-python-to-numpy/'),
         ('EarthPy', 'http://earthpy.org/'),
         ('Python4Oceanographers',
          'https://ocefpaf.github.io/python4oceanographers/'),
         ('PyAOS', 'http://pyaos.johnny-lin.com/'),
         ('PyHOGs', 'http://pyhogs.github.io/'),
         ('Pythonic Perambulations', 'https://jakevdp.github.io/'),
         ('Meteodenny', 'https://dennissergeev.github.io/'),
         )

DEFAULT_PAGINATION = 5

# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True

# Sharing
GITHUB_URL = 'https://github.com/ueapy'
DISQUS_SITENAME = 'pythonuea'
ADDTHIS_PROFILE = "ra-564e4d3ff0b9f071"
FACEBOOK_LIKE = True
GOOGLE_PLUS_ONE = True
GOOGLE_CUSTOM_SEARCH_SIDEBAR = False
Python
0
@@ -2024,16 +2024,103 @@ INKS = ( +%0A ('Python Course 2018', 'https://ueapy.github.io/pythoncourse2018'),%0A ('Python
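Note: the hunk prepends a single entry to the LINKS blogroll. A sketch of the patched head of the tuple (my reconstruction, not a stored field; the remaining entries are unchanged):

LINKS = (
    ('Python Course 2018', 'https://ueapy.github.io/pythoncourse2018'),
    ('Python Course 2016',
     'https://ueapy.github.io/enveast_python_course/'),
    # ... remaining entries unchanged ...
    )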
5cf5c6028bd7007a867691af966f89574f02de1f
clean up setup
mojolicious/setup.py
mojolicious/setup.py
import subprocess
import sys
#import setup_util
import json
from os.path import expanduser
import os
import getpass

home = expanduser("~")

def start(args, logfile, errfile):
    # setup_util.replace_text("mojolicious/app.pl", "localhost", ""+ args.database_host +"")
    # str(args.max_threads)
    conf = {
        'database_host': args.database_host,
        'workers': args.max_threads,
    }
    with open('mojolicious/app.conf', 'w') as f:
        f.write(json.dumps(conf))

    try:
        # os.environ["MOJO_MODE"] = "production"
        subprocess.Popen("hypnotoad ./app.pl", shell=True, cwd="mojolicious", stderr=errfile, stdout=logfile)
        return 0
    except subprocess.CalledProcessError:
        return 1

def stop(logfile, errfile):
    try:
        subprocess.call("hypnotoad -s ./app.pl", shell=True, cwd="mojolicious", stderr=errfile, stdout=logfile)
        p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
        out, err = p.communicate()
        for line in out.splitlines():
            if 'hypnotoad' in line:
                pid = int(line.split(None, 2)[1])
                os.kill(pid, 15)
        return 0
    except subprocess.CalledProcessError:
        return 1
Python
0.000002
@@ -26,27 +26,8 @@ sys%0A -#import setup_util%0A impo @@ -154,125 +154,8 @@ e):%0A - # setup_util.replace_text(%22mojolicious/app.pl%22, %22localhost%22, %22%22+ args.database_host +%22%22)%0A # str(args.max_threads)%0A co @@ -181,16 +181,17 @@ se_host' + : args.d @@ -217,16 +217,23 @@ workers' + : args.m
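Note: decoding the hunks, the commented-out setup_util import and replace_text lines are removed, and extra spaces are inserted before the two dict colons, presumably to align them. My best reading of the resulting start() preamble; the exact amount of alignment whitespace is a guess:

def start(args, logfile, errfile):
    conf = {
        'database_host' : args.database_host,
        'workers'       : args.max_threads,
    }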
8efa9c14685026acd143c385de51ccc57606842d
add molopage proxy
molo/polls/models.py
molo/polls/models.py
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _

from wagtail.wagtailcore.models import Page
from wagtail.wagtailadmin.edit_handlers import (
    FieldPanel, MultiFieldPanel, FieldRowPanel,
)

from molo.core.utils import generate_slug
from molo.core.models import (
    Main, ArticlePage, SectionPage, TranslatablePageMixinNotRoutable,
    PreventDeleteMixin, index_pages_after_copy,
)

SectionPage.subpage_types += ['polls.Question', 'polls.FreeTextQuestion']
ArticlePage.subpage_types += ['polls.Question', 'polls.FreeTextQuestion']


class PollsIndexPage(Page, PreventDeleteMixin):
    parent_page_types = ['core.Main']
    subpage_types = ['polls.Question', 'polls.FreeTextQuestion']

    def copy(self, *args, **kwargs):
        site = kwargs['to'].get_site()
        main = site.root_page
        PollsIndexPage.objects.child_of(main).delete()
        super(PollsIndexPage, self).copy(*args, **kwargs)


@receiver(index_pages_after_copy, sender=Main)
def create_polls_index_page(sender, instance, **kwargs):
    if not instance.get_children().filter(
            title='Polls').exists():
        polls_index = PollsIndexPage(
            title='Polls', slug=('polls-%s' % (
                generate_slug(instance.title), )))
        instance.add_child(instance=polls_index)
        polls_index.save_revision().publish()


class Question(TranslatablePageMixinNotRoutable, Page):
    parent_page_types = [
        'polls.PollsIndexPage', 'core.SectionPage', 'core.ArticlePage']
    subpage_types = ['polls.Choice']
    short_name = models.TextField(
        null=True, blank=True,
        help_text="The short name will replace the title when "
                  "downloading your results. e.g 'How old are you' would be "
                  "replaced by 'Age' in the title column.")
    extra_style_hints = models.TextField(
        default='',
        null=True, blank=True,
        help_text=_(
            "Styling options that can be applied to this section "
            "and all its descendants"))

    show_results = models.BooleanField(
        default=True,
        help_text=_("This option allows the users to see the results.")
    )
    randomise_options = models.BooleanField(
        default=False,
        help_text=_(
            "Randomising the options allows the options to be shown" +
            " in a different order each time the page is displayed."))
    result_as_percentage = models.BooleanField(
        default=True,
        help_text=_(
            "If not checked, the results will be shown as a total" +
            " instead of a percentage.")
    )
    allow_multiple_choice = models.BooleanField(
        default=False,
        help_text=_(
            "Allows the user to choose more than one option.")
    )

    content_panels = Page.content_panels + [FieldPanel('short_name')] + [
        MultiFieldPanel([
            FieldPanel('show_results'),
            FieldPanel('randomise_options'),
            FieldPanel('result_as_percentage'),
            FieldPanel('allow_multiple_choice')],
            heading=_(
                "Question Settings",))]

    def user_choice(self, user):
        return ChoiceVote.objects.get(
            user=user, question__id=self.id).choice

    def can_vote(self, user):
        return not (ChoiceVote.objects.filter(
            user=user,
            question__id=self.get_main_language_page().id).exists())

    def choices(self):
        if self.randomise_options:
            return Choice.objects.live().child_of(self).order_by('?')
        else:
            return Choice.objects.live().child_of(self)

    def get_effective_extra_style_hints(self):
        parent_section = SectionPage.objects.all().ancestor_of(self).last()
        if parent_section:
            return self.extra_style_hints or \
                parent_section.get_effective_extra_style_hints()
        else:
            parent_article = ArticlePage.objects.all().ancestor_of(self).last()
            if parent_article:
                return self.extra_style_hints or \
                    parent_article .get_effective_extra_style_hints()
        return self.extra_style_hints


Question.settings_panels = [
    MultiFieldPanel(
        [FieldRowPanel(
            [FieldPanel('extra_style_hints')], classname="label-above")],
        "Meta")
]


class FreeTextQuestion(Question):
    parent_page_types = [
        'polls.PollsIndexPage', 'core.SectionPage', 'core.ArticlePage']
    subpage_types = []
    content_panels = Page.content_panels

    numerical = models.BooleanField(
        default=False,
        help_text=_(
            "When selected, this question will allow numerical data only")
    )

    content_panels = Page.content_panels + [MultiFieldPanel([
        FieldPanel('numerical')],
        heading=_("Question Settings",))]

    def __init__(self, *args, **kwargs):
        super(FreeTextQuestion, self).__init__(*args, **kwargs)
        self.show_results = False

    def can_vote(self, user):
        return not (FreeTextVote.objects.filter(
            user=user,
            question__id=self.get_main_language_page().id).exists())


class Choice(TranslatablePageMixinNotRoutable, Page):
    parent_page_types = ['polls.Question']
    subpage_types = []
    votes = models.IntegerField(default=0)
    choice_votes = models.ManyToManyField('ChoiceVote',
                                          related_name='choices',
                                          blank=True)
    short_name = models.TextField(
        null=True, blank=True,
        help_text="The short name will replace the title when "
                  "downloading your results. e.g '10 years old' would be "
                  "replaced by '10' in the title column.")

    promote_panels = Page.promote_panels + [FieldPanel('short_name')] + [
        FieldPanel('votes'),
    ]


class ChoiceVote(models.Model):
    user = models.ForeignKey('auth.User', related_name='choice_votes')
    choice = models.ManyToManyField('Choice', blank=True)
    question = models.ForeignKey('Question')
    submission_date = models.DateField(null=True, blank=True,
                                       auto_now_add=True)

    @property
    def answer(self):
        return ','.join([c.short_name or c.title for c in self.choice.all()])


class FreeTextVote(models.Model):
    user = models.ForeignKey('auth.User', related_name='text_votes')
    question = models.ForeignKey('FreeTextQuestion')
    answer = models.TextField(blank=True, null=True)
    submission_date = models.DateField(null=True, blank=True,
                                       auto_now_add=True)
Python
0
@@ -453,16 +453,68 @@ copy,%0A)%0A +from molo.core.molo_wagtail_models import MoloPage%0A%0A %0ASection @@ -677,16 +677,20 @@ dexPage( +Molo Page, Pr @@ -1492,32 +1492,36 @@ xinNotRoutable, +Molo Page):%0A paren @@ -5227,16 +5227,20 @@ utable, +Molo Page):%0A
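Note: the hunks import MoloPage from molo.core.molo_wagtail_models and swap the plain wagtail Page base class for it on three models, which matches the "add molopage proxy" subject. A sketch of the affected declarations after the patch (bodies elided):

from molo.core.molo_wagtail_models import MoloPage

class PollsIndexPage(MoloPage, PreventDeleteMixin): ...
class Question(TranslatablePageMixinNotRoutable, MoloPage): ...
class Choice(TranslatablePageMixinNotRoutable, MoloPage): ...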
2faae3fc05e1b6562546e382b9c5c5afff6d8377
Add debug timing to loading library and building browse cache
mopidy/local/json.py
mopidy/local/json.py
from __future__ import absolute_import, unicode_literals

import collections
import gzip
import json
import logging
import os
import re
import sys
import tempfile

import mopidy
from mopidy import local, models
from mopidy.local import search, translator

logger = logging.getLogger(__name__)


# TODO: move to load and dump in models?
def load_library(json_file):
    try:
        with gzip.open(json_file, 'rb') as fp:
            return json.load(fp, object_hook=models.model_json_decoder)
    except (IOError, ValueError) as e:
        logger.warning('Loading JSON local library failed: %s', e)
        return {}


def write_library(json_file, data):
    data['version'] = mopidy.__version__
    directory, basename = os.path.split(json_file)

    # TODO: cleanup directory/basename.* files.
    tmp = tempfile.NamedTemporaryFile(
        prefix=basename + '.', dir=directory, delete=False)

    try:
        with gzip.GzipFile(fileobj=tmp, mode='wb') as fp:
            json.dump(data, fp, cls=models.ModelJSONEncoder, indent=2,
                      separators=(',', ': '))
        os.rename(tmp.name, json_file)
    finally:
        if os.path.exists(tmp.name):
            os.remove(tmp.name)


class _BrowseCache(object):
    encoding = sys.getfilesystemencoding()
    splitpath_re = re.compile(r'([^/]+)')

    def __init__(self, uris):
        # {parent_uri: {uri: ref}}
        self._cache = {}

        for track_uri in uris:
            path = translator.local_track_uri_to_path(track_uri, b'/')
            parts = self.splitpath_re.findall(
                path.decode(self.encoding, 'replace'))
            track_ref = models.Ref.track(uri=track_uri, name=parts.pop())

            parent = 'local:directory'
            for i in range(len(parts)):
                self._cache.setdefault(parent, collections.OrderedDict())
                directory = '/'.join(parts[:i+1])
                dir_uri = translator.path_to_local_directory_uri(directory)
                dir_ref = models.Ref.directory(uri=dir_uri, name=parts[i])
                self._cache[parent][dir_uri] = dir_ref
                parent = dir_uri

            self._cache.setdefault(parent, collections.OrderedDict())
            self._cache[parent][track_uri] = track_ref

    def lookup(self, uri):
        return self._cache.get(uri, {}).values()


class JsonLibrary(local.Library):
    name = b'json'

    def __init__(self, config):
        self._tracks = {}
        self._browse_cache = None
        self._media_dir = config['local']['media_dir']
        self._json_file = os.path.join(
            config['local']['data_dir'], b'library.json.gz')

    def browse(self, path):
        if not self._browse_cache:
            return []
        return self._browse_cache.lookup(path)

    def load(self):
        logger.debug('Loading json library from %s', self._json_file)
        library = load_library(self._json_file)
        self._tracks = dict((t.uri, t) for t in library.get('tracks', []))
        self._browse_cache = _BrowseCache(sorted(self._tracks))
        return len(self._tracks)

    def lookup(self, uri):
        try:
            return self._tracks[uri]
        except KeyError:
            return None

    def search(self, query=None, limit=100, offset=0, uris=None, exact=False):
        tracks = self._tracks.values()
        # TODO: pass limit and offset into search helpers
        if exact:
            return search.find_exact(tracks, query=query, uris=uris)
        else:
            return search.search(tracks, query=query, uris=uris)

    def begin(self):
        return self._tracks.itervalues()

    def add(self, track):
        self._tracks[track.uri] = track

    def remove(self, uri):
        self._tracks.pop(uri, None)

    def close(self):
        write_library(self._json_file, {'tracks': self._tracks.values()})

    def clear(self):
        try:
            os.remove(self._json_file)
            return True
        except OSError:
            return False
Python
0
@@ -155,16 +155,28 @@ tempfile +%0Aimport time %0A%0Aimport @@ -2337,24 +2337,395 @@ .values()%0A%0A%0A +# TODO: make this available to other code?%0Aclass DebugTimer(object):%0A def __init__(self, msg):%0A self.msg = msg%0A self.start = None%0A%0A def __enter__(self):%0A self.start = time.time()%0A%0A def __exit__(self, exc_type, exc_value, traceback):%0A duration = (time.time() - self.start) * 1000%0A logger.debug('%25s: %25dms', self.msg, duration)%0A%0A%0A class JsonLi @@ -3036,20 +3036,19 @@ e(self, -path +uri ):%0A @@ -3140,20 +3140,19 @@ .lookup( -path +uri )%0A%0A d @@ -3200,25 +3200,16 @@ ing -json library - from +: %25s' @@ -3219,32 +3219,79 @@ elf._json_file)%0A + with DebugTimer('Loading tracks'):%0A library @@ -3322,32 +3322,36 @@ n_file)%0A + self._tracks = d @@ -3401,16 +3401,70 @@ ', %5B%5D))%0A + with DebugTimer('Building browse cache'):%0A @@ -3516,16 +3516,23 @@ ._tracks +.keys() ))%0A
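Note: besides renaming browse()'s argument to uri, the diff adds an import of time and a small context manager used to log millisecond timings. Decoded from the hunks, so this should be close to verbatim, modulo indentation:

# TODO: make this available to other code?
class DebugTimer(object):
    def __init__(self, msg):
        self.msg = msg
        self.start = None

    def __enter__(self):
        self.start = time.time()

    def __exit__(self, exc_type, exc_value, traceback):
        duration = (time.time() - self.start) * 1000
        logger.debug('%s: %dms', self.msg, duration)

load() then wraps its two phases in the timer:

    def load(self):
        logger.debug('Loading library: %s', self._json_file)
        with DebugTimer('Loading tracks'):
            library = load_library(self._json_file)
            self._tracks = dict((t.uri, t) for t in library.get('tracks', []))
        with DebugTimer('Building browse cache'):
            self._browse_cache = _BrowseCache(sorted(self._tracks.keys()))
        return len(self._tracks)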
8d935a2141b8f5c080d922189df7d79bb838b3a0
Use default router implementation
mopidy_lux/router.py
mopidy_lux/router.py
import os

from tinydb import TinyDB
from tinydb.storages import JSONStorage
from tinydb.middlewares import CachingMiddleware

import tornado.web


class LuxRouter(object):
    def __init__(self, _config):
        self.config = _config
        self._db = TinyDB(
            self.config['lux']['db_file'],
            storage=CachingMiddleware(JSONStorage)
        )

    def setup_routes(self):
        args = dict(
            config=self.config,
            db=self._db
        )

        return [
            (r"/lux/(.*)", tornado.web.StaticFileHandler, {
                'path': os.path.join(os.path.dirname(__file__), 'static'),
                'default_filename': 'index.html'
            }),
            (r"/lux/playlist", Playlists, args),
            (r"/lux/loved", Loved, args),
            (r"/lux/discover", EchoNestsDiscover, args),
        ]


class Playlists(tornado.web.RequestHandler):
    """
    Permanent storage for playlists
    """
    pass


class Loved(tornado.web.RequestHandler):
    """
    Permanent storage for loved songs
    """
    pass


class EchoNestsDiscover(tornado.web.RequestHandler):
    """
    Discover tracks based on mood or similarity
    """
    pass
Python
0
@@ -143,96 +143,109 @@ eb%0A%0A -%0Aclass LuxRouter(object):%0A def __init__(self, _config):%0A self.config = _config +from mopidy import http%0A%0A%0Aclass LuxRouter(http.Router):%0A name = 'lux'%0A%0A def setup_routes(self): %0A @@ -249,22 +249,16 @@ -self._ db = Tin @@ -369,37 +369,8 @@ ) -%0A%0A def setup_routes(self): %0A @@ -438,14 +438,8 @@ db= -self._ db%0A @@ -484,30 +484,34 @@ (r%22/ -lux +%25s /(.*)%22 -, tornado.web + %25 self.name, http .Sta @@ -684,19 +684,18 @@ (r%22/ -lux +%25s /playlis @@ -696,16 +696,28 @@ laylist%22 + %25 self.name , Playli @@ -748,18 +748,29 @@ (r%22/ -lux +%25s /loved%22 + %25 self.name , Lo @@ -797,19 +797,18 @@ (r%22/ -lux +%25s /discove @@ -809,16 +809,28 @@ iscover%22 + %25 self.name , EchoNe
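Note: decoding the hunks, the router is rebased onto mopidy's http.Router with a name attribute, the TinyDB handle moves into setup_routes as a local, and the hard-coded /lux/ prefixes become %s interpolations of self.name. A sketch of the patched class; indentation is mine:

from mopidy import http


class LuxRouter(http.Router):
    name = 'lux'

    def setup_routes(self):
        db = TinyDB(
            self.config['lux']['db_file'],
            storage=CachingMiddleware(JSONStorage)
        )
        args = dict(
            config=self.config,
            db=db
        )

        return [
            (r"/%s/(.*)" % self.name, http.StaticFileHandler, {
                'path': os.path.join(os.path.dirname(__file__), 'static'),
                'default_filename': 'index.html'
            }),
            (r"/%s/playlist" % self.name, Playlists, args),
            (r"/%s/loved" % self.name, Loved, args),
            (r"/%s/discover" % self.name, EchoNestsDiscover, args),
        ]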
32481a906e00a1c5d301e6227ab43cf8feba31e0
fix double-import trap
json5/__init__.py
json5/__init__.py
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A pure Python implementation of the JSON5 configuration language."""

from . import tool
from .lib import load, loads, dump, dumps
from .version import VERSION

__all__ = [
    'VERSION',
    'dump',
    'dumps',
    'load',
    'loads',
    'tool',
]
Python
0.000212
@@ -666,27 +666,8 @@ %22%22%0A%0A -from . import tool%0A from @@ -816,18 +816,6 @@ s',%0A - 'tool',%0A %5D%0A
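Note: the two hunks drop the eager 'from . import tool' (the double-import trap named in the subject) and remove 'tool' from __all__. The patched module tail should read:

from .lib import load, loads, dump, dumps
from .version import VERSION

__all__ = [
    'VERSION',
    'dump',
    'dumps',
    'load',
    'loads',
]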
4c86f347347cd3b9289bfa2e26e75f429ba9cba5
Disable win_delay_load_hook to avoid issues with node10
binding.gyp
binding.gyp
{
	"includes": ["common.gypi"],
	"variables": {
		"shared_gdal%": "false",
		"runtime_link%": "shared",
		"enable_logging%": "false"
	},
	"targets": [
		{
			"target_name": "gdal",
			"type": "loadable_module",
			"product_prefix": "",
			"product_extension": "node",
			"sources": [
				"src/utils/fast_buffer.cpp",
				"src/utils/typed_array.cpp",
				"src/utils/string_list.cpp",
				"src/utils/number_list.cpp",
				"src/utils/warp_options.cpp",
				"src/utils/ptr_manager.cpp",
				"src/node_gdal.cpp",
				"src/gdal_common.cpp",
				"src/gdal_dataset.cpp",
				"src/gdal_driver.cpp",
				"src/gdal_rasterband.cpp",
				"src/gdal_majorobject.cpp",
				"src/gdal_feature.cpp",
				"src/gdal_feature_defn.cpp",
				"src/gdal_field_defn.cpp",
				"src/gdal_geometry.cpp",
				"src/gdal_point.cpp",
				"src/gdal_linestring.cpp",
				"src/gdal_linearring.cpp",
				"src/gdal_polygon.cpp",
				"src/gdal_geometrycollection.cpp",
				"src/gdal_multipoint.cpp",
				"src/gdal_multilinestring.cpp",
				"src/gdal_multipolygon.cpp",
				"src/gdal_layer.cpp",
				"src/gdal_coordinate_transformation.cpp",
				"src/gdal_spatial_reference.cpp",
				"src/gdal_warper.cpp",
				"src/gdal_algorithms.cpp",
				"src/collections/dataset_bands.cpp",
				"src/collections/dataset_layers.cpp",
				"src/collections/layer_features.cpp",
				"src/collections/layer_fields.cpp",
				"src/collections/feature_fields.cpp",
				"src/collections/feature_defn_fields.cpp",
				"src/collections/geometry_collection_children.cpp",
				"src/collections/polygon_rings.cpp",
				"src/collections/linestring_points.cpp",
				"src/collections/rasterband_overviews.cpp",
				"src/collections/rasterband_pixels.cpp",
				"src/collections/gdal_drivers.cpp"
			],
			"include_dirs": [
				"<!(node -e \"require('nan')\")"
			],
			"defines": [
				"PLATFORM='<(OS)'",
				"_LARGEFILE_SOURCE",
				"_FILE_OFFSET_BITS=64"
			],
			"ldflags": [
				"-Wl,-z,now"
			],
			"xcode_settings": {
				"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
				"OTHER_LDFLAGS":[
					"-Wl,-bind_at_load"
				]
			},
			"conditions": [
				["enable_logging == 'true'", {
					"defines": [
						"ENABLE_LOGGING=1"
					]
				}],
				["shared_gdal == 'false'", {
					"dependencies": [
						"deps/libgdal/libgdal.gyp:libgdal"
					]
				}, {
					"conditions": [
						['OS == "win"', {
							"libraries": [
								"-lsecur32.lib",
								"-lws2_32.lib",
								"-lodbccp32.lib",
								"-lodbc32.lib"
							],
							"include_dirs": [
								"deps/libgdal/arch/win",
								"deps/libgdal/gdal",
								"deps/libgdal/gdal/alg",
								"deps/libgdal/gdal/gcore",
								"deps/libgdal/gdal/port",
								"deps/libgdal/gdal/bridge",
								"deps/libgdal/gdal/frmts",
								"deps/libgdal/gdal/frmts/zlib",
								"deps/libgdal/gdal/ogr",
								"deps/libgdal/gdal/ogr/ogrsf_frmts",
								"deps/libgdal/gdal/ogr/ogrsf_frmts/mem"
							],
						}, {
							"conditions": [
								["runtime_link == 'static'", {
									"libraries": ["<!@(gdal-config --dep-libs)","<!@(gdal-config --libs)"]
								}, {
									"libraries": ["<!@(gdal-config --libs)"]
								}]
							],
							"cflags_cc": ["<!@(gdal-config --cflags)"],
							"xcode_settings": {
								"OTHER_CPLUSPLUSFLAGS":[
									"<!@(gdal-config --cflags)"
								]
							}
						}]
					]
				}]
			]
		},
		{
			"target_name": "action_after_build",
			"type": "none",
			"dependencies": [ "<(module_name)" ],
			"copies": [
				{
					"files": [ "<(PRODUCT_DIR)/gdal.node" ],
					"destination": "<(module_path)"
				}
			]
		}
	]
}
Python
0
@@ -200,24 +200,59 @@ le_module%22,%0A +%09%09%09%22win_delay_load_hook%22: %22false%22,%0A %09%09%09%22product_
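Note: the hunk inserts one tab-indented line (%09 decodes to a tab) directly under the target's type, opting out of the delay-load hook that breaks under node10. Resulting target head, reconstructed by hand:

{
	"target_name": "gdal",
	"type": "loadable_module",
	"win_delay_load_hook": "false",
	"product_prefix": "",
	"product_extension": "node",
	# remaining keys unchanged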
b2e7eeafb263c12a333056e6a8239d2534833a22
Allow specifying --couchbase-root
binding.gyp
binding.gyp
{
  'targets': [{
    'target_name': 'couchbase_impl',
    'defines': ['LCBUV_EMBEDDED_SOURCE'],
    'conditions': [
      [ 'OS=="win"', {
        'variables': {
          'couchbase_root%': 'C:/couchbase'
        },
        'include_dirs': [
          '<(couchbase_root)/include/',
        ],
        'link_settings': {
          'libraries': [
            '-l<(couchbase_root)/lib/libcouchbase.lib',
          ],
        },
        'copies': [{
          'files': [ '<(couchbase_root)/bin/libcouchbase.dll' ],
          'destination': '<(module_root_dir)/build/Release/',
        },],
        'configurations': {
          'Release': {
            'msvs_settings': {
              'VCCLCompilerTool': {
                'ExceptionHandling': '2',
                'RuntimeLibrary': 0,
              },
            },
          },
        },
      }],
      ['OS=="mac"', {
        'xcode_settings': {
          'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
        },
      }],
      ['OS!="win"', {
        'link_settings': {
          'libraries': [
            '$(EXTRA_LDFLAGS)',
            '-lcouchbase',
          ],
        },
        'cflags': [
          '-g',
          '-fPIC',
          '-Wall',
          '-Wextra',
          '-Wno-unused-variable',
          '-Wno-unused-function',
          '$(EXTRA_CFLAGS)',
          '$(EXTRA_CPPFLAGS)',
          '$(EXTRA_CXXFLAGS)',
        ],
        'cflags_c':[
          '-pedantic',
          '-std=gnu99',
        ],
        'conditions': [
          [ 'couchbase_root!=""', {
            'include_dirs': [ '<(couchbase_root)/include' ],
            'libraries+': [
              '-L<(couchbase_root)/lib',
              '-Wl,-rpath=<(couchbase_root)/lib'
            ]
          }]
        ],
      }]
    ],
    'sources': [
      'src/couchbase_impl.cc',
      'src/control.cc',
      'src/constants.cc',
      'src/namemap.cc',
      'src/cookie.cc',
      'src/commandbase.cc',
      'src/commands.cc',
      'src/exception.cc',
      'src/options.cc',
      'src/cas.cc',
      'src/uv-plugin-all.c'
    ],
    'include_dirs': [
      './',
    ],
  }]
}
Python
0
@@ -975,32 +975,105 @@ %5B'OS!=%22win%22', %7B%0A + 'variables' : %7B%0A 'couchbase_root%25' : '%22%22'%0A %7D,%0A%0A 'link_se @@ -2189,24 +2189,41 @@ './',%0A + './src/io'%0A %5D,%0A %7D%5D%0A
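Note: the main hunk declares a default for the couchbase_root gyp variable inside the OS!="win" branch, so the pre-existing couchbase_root!="" condition only fires when the flag is actually passed. A sketch of the patched branch head (the rest of the branch is unchanged):

['OS!="win"', {
  'variables' : {
    'couchbase_root%' : '""'
  },
  # link_settings, cflags, cflags_c and conditions unchanged
}]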
c91147dee9d9910cfc8e6c2e078d388f19d6ab1e
fix build error in FreeBSD (#25)
binding.gyp
binding.gyp
{
  "targets": [{
    "target_name": "node_snap7",
    "include_dirs": [
      "<!(node -e \"require('nan')\")",
      "./src"
    ],
    "sources": [
      "./src/node_snap7.cpp",
      "./src/node_snap7_client.cpp",
      "./src/node_snap7_server.cpp",
      "./src/snap7.cpp"
    ],
    "conditions": [
      ["OS=='win'", {
        "libraries": ["-lws2_32.lib", "-lwinmm.lib"],
        "defines": ["_WINSOCK_DEPRECATED_NO_WARNINGS"]
      }]
    ],
    "dependencies": [
      "snap7"
    ]
  }, {
    "target_name": "snap7",
    "type": "static_library",
    "include_dirs": [
      "./deps/snap7/src/sys",
      "./deps/snap7/src/core",
      "./deps/snap7/src/lib"
    ],
    "sources": [
      "./deps/snap7/src/sys/snap_msgsock.cpp",
      "./deps/snap7/src/sys/snap_sysutils.cpp",
      "./deps/snap7/src/sys/snap_tcpsrvr.cpp",
      "./deps/snap7/src/sys/snap_threads.cpp",
      "./deps/snap7/src/core/s7_client.cpp",
      "./deps/snap7/src/core/s7_isotcp.cpp",
      "./deps/snap7/src/core/s7_partner.cpp",
      "./deps/snap7/src/core/s7_peer.cpp",
      "./deps/snap7/src/core/s7_server.cpp",
      "./deps/snap7/src/core/s7_text.cpp",
      "./deps/snap7/src/core/s7_micro_client.cpp",
      "./deps/snap7/src/lib/snap7_libmain.cpp"
    ],
    "conditions": [
      ["OS=='linux'", {
        "cflags_cc": ["-fPIC", "-pedantic", "-fexceptions"],
        "cflags_cc!": ["-fno-exceptions"]
      }],
      ["OS=='win'", {
        "msvs_settings": {
          "VCCLCompilerTool": {
            "ExceptionHandling": 1,
            "AdditionalOptions": ["/EHsc"]  # ExceptionHandling=1 is not enough for some versions
          }
        },
        "defines!": ["_HAS_EXCEPTIONS=0"],
        "defines": ["_WINSOCK_DEPRECATED_NO_WARNINGS"]
      }],
      ["OS=='mac'", {
        "xcode_settings": {
          "GCC_ENABLE_CPP_EXCEPTIONS": "YES",
          "GCC_DYNAMIC_NO_PIC": "NO",
          "OTHER_CFLAGS": ["-pedantic"]
        }
      }]
    ]
  }]
}
Python
0
@@ -1523,16 +1523,33 @@ ='linux' + or OS=='freebsd' %22, %7B%0A
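Note: the single hunk widens the linux-only compiler-flag condition to cover FreeBSD as well. The patched condition should read:

["OS=='linux' or OS=='freebsd'", {
  "cflags_cc": ["-fPIC", "-pedantic", "-fexceptions"],
  "cflags_cc!": ["-fno-exceptions"]
}],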
95bde4f783a4d11627d8bc64e24b383e945bdf01
Revert local CDN location set by Jodok
src/web/tags.py
src/web/tags.py
# -*- coding: utf-8 -*-
# vim: set fileencodings=utf-8
__docformat__ = "reStructuredText"

import json
import datetime

from django.template.base import Library
from django.utils.safestring import mark_safe

register = Library()

#CDN_URL = 'https://cdn.crate.io'
CDN_URL = 'http://localhost:8001'


def media(context, media_url):
    """
    Get the path for a media file.
    """
    if media_url.startswith('http://') or media_url.startswith('https://'):
        url = media_url
    elif media_url.startswith('/'):
        url = u'{0}{1}'.format(CDN_URL, media_url)
    else:
        url = u'{0}/media/{1}'.format(CDN_URL, media_url)
    return url
register.simple_tag(takes_context=True)(media)
Python
0
@@ -224,17 +224,16 @@ rary()%0A%0A -# CDN_URL @@ -260,42 +260,8 @@ .io' -%0ACDN_URL = 'http://localhost:8001' %0A%0Ade
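Note: the two hunks uncomment the cdn.crate.io line and delete the localhost override, leaving a single assignment after the patch:

CDN_URL = 'https://cdn.crate.io'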
06f328b5843d83946b353697745ec82c7741ee3e
Allow colons in record label URLs (for timestamps such as '2013-02-13_08:42:00').
src/web/urls.py
src/web/urls.py
""" Define URL dispatching for the Sumatra web interface. """ from django.conf.urls.defaults import * from django.views.generic import list_detail from django.conf import settings from sumatra.web.views import Timeline P = { 'project': r'(?P<project>\w+[\w ]*)', 'label': r'(?P<label>\w+[\w|\-\.]*)', } urlpatterns = patterns('sumatra.web.views', (r'^$', 'list_projects'), (r'^%(project)s/$' % P, 'list_records'), (r'^%(project)s/about/$' % P, 'show_project'), (r'^%(project)s/delete/$' % P, 'delete_records'), (r'^%(project)s/tag/$' % P, 'list_tags'), (r'^%(project)s/%(label)s/$' % P, 'record_detail'), (r'^%(project)s/%(label)s/datafile$' % P, 'show_file'), (r'^%(project)s/%(label)s/download$' % P, 'download_file'), (r'^%(project)s/%(label)s/image$' % P, 'show_image'), (r'^%(project)s/%(label)s/diff/(?P<package>[\w_]+)*$' % P, 'show_diff'), (r'^%(project)s/simulation$' % P, 'run_sim'), (r'^%(project)s/settings$' % P, 'settings'), (r'^%(project)s/search$' % P, 'search'), (r'^%(project)s/settags$' % P, 'set_tags'), ) urlpatterns += patterns('', (r'^timeline/(?P<user>\w+[\w ]*)/', Timeline.as_view()), (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}), )
Python
0.000007
@@ -298,16 +298,17 @@ %5B%5Cw%7C%5C-%5C. +: %5D*)',%0A%7D%0A @@ -1298,12 +1298,13 @@ ': True%7D),%0A) +%0A
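Note: the first hunk adds a colon to the label character class, matching timestamps such as 2013-02-13_08:42:00 from the subject; the second merely appends a trailing newline. Patched pattern dict, reconstructed:

P = {
    'project': r'(?P<project>\w+[\w ]*)',
    'label': r'(?P<label>\w+[\w|\-\.:]*)',
}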
07c4b2cc8ea3da02ae88c8ee5df8d211c775ad87
fix minor issues
juliet/builder.py
juliet/builder.py
#!/usr/bin/python3

import os, logging, shutil
from distutils.dir_util import copy_tree
from jinja2 import Template, FileSystemLoader
from juliet import paths

class Builder:
    def __init__(self, jinja_env, build_args, src, dest, noclean):
        """ Constructor for class Builder. Takes a jinja Environment and the
        build arguments dictionnary as argument. """

        self.jinja_env = jinja_env
        self.build_args = build_args
        self.source = src
        self.destination = dest
        self.noclean = noclean

    def build(self):
        """ Build and install the website as described in the configuration. """

        if(not self.noclean):
            logging.info("Cleaning build folder " + self.destination)
            shutil.rmtree(self.destination, ignore_errors=True)
            os.makedirs(self.destination, exist_ok=True)

        logging.info("Building static pages...")
        self._build_statics()

        logging.info("Building posts and pages...")
        self._build_posts()
        self._build_pages()

        logging.info("Installing assets...")
        self._install_data()

    def _write(self, path, string):
        """ Write passed string to passed path. """

        if(not self._is_safe_path(path)):
            raise ValueError("Trying to build element to unsafe path ("
                             + path + " outside of " + self.destination + ").")

        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))  # may raise OSError

        with open(path, 'w') as stream:
            stream.write(string)

    def _format_args_and_render(self, page, template):
        """ Render passed template as a page/post template and return it. """

        rendering_args = self.build_args
        rendering_args["page"] = page
        rendering_args["content"] = page["body"]
        return template.render(rendering_args)

    def _install_data(self):
        """ Install data and assets. """

        assets_path = os.path.join(self.source, paths.ASSETS_PATH)
        data_path = os.path.join(self.source, paths.THEMES_PATH,
                                 self.build_args["site"]["theme"], "data")

        copy_tree(data_path, os.path.join(self.destination, paths.DATA_BUILDDIR))
        copy_tree(assets_path, os.path.join(self.destination, paths.ASSETS_BUILDDIR))

    def _build_statics(self):
        """ Build static pages and install them. """

        staticsdir = os.path.join(self.source, paths.THEMES_PATH,
                                  self.build_args["site"]["theme"], "statics")

        for element in os.listdir(staticsdir):
            html = self.jinja_env.get_template(os.path.join("statics", element)).render(self.build_args)
            self._write(os.path.join(self.destination, element), html)

    def _build_posts(self):
        """ Build posts and install them. """

        builddir = os.path.join(self.destination, paths.POSTS_BUILDDIR)
        os.makedirs(builddir, exist_ok=True)

        template = self.jinja_env.get_template(os.path.join("templates", "posts.html"))

        for post in self.build_args["posts"]:
            html = self._format_args_and_render(post, template)

            if("permalink" in post.keys()):
                self._build_permalinked(post, html)
            else:
                self._write(os.path.join(builddir, post["slug"]), html)

    def _build_pages(self):
        """ Build pages and install them. """

        template = self.jinja_env.get_template(os.path.join("templates", "pages.html"))

        for page in self.build_args["pages"]:
            html = self._format_args_and_render(page, template)

            if("permalink" in page.keys()):
                self._build_permalinked(page, html)
            else:
                self._write(os.path.join(self.destination, post["slug"]), html)

    def _build_permalinked(self, p, html):
        """ Build page/post to permalink. """

        if(not "permalink" in p.keys()):
            raise ValueError("Called _build_permalinked with header that doesn't define permalink entry")

        self._write(os.path.join(self.destination, p["permalink"]), html)

    def _is_safe_path(self, path, follow_symlinks=False):
        """ Check directories before writing to avoid directory traversal. """

        if follow_symlinks:
            return os.path.realpath(path).startswith(self.destination)

        return os.path.abspath(path).startswith(self.destination)
Python
0.000024
@@ -1983,22 +1983,20 @@ -assets +data _path = @@ -2031,19 +2031,61 @@ ths. -ASSET +THEME S_PATH +, self.build_args%5B%22site%22%5D%5B%22theme%22%5D, %22data%22 )%0A @@ -2082,36 +2082,38 @@ %22data%22)%0A -data +assets _path = os.path. @@ -2140,64 +2140,66 @@ ths. -THEME +ASSET S_PATH -, self.build_args%5B%22site%22%5D%5B%22theme%22%5D, %22data%22)%0A%0A +)%0A%0A if (os.path.exists(data_path)):%0A @@ -2276,16 +2276,62 @@ LDDIR))%0A + if (os.path.exists(assets_path)):%0A @@ -2508,19 +2508,21 @@ statics -dir +_path = os.pa @@ -2603,24 +2603,150 @@ %22statics%22)%0A%0A + # statics folder might very well not exist if theme doesn't define one%0A if (os.path.exists(statics_path)):%0A for @@ -2778,14 +2778,20 @@ tics -dir):%0A +_path):%0A @@ -2891,16 +2891,20 @@ d_args)%0A + @@ -4541,32 +4541,49 @@ ath).startswith( +os.path.realpath( self.destination @@ -4575,32 +4575,33 @@ elf.destination) +) %0A return @@ -4633,16 +4633,32 @@ rtswith( +os.path.abspath( self.des @@ -4650,25 +4650,26 @@ bspath(self.destination) +) %0A
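Note: decoding the hunks, _install_data and _build_statics now tolerate themes that ship no data, assets or statics folders by guarding each copy and iteration with os.path.exists, and _is_safe_path canonicalises self.destination before the prefix check. A sketch of the patched _install_data; indentation and wrapping are mine:

    def _install_data(self):
        """ Install data and assets. """

        data_path = os.path.join(self.source, paths.THEMES_PATH,
                                 self.build_args["site"]["theme"], "data")
        assets_path = os.path.join(self.source, paths.ASSETS_PATH)

        if (os.path.exists(data_path)):
            copy_tree(data_path, os.path.join(self.destination, paths.DATA_BUILDDIR))
        if (os.path.exists(assets_path)):
            copy_tree(assets_path, os.path.join(self.destination, paths.ASSETS_BUILDDIR))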
10f787963b82422abd47b167b6b3ec9e40842e29
update check_online
fabfile/hadoop.py
fabfile/hadoop.py
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4

import os
import yaml
from fabric.api import task, run, sudo, put, task, \
    parallel, execute, env
from cuisine import file_exists, file_write, file_append, \
    text_strip_margin, mode_sudo, file_update, ssh_keygen, \
    ssh_authorize, dir_ensure


@task
def status():
    """ Check the status """

    # Read cofiguration file to cfg
    cfg_dir = os.path.dirname(__file__).replace('fabfile','ymlfile')
    cfg_file = cfg_dir + '/hadoop.yml'
    f = open(cfg_file)
    cfg = yaml.safe_load(f)
    f.close()

    # Set ssh user and have ssh not check .ssh/known_hosts
    env.user = cfg['admin_user']
    env.disable_known_hosts = True

    # Set hosts
    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])

    # Execute check_status on the hosts.
    execute(check_status, hosts=hosts)


def check_status():
    sudo('jps', user='hdfs')


@task
def install():
    """ Install Hadoop Cluster """

    cfg_dir = os.path.dirname(__file__).replace('fabfile','ymlfile')
    cfg_file = cfg_dir + '/hadoop.yml'
    f = open(cfg_file)
    cfg = yaml.safe_load(f)
    f.close()

    env.user = cfg['admin_user']
    env.disable_known_hosts = True

    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])

    execute(pkg_install,hosts=hosts)
    execute(update_etc_hosts,cfg_hosts=cfg['hosts'],hosts=hosts)
    execute(update_roles,cfg_hosts=cfg['hosts'],hosts=hosts)

    sites = ['core-site',
             'hdfs-site',
             'mapred-site']
    for site in sites:
        execute(update_config,cfg_name=site,cfg_list=cfg[site],hosts=hosts)

    execute(update_env_sh,hosts=hosts)

    admin_node = cfg['admin_node']
    admin_node_ip = cfg['hosts'][admin_node]['ipaddr']
    output = execute(create_hdfs_sshkey,hosts=[admin_node_ip])
    key = output[admin_node_ip]
    execute(update_authorized_keys,key=key,hosts=hosts)

    execute(update_dir,cfg['update_dir_list'],hosts=hosts)


@parallel
def update_dir(update_dir_list):
    with mode_sudo():
        for dir in update_dir_list:
            owner = update_dir_list[dir]['owner']
            mode = update_dir_list[dir]['mode']
            dir_ensure(dir, mode=mode, owner=owner)


@parallel
def update_authorized_keys(key):
    with mode_sudo():
        ssh_authorize(user='hdfs',key=key)


def create_hdfs_sshkey():
    with mode_sudo():
        ssh_keygen(user='hdfs',keytype='rsa')
    key = sudo('cat /usr/lib/hadoop/.ssh/id_rsa.pub')
    return key


@parallel
def update_env_sh():
    """ Update /usr/lib/hadoop/conf/hadoop-env.sh """

    file = '/usr/lib/hadoop/conf/hadoop-env.sh'
    with mode_sudo():
        file_update(file, _update_env_sh_like_this)

def _update_env_sh_like_this(text):
    res = []
    for line in text.split('\n'):
        if line.strip().startswith("# export JAVA_HOME"):
            res.append("export JAVA_HOME=/usr/lib/jvm/java-7-oracle")
        else:
            res.append(line)
    return '\n'.join(res) + '\n'


@parallel
def update_config(cfg_name, cfg_list):
    """ Update xml files """

    lines = []
    header = text_strip_margin(
        """
        |<?xml version="1.0"?>
        |<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
        |
        |<!-- Put site-specific property overrides in this file. -->
        |
        |<configuration>
        |""")
    lines.append(header)

    for entry in cfg_list:
        property = text_strip_margin(
            """
            | <property>
            |   <name>{0}</name>
            |   <value>{1}</value>
            | </property>
            |""".format(entry, cfg_list[entry]))
        lines.append(property)

    footer = '</configuration>\n'
    lines.append(footer)

    file = '/usr/lib/hadoop/conf/' + cfg_name + '.xml'
    text = '\n'.join(lines) + '\n'
    file_write(file, text, sudo=True)


@parallel
def update_etc_hosts(cfg_hosts):
    """Update /etc/hosts """

    file = '/etc/hosts'
    lines = []
    lines.append("127.0.0.1 localhost")
    for host in cfg_hosts:
        lines.append("{0} {1}".format(cfg_hosts[host]['ipaddr'], host))
    text = '\n'.join(lines) + '\n'
    file_write(file, text, sudo=True)


@parallel
def update_roles(cfg_hosts):
    """ Update /usr/lib/hadoop/conf/[masters/slaves] """

    dir = '/usr/lib/hadoop/conf/'
    masters = []
    slaves = []
    for host in cfg_hosts:
        if cfg_hosts[host]['group'] == 'masters':
            masters.append(host)
        elif cfg_hosts[host]['group'] == 'slaves':
            slaves.append(host)

    # Update masters
    file = dir + 'masters'
    text = '\n'.join(masters) + '\n'
    file_write(file, text, sudo=True)

    # Update slaves
    file = dir + 'slaves'
    text = '\n'.join(slaves) + '\n'
    file_write(file, text, sudo=True)


@parallel
def pkg_install():
    ''':hostname - Install Hadoop Master'''

    file_name = '/usr/lib/jvm/java-7-oracle'
    if not file_exists(file_name):
        sudo('add-apt-repository -y ppa:webupd8team/java')
        sudo('add-apt-repository -y ppa:hadoop-ubuntu/stable')
        sudo('apt-get update && sudo apt-get -y upgrade')
        sudo('echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections')
        sudo('echo debconf shared/accepted-oracle-license-v1-1 seen true | sudo debconf-set-selections')
        sudo('apt-get -y install oracle-java7-installer')
        sudo('apt-get -y install hadoop')
    else:
        print '{0} exists. Oracle Java is already installed.'.format(file_name)


@task
@parallel
def check_online():

    sudo('cat .ssh/authorized_keys > /root/.ssh/authorized_keys')

    yml_path = __file__.replace('fabfile','ymlfile').rstrip(r'\py|\pyc') + 'yml'
    f = open(yml_path)
    cfg = yaml.safe_load(f)
    f.close()

    env.user = cfg['admin_user']
    env.disable_known_hosts = True

    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])

    execute(hello,hosts=hosts)


@parallel
def hello():
    run('hostname && id && echo hello')
Python
0.000002
@@ -5697,75 +5697,8 @@ ):%0A%0A - sudo('cat .ssh/authorized_keys %3E /root/.ssh/authorized_keys')%0A%0A
977481ed9fab0eb703fe721b25702bba9c53223b
test update sites
fabfile/hadoop.py
fabfile/hadoop.py
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4

import yaml
from fabric.api import task, run, sudo, put, task, \
    parallel, execute, env
from cuisine import file_exists, file_write, file_append, \
    text_strip_margin, mode_sudo, file_update


@task
def install():
    yml_path = __file__.replace('fabfile','ymlfile').rstrip(r'\py|\pyc') + 'yml'
    f = open(yml_path)
    cfg = yaml.safe_load(f)
    f.close()

    env.user = cfg['remote_user']
    env.disable_known_hosts = True

    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])

    #execute(pkg_install,hosts=hosts)
    #execute(update_etc_hosts,cfg_hosts=cfg['hosts'],hosts=hosts)
    #execute(update_roles,cfg_hosts=cfg['hosts'],hosts=hosts)

    #sites = ['core-site',
    #        'hdfs-site',
    #        'mapred-site']
    #for site in sites:
    #    execute(update_config,cfg_name=site,cfg_list=cfg[site],hosts=hosts)

    @parallel
    execute(file_update,'/usr/lib/hadoop/conf/hadoop-env.sh',update_env_sh,hosts=hosts)

def update_env_sh(text):
    """ Update /usr/lib/hadoop/conf/hadoop-env.sh"""

    res = []
    for line in text.split('\n'):
        if line.strip().startswith("# export JAVA_HOME"):
            res.append("export JAVA_HOME=/usr/lib/jvm/java-7-oracle")
        else:
            res.append(line)
    return '\n'.join(res) + '\n'


@task
@parallel
def update_config(cfg_name, cfg_list):
    """ Update xml files """

    lines = []
    header = text_strip_margin(
        """
        |<?xml version="1.0"?>
        |<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
        |
        |<!-- Put site-specific property overrides in this file. -->
        |
        |<configuration>
        |""")
    lines.append(header)

    for entry in cfg_list:
        property = text_strip_margin(
            """
            | <property>
            |   <name>{0}</name>
            |   <value>{1}</value>
            | </property>
            |""".format(entry, cfg_list[entry]))
        lines.append(property)

    footer = '</configuration>\n'
    lines.append(footer)

    file = '/usr/lib/hadoop/conf/' + cfg_name + '.xml'
    text = '\n'.join(lines) + '\n'
    file_write(file, text, sudo=True)


@task
@parallel
def update_etc_hosts(cfg_hosts):
    """Update /etc/hosts """

    file = '/etc/hosts'
    lines = []
    lines.append("127.0.0.1 localhost")
    for host in cfg_hosts:
        lines.append("{0} {1}".format(cfg_hosts[host]['ipaddr'], host))
    text = '\n'.join(lines) + '\n'
    file_write(file, text, sudo=True)


@task
@parallel
def update_roles(cfg_hosts):
    """ Update /usr/lib/hadoop/conf/[masters/slaves] """

    dir = '/usr/lib/hadoop/conf/'
    masters = []
    slaves = []
    for host in cfg_hosts:
        if cfg_hosts[host]['group'] == 'masters':
            masters.append(host)
        elif cfg_hosts[host]['group'] == 'slaves':
            slaves.append(host)

    # Update masters
    file = dir + 'masters'
    text = '\n'.join(masters) + '\n'
    file_write(file, text, sudo=True)

    # Update slaves
    file = dir + 'slaves'
    text = '\n'.join(slaves) + '\n'
    file_write(file, text, sudo=True)


@task
@parallel
def pkg_install():
    ''':hostname - Install Hadoop Master'''

    file_name = '/usr/lib/jvm/java-7-oracle'
    if not file_exists(file_name):
        sudo('add-apt-repository -y ppa:webupd8team/java')
        sudo('add-apt-repository -y ppa:hadoop-ubuntu/stable')
        sudo('apt-get update && sudo apt-get -y upgrade')
        sudo('echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections')
        sudo('echo debconf shared/accepted-oracle-license-v1-1 seen true | sudo debconf-set-selections')
        sudo('apt-get -y install oracle-java7-installer')
        sudo('apt-get -y install hadoop')
    else:
        print '{0} exists. Oracle Java is already installed.'.format(file_name)

    #file_names = [
    #        '/etc/hosts',
    #        '/usr/lib/hadoop/conf/core-site.xml',
    #        '/usr/lib/hadoop/conf/hdfs-site.xml',
    #        '/usr/lib/hadoop/conf/mapred-site.xml',
    #        '/usr/lib/hadoop/conf/hadoop-env.sh',
    #        '/usr/lib/hadoop/conf/slaves'
    #        ]
    #for file_name in file_names:
    #    put('templates'+ file_name, file_name, use_sudo=True)


@task
@parallel
def enable_root_login():
    sudo('cat .ssh/authorized_keys > /root/.ssh/authorized_keys')


@parallel
def hello():
    run('hostname && id && echo hello')
Python
0
@@ -993,42 +993,93 @@ -@parallel%0A execute(file_update, +execute(update_env_sh,hosts=hosts)%0A%0A@task%0A@parallel%0Adef update_env_sh():%0A%0A file = '/us @@ -1110,17 +1110,44 @@ -env.sh' -, +%0A file_update(file, _sub_ update_e @@ -1155,27 +1155,20 @@ v_sh -,hosts=hosts )%0A%0Adef +_sub_ upda
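Note: my best reading of the hunks, which is uncertain around the spacing inside the escaped text: the broken decorator-on-a-call is replaced by a proper task that delegates to a helper whose name appears to be assembled as _sub_update_env_sh. Reconstruction:

    execute(update_env_sh,hosts=hosts)

@task
@parallel
def update_env_sh():

    file = '/usr/lib/hadoop/conf/hadoop-env.sh'
    file_update(file, _sub_update_env_sh)

def _sub_update_env_sh(text):
    # body unchanged from the old update_env_sh(text) above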
5baa2075d7f12d5123542dca31f430e7e112631b
test update sites
fabfile/hadoop.py
fabfile/hadoop.py
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4

import yaml
from fabric.api import task, run, sudo, put, task, \
    parallel, execute, env
from cuisine import file_exists, file_write, file_append, \
    text_strip_margin, mode_sudo


@task
def install():
    yml_path = __file__.replace('fabfile','ymlfile').rstrip(r'\py|\pyc') + 'yml'
    f = open(yml_path)
    cfg = yaml.safe_load(f)
    f.close()

    env.user = cfg['remote_user']
    env.disable_known_hosts = True

    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])

    #execute(pkg_install,hosts=hosts)
    #execute(update_etc_hosts,cfg_hosts=cfg['hosts'],hosts=hosts)
    #execute(update_roles,cfg_hosts=cfg['hosts'],hosts=hosts)
    execute(update_config,cfg_name='core-site',cfg_list=cfg['core-site'],hosts=hosts)


@task
@parallel
def update_config(cfg_name, cfg_list):
    """ Update xml files """

    lines = []
    header = text_strip_margin(
        """
        |<?xml version="1.0"?>
        |<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
        |
        |<!-- Put site-specific property overrides in this file. -->
        |
        |<configuration>
        |""")
    lines.append(header)

    for entry in cfg_list:
        property = text_strip_margin(
            """
            | <property>
            |   <name>{0}</name>
            |   <value>{1}</value>
            | </property>
            |""".format(entry, cfg_list[entry]))
        lines.append(property)

    footer = '</configuration>\n'
    lines.append(footer)

    file = '/usr/lib/hadoop/conf/' + cfg_name + '.xml'
    text = '\n'.join(lines) + '\n'
    file_write(file, text, sudo=True)


@task
@parallel
def update_etc_hosts(cfg_hosts):
    """Update /etc/hosts """

    file = '/etc/hosts'
    lines = []
    lines.append("127.0.0.1 localhost")
    for host in cfg_hosts:
        lines.append("{0} {1}".format(cfg_hosts[host]['ipaddr'], host))
    text = '\n'.join(lines) + '\n'
    file_write(file, text, sudo=True)


@task
@parallel
def update_roles(cfg_hosts):
    """ Update /usr/lib/hadoop/conf/[masters/slaves] """

    dir = '/usr/lib/hadoop/conf/'
    masters = []
    slaves = []
    for host in cfg_hosts:
        if cfg_hosts[host]['group'] == 'masters':
            masters.append(host)
        elif cfg_hosts[host]['group'] == 'slaves':
            slaves.append(host)

    # Update masters
    file = dir + 'masters'
    text = '\n'.join(masters) + '\n'
    file_write(file, text, sudo=True)

    # Update slaves
    file = dir + 'slaves'
    text = '\n'.join(slaves) + '\n'
    file_write(file, text, sudo=True)


@task
@parallel
def pkg_install():
    ''':hostname - Install Hadoop Master'''

    file_name = '/usr/lib/jvm/java-7-oracle'
    if not file_exists(file_name):
        sudo('add-apt-repository -y ppa:webupd8team/java')
        sudo('add-apt-repository -y ppa:hadoop-ubuntu/stable')
        sudo('apt-get update && sudo apt-get -y upgrade')
        sudo('echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections')
        sudo('echo debconf shared/accepted-oracle-license-v1-1 seen true | sudo debconf-set-selections')
        sudo('apt-get -y install oracle-java7-installer')
        sudo('apt-get -y install hadoop')
    else:
        print '{0} exists. Oracle Java is already installed.'.format(file_name)

    #file_names = [
    #        '/etc/hosts',
    #        '/usr/lib/hadoop/conf/core-site.xml',
    #        '/usr/lib/hadoop/conf/hdfs-site.xml',
    #        '/usr/lib/hadoop/conf/mapred-site.xml',
    #        '/usr/lib/hadoop/conf/hadoop-env.sh',
    #        '/usr/lib/hadoop/conf/slaves'
    #        ]
    #for file_name in file_names:
    #    put('templates'+ file_name, file_name, use_sudo=True)


@task
@parallel
def enable_root_login():
    sudo('cat .ssh/authorized_keys > /root/.ssh/authorized_keys')


@parallel
def hello():
    run('hostname && id && echo hello')
Python
0
@@ -763,24 +763,131 @@ osts=hosts)%0A + sites = %5B'core-site',%0A 'hdfs-site',%0A 'mapred-site'%5D%0A for site in sites:%0A execute( @@ -909,27 +909,20 @@ fg_name= -'core- site -' ,cfg_lis @@ -927,27 +927,20 @@ ist=cfg%5B -'core- site -' %5D,hosts=
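Note: the hunks replace the single hard-coded 'core-site' invocation with a loop over the three site files. Patched install() tail, reconstructed by hand:

    sites = ['core-site',
             'hdfs-site',
             'mapred-site']
    for site in sites:
        execute(update_config,cfg_name=site,cfg_list=cfg[site],hosts=hosts)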
485a363cf0ff10b0c4a3b4836e629474791f5374
remove intermediate assignments to `text_width`
ftfy/formatting.py
ftfy/formatting.py
from unicodedata import east_asian_width, combining, category, normalize


def character_width(char):
    """
    Determine the width that a character is likely to be displayed as in
    a monospaced terminal. The width will always be 0, 1, or 2.

    We are assuming that the character will appear in a modern, Unicode-aware
    terminal emulator, where CJK characters -- plus characters designated
    "fullwidth" by CJK encodings -- will span two character cells. (Maybe
    it's an over-simplification to call these displays "monospaced".)

    When a character width is "Ambiguous", we assume it to span one character
    cell, because we assume that the monospaced display you're using is
    designed mostly to display Western characters with CJK characters as the
    exception. This assumption will go wrong, for example, if your display is
    actually on a Japanese flip-phone... but let's assume it isn't.

    Combining marks and formatting codepoints do not advance the cursor, so
    their width is 0.

    If the character is a particular kind of control code -- the kind
    represented by the lowest bytes of ASCII, with Unicode category Cc -- the
    idea of it having a "width" probably doesn't even apply, because it will
    probably have some other effect on your terminal. For example, there's no
    sensible width for a line break. We return the default of 1 in the
    absence of anything sensible to do there.

    >>> character_width('車')
    2
    >>> character_width('A')
    1
    >>> character_width('\N{SOFT HYPHEN}')
    0
    """
    if combining(char) != 0:
        # Combining characters don't advance the cursor; they modify the
        # previous character instead.
        return 0
    elif east_asian_width(char) in 'FW':
        # Characters designated Wide (W) or Fullwidth (F) will take up two
        # columns in many Unicode-aware terminal emulators.
        return 2
    elif category(char) == 'Cf':
        # Characters in category Cf are un-printable formatting characters
        # that do not advance the cursor, such as zero-width spaces or
        # right-to-left marks.
        return 0
    else:
        return 1


def monospaced_width(text):
    """
    Return the number of character cells that this string is likely to occupy
    when displayed in a monospaced, modern, Unicode-aware terminal emulator.
    We refer to this as the "display width" of the string.

    This can be useful for formatting text that may contain non-spacing
    characters, or CJK characters that take up two character cells.

    >>> monospaced_width('ちゃぶ台返し')
    12
    >>> len('ちゃぶ台返し')
    6
    """
    # NFC-normalize the text first, so that we don't need special cases for
    # Hangul jamo.
    text = normalize('NFC', text)
    return sum([character_width(char) for char in text])


def display_ljust(text, width, fillchar=' '):
    """
    Return `text` left-justified in a Unicode string whose display width,
    in a monospaced terminal, should be at least `width` character cells.
    The rest of the string will be padded with `fillchar`, which must be
    a width-1 character.

    "Left" here means toward the beginning of the string, which may actually
    appear on the right in an RTL context. This is similar to the use of the
    word "left" in "left parenthesis".

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_ljust(line, 20, '▒'))
    Table flip▒▒▒▒▒▒▒▒▒▒
    (╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
    ちゃぶ台返し▒▒▒▒▒▒▒▒
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    text_width = monospaced_width(text)
    padding = max(0, width - text_width)
    return text + fillchar * padding


def display_rjust(text, width, fillchar=' '):
    """
    Return `text` right-justified in a Unicode string whose display width,
    in a monospaced terminal, should be at least `width` character cells.
    The rest of the string will be padded with `fillchar`, which must be
    a width-1 character.

    "Right" here means toward the end of the string, which may actually be
    on the left in an RTL context. This is similar to the use of the word
    "right" in "right parenthesis".

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_rjust(line, 20, '▒'))
    ▒▒▒▒▒▒▒▒▒▒Table flip
    ▒▒▒▒▒▒▒(╯°□°)╯︵ ┻━┻
    ▒▒▒▒▒▒▒▒ちゃぶ台返し
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    text_width = monospaced_width(text)
    padding = max(0, width - text_width)
    return fillchar * padding + text


def display_center(text, width, fillchar=' '):
    """
    Return `text` centered in a Unicode string whose display width, in a
    monospaced terminal, should be at least `width` character cells. The rest
    of the string will be padded with `fillchar`, which must be a width-1
    character.

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_center(line, 20, '▒'))
    ▒▒▒▒▒Table flip▒▒▒▒▒
    ▒▒▒(╯°□°)╯︵ ┻━┻▒▒▒▒
    ▒▒▒▒ちゃぶ台返し▒▒▒▒
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    text_width = monospaced_width(text)
    padding = max(0, width - text_width)
    left_padding = padding // 2
    right_padding = padding - left_padding
    return fillchar * left_padding + text + fillchar * right_padding
Python
0.000316
@@ -3645,48 +3645,8 @@ 1%22)%0A - text_width = monospaced_width(text)%0A @@ -3666,34 +3666,46 @@ (0, width - -text_width +monospaced_width(text) )%0A return @@ -4548,48 +4548,8 @@ 1%22)%0A - text_width = monospaced_width(text)%0A @@ -4569,34 +4569,46 @@ (0, width - -text_width +monospaced_width(text) )%0A return @@ -5260,48 +5260,8 @@ 1%22)%0A - text_width = monospaced_width(text)%0A @@ -5289,18 +5289,30 @@ h - -text_width +monospaced_width(text) )%0A
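Expanded (%0A is a newline), the three parallel hunks apply the same refactor to display_ljust, display_rjust and display_center: the text_width temporary is deleted and the call is inlined, so

    text_width = monospaced_width(text)
    padding = max(0, width - text_width)

becomes

    padding = max(0, width - monospaced_width(text))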
a1cbeb7f7a03d0618ec9f60f65308168e521af18
Add encodings for imul instructions to RISC-V.
meta/isa/riscv/encodings.py
meta/isa/riscv/encodings.py
""" RISC-V Encodings. """ from __future__ import absolute_import from cretonne import base from .defs import RV32, RV64 from .recipes import OPIMM, OPIMM32, OP, OP32, R, Rshamt, I # Basic arithmetic binary instructions are encoded in an R-type instruction. for inst, inst_imm, f3, f7 in [ (base.iadd, base.iadd_imm, 0b000, 0b0000000), (base.isub, None, 0b000, 0b0100000), (base.bxor, base.bxor_imm, 0b100, 0b0000000), (base.bor, base.bor_imm, 0b110, 0b0000000), (base.band, base.band_imm, 0b111, 0b0000000) ]: RV32.enc(inst.i32, R, OP(f3, f7)) RV64.enc(inst.i64, R, OP(f3, f7)) # Immediate versions for add/xor/or/and. if inst_imm: RV32.enc(inst_imm.i32, I, OPIMM(f3)) RV64.enc(inst_imm.i64, I, OPIMM(f3)) # 32-bit ops in RV64. RV64.enc(base.iadd.i32, R, OP32(0b000, 0b0000000)) RV64.enc(base.isub.i32, R, OP32(0b000, 0b0100000)) # There are no andiw/oriw/xoriw variations. RV64.enc(base.iadd_imm.i32, I, OPIMM32(0b000)) # Dynamic shifts have the same masking semantics as the cton base instructions. for inst, inst_imm, f3, f7 in [ (base.ishl, base.ishl_imm, 0b001, 0b0000000), (base.ushr, base.ushr_imm, 0b101, 0b0000000), (base.sshr, base.sshr_imm, 0b101, 0b0100000), ]: RV32.enc(inst.i32.i32, R, OP(f3, f7)) RV64.enc(inst.i64.i64, R, OP(f3, f7)) RV64.enc(inst.i32.i32, R, OP32(f3, f7)) # Allow i32 shift amounts in 64-bit shifts. RV64.enc(inst.i64.i32, R, OP(f3, f7)) RV64.enc(inst.i32.i64, R, OP32(f3, f7)) # Immediate shifts. RV32.enc(inst_imm.i32, Rshamt, OPIMM(f3, f7)) RV64.enc(inst_imm.i64, Rshamt, OPIMM(f3, f7)) RV64.enc(inst_imm.i32, Rshamt, OPIMM32(f3, f7))
Python
0.000003
@@ -172,16 +172,44 @@ shamt, I +%0Afrom .settings import use_m %0A%0A# Basi @@ -1785,20 +1785,301 @@ t, OPIMM32(f3, f7))%0A +%0A# %22M%22 Standard Extension for Integer Multiplication and Division.%0A# Gated by the %60use_m%60 flag.%0ARV32.enc(base.imul.i32, R, OP(0b000, 0b0000001), isap=use_m)%0ARV64.enc(base.imul.i64, R, OP(0b000, 0b0000001), isap=use_m)%0ARV64.enc(base.imul.i32, R, OP32(0b000, 0b0000001), isap=use_m)%0A
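Expanded, the delta makes two insertions: a new import after the .recipes line, and a gated block at the end of the file. This is a direct decode of the hunk text:

    from .settings import use_m

    # "M" Standard Extension for Integer Multiplication and Division.
    # Gated by the `use_m` flag.
    RV32.enc(base.imul.i32, R, OP(0b000, 0b0000001), isap=use_m)
    RV64.enc(base.imul.i64, R, OP(0b000, 0b0000001), isap=use_m)
    RV64.enc(base.imul.i32, R, OP32(0b000, 0b0000001), isap=use_m)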
baea8c22208ea26c7ed59d82caed3fa9def4e699
Version 1.1.0
fedora/release.py
fedora/release.py
'''
Information about this python-fedora release
'''

NAME = 'python-fedora'
VERSION = '1.0.0'
DESCRIPTION = 'Python modules for interacting with Fedora Services'
LONG_DESCRIPTION = '''
The Fedora Project runs many different services.  These services help us to
package software, develop new programs, and generally put together the distro.
This package contains software that helps us do that.
'''
AUTHOR = 'Toshio Kuratomi, Luke Macken, Ricky Elrod, Ralph Bean, Patrick Uiterwijk'
EMAIL = 'admin@fedoraproject.org'
COPYRIGHT = '2007-2020 Red Hat, Inc.'
URL = 'https://github.com/fedora-infra/python-fedora'
DOWNLOAD_URL = 'https://pypi.python.org/pypi/python-fedora'
LICENSE = 'LGPLv2+'
Python
0
@@ -84,17 +84,17 @@ ON = '1. -0 +1 .0'%0ADESC
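Expanded, this one-character hunk bumps the release version and changes nothing else:

    VERSION = '1.1.0'  # was VERSION = '1.0.0'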
a639e3cd15d52947bba44c0efa0f96a4247d961a
Change Lines import to lace
blmath/geometry/primitives/polyline.py
blmath/geometry/primitives/polyline.py
import numpy as np
from blmath.util.decorators import setter_property

class Polyline(object):
    '''
    Represent the geometry of a polygonal chain in 3-space. The chain may be
    open or closed, and there are no constraints on the geometry. For
    example, the chain may be simple or self-intersecting, and the points
    need not be unique.

    Mutable by setting polyline.v or polyline.closed or calling a method
    like polyline.partition_by_length().

    This replaces the functions in blmath.geometry.segment.

    Note this class is distinct from lace.lines.Lines, which allows
    arbitrary edges and enables visualization. To convert to a Lines object,
    use the as_lines() method.

    '''
    def __init__(self, v, closed=False):
        '''
        v: An array-like thing containing points in 3-space.
        closed: True indicates a closed chain, which has an extra segment
          connecting the last point back to the first point.

        '''
        # Avoid invoking _update_edges before setting closed and v.
        self.__dict__['closed'] = closed
        self.v = v

    def copy(self):
        '''
        Return a copy of this polyline.

        '''
        v = None if self.v is None else np.copy(self.v)
        return self.__class__(v, closed=self.closed)

    def as_lines(self):
        '''
        Return a Lines instance with our vertices and edges.

        '''
        from bodylabs.mesh.lines import Lines
        return Lines(v=self.v, e=self.e)

    def to_dict(self, decimals=3):
        return {
            'vertices': [np.around(v, decimals=decimals).tolist() for v in self.v],
            'edges': self.e,
        }

    def _update_edges(self):
        if self.v is None:
            self.__dict__['e'] = None
            return

        num_vertices = self.v.shape[0]
        num_edges = num_vertices if self.closed else num_vertices - 1
        edges = np.vstack([np.arange(num_edges), np.arange(num_edges) + 1]).T
        if self.closed:
            edges[-1][1] = 0
        edges.flags.writeable = False

        self.__dict__['e'] = edges

    @setter_property
    def v(self, val):  # setter_property incorrectly triggers method-hidden. pylint: disable=method-hidden
        '''
        Update the vertices to a new array-like thing containing points
        in 3D space. Set to None for an empty polyline.

        '''
        from blmath.numerics import as_numeric_array
        self.__dict__['v'] = as_numeric_array(val, dtype=np.float64, shape=(-1, 3), allow_none=True)
        self._update_edges()

    @setter_property
    def closed(self, val):
        '''
        Update whether the polyline is closed or open.

        '''
        self.__dict__['closed'] = val
        self._update_edges()

    @property
    def e(self):
        '''
        Return a np.array of edges. Derived automatically from self.v and
        self.closed whenever those values are set.

        '''
        return self.__dict__['e']

    @property
    def segment_lengths(self):
        '''
        The length of each of the segments.

        '''
        if self.e is None:
            return np.empty((0,))
        return ((self.v[self.e[:, 1]] - self.v[self.e[:, 0]]) ** 2.0).sum(axis=1) ** 0.5

    @property
    def total_length(self):
        '''
        The total length of all the segments.

        '''
        return np.sum(self.segment_lengths)

    def partition_by_length(self, max_length, ret_indices=False):
        '''
        Subdivide each line segment longer than max_length with equal-length
        segments, such that none of the new segments are longer than
        max_length.

        ret_indices: If True, return the indices of the original vertices.
          Otherwise return self for chaining.

        '''
        from blmath.geometry.segment import partition_segment
        lengths = self.segment_lengths
        num_segments_needed = np.ceil(lengths / max_length)

        indices_of_orig_vertices = []
        new_v = np.empty((0, 3))

        for i, num_segments in enumerate(num_segments_needed):
            start_point, end_point = self.v[self.e[i]]

            indices_of_orig_vertices.append(len(new_v))

            # In the simple case, one segment, or degenerate case, with
            # a repeated vertex, we do not need to subdivide.
            if num_segments <= 1:
                new_v = np.vstack((new_v, start_point.reshape(-1, 3)))
            else:
                new_v = np.vstack((new_v, partition_segment(start_point, end_point, np.int(num_segments), endpoint=False)))

        if not self.closed:
            indices_of_orig_vertices.append(len(new_v))
            new_v = np.vstack((new_v, self.v[-1].reshape(-1, 3)))

        self.v = new_v

        return np.array(indices_of_orig_vertices) if ret_indices else self

    def apex(self, axis):
        '''
        Find the most extreme point in the direction of the axis provided.

        axis: A vector, which is an 3x1 np.array.

        '''
        from blmath.geometry.apex import apex
        return apex(self.v, axis)
Python
0
@@ -1426,21 +1426,12 @@ rom -bodylabs.mesh +lace .lin
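Expanded, the single hunk rewrites the import inside as_lines(), pointing it at the external lace package that the class docstring already references:

        from lace.lines import Lines  # was: from bodylabs.mesh.lines import Lines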
8000607b4e0b4a65f020f233ed112f0554cc5d42
remove fxn untested
database.py
database.py
from pymongo import MongoClient
from bson.objectid import ObjectId
from datetime import datetime

connection = MongoClient()
db = connection['database']

def create_student(student_name, student_email):
    students = db['students']
    if students.find_one({'student_email': student_email}) == None:
        new_student = {'student_name': student_name,
                       'student_email': student_email,
                       'preferred_name': '',
                       'student_phone': '',
                       'address': '',
                       'parent_name': '',
                       'parent_phone': '',
                       'parent_email': '',
                       'counselor_name': '',
                       'counselor_phone': '',
                       'counselor_email': ''}
        students.insert_one(new_student)

def check_contact_info(student_email):
    students = db['students']
    student = students.find_one({'student_email': student_email})
    return student

def add_contact_info(student_email, preferred_name, student_phone, address, parent_name, parent_phone, parent_email, counselor_name, counselor_phone, counselor_email):
    students = db['students']
    students.find_one_and_update({'student_email': student_email},
                                 {'$set':{'preferred_name': preferred_name,
                                          'student_phone': student_phone,
                                          'address': address,
                                          'parent_name': parent_name,
                                          'parent_phone': parent_phone,
                                          'parent_email': parent_email,
                                          'counselor_name': counselor_name,
                                          'counselor_phone': counselor_phone,
                                          'counselor_email': counselor_email}})

def create_teacher(teacher_name, teacher_email):
    teachers = db['teachers']
    if teachers.find_one({'teacher_email': teacher_email}) == None:
        new_teacher = {'teacher_name': teacher_name,
                       'teacher_email': teacher_email}
        teachers.insert_one(new_teacher)

def create_class(teacher_name, teacher_email, course_code, class_name, class_period):
    classes = db['classes']
    new_class = {'teacher_name': teacher_name,
                 'teacher_email': teacher_email,
                 'course_code': course_code,
                 'class_name': class_name,
                 'class_period': class_period,
                 '_id': ObjectId()}
    classes.insert_one(new_class)
    db['teachers'].find_one_and_update({'teacher_email': teacher_email},
                                       {'$push':{'classes': new_class.get('_id')}})

def find_classes(teacher_email):
    classes = db['classes']
    return classes.find({'teacher_email': teacher_email})

def find_class(class_id):
    classes = db['classes']
    #return classes.find_one({'_id': class_id})
    ret_class = classes.find_one({'_id': ObjectId(class_id)})
    return ret_class

def all_classes_in_period(class_periods): #class_period in string form (array to allow multiple checkboxes)
    classes = db['classes']
    class_by_period = []
    for x in xrange(len(class_periods)):
        class_by_period.append(class_periods[x][1:])
    return classes.find({'class_period': {"$in": class_by_period}})

#print all_classes_in_period(['p1', 'p6'])

def add_to_class(student_email, class_id):
    classes = db['classes']
    classes.find_one_and_update({'_id' : ObjectId(class_id)},
                                {'$addToSet': {'students': student_email}})

def remove_from_class(student_email, class_id):
    classes = db['classes']
    classes.find_one_and_update({'_id' : ObjectId(class_id)},
                                {'$pull': {'students': student_email}})

def all_students_in_class(class_id):
    students = []
    emails = db['classes'].find_one({'_id': ObjectId(class_id)}).get('students')
    if emails == None:
        return {}
    for email in emails:
        students.append(db['students'].find_one({'student_email': email}))
    return students
Python
0.000339
@@ -2711,24 +2711,129 @@ t('_id')%7D%7D)%0A +def delete_class(class_id):%0A classes = db%5B'classes'%5D%0A return classes.remove(%7B'_id': class_id%7D)%0A %0Adef find_cl
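Expanded, the hunk adds a helper between create_class and find_classes rather than removing one (the subject presumably describes an untested "remove" function). Unlike the neighboring helpers, it queries with the raw class_id instead of wrapping it in ObjectId():

    def delete_class(class_id):
        classes = db['classes']
        return classes.remove({'_id': class_id})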