Dataset schema (one row per commit):

  column        type            range
  commit        stringlengths   40 - 40
  subject       stringlengths   1 - 3.25k
  old_file      stringlengths   4 - 311
  new_file      stringlengths   4 - 311
  old_contents  stringlengths   0 - 26.3k
  lang          stringclasses   3 values
  proba         float64         0 - 1
  diff          stringlengths   0 - 7.82k
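Each row that follows pairs a commit (hash, subject, touched file before and after, full pre-change contents, language, probability score) with a compact diff whose newlines and brackets appear percent-encoded (%0A, %5B, and so on). As a minimal sketch of how such a table could be loaded and filtered, assuming the Hugging Face `datasets` library and a hypothetical local JSONL export named `commits.jsonl`:

```python
# Minimal sketch: load a JSONL export of these rows and keep only the
# Python commits whose subject mentions a fix.
# Assumptions: the `datasets` library is installed and the records live in
# a local file called commits.jsonl (hypothetical name, not from the source).
from datasets import load_dataset

ds = load_dataset("json", data_files="commits.jsonl", split="train")

python_fixes = ds.filter(
    lambda row: row["lang"] == "Python" and "fix" in row["subject"].lower()
)

for row in python_fixes.select(range(min(3, len(python_fixes)))):
    print(row["commit"][:8], row["subject"])
```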
e7c3c5f91be44d10502e5f6260d40ce89391cc14
write only to dict if splitting possible and split on ': ' instead of just :
sauron/metrics/HttpdServerStatus.py
sauron/metrics/HttpdServerStatus.py
#! /usr/bin/env python # # Copyright (c) 2014 johnny-die-tulpe # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import re import os import httplib2 from sauron import logger from sauron.metrics import Metric, MetricException class HttpdServerStatus(Metric): AVAILABLE_METRICS_DATA = { 'CPULoad' : 'Count', 'AvgReqPerSec' : 'Count/Second', 'CurrentReqPerSec' : 'Count/Second', 'BytesPerSec' : 'Bytes/Second', 'BusyWorkers' : 'Count', 'IdleWorkers' : 'Count', 'FreeClients' : 'Count' } def __init__(self, name, url, **kwargs): Metric.__init__(self, name, **kwargs) self.reconfig(name, url, **kwargs) def reconfig(self, name, url, metrics, interval='60', **kwargs): Metric.reconfig(self, name, **kwargs) self.name = name self.url = url self.interval = interval if not isinstance(metrics, list): raise MetricException('metrics should be a list') self.serverstatus_metrics = metrics for metric in self.serverstatus_metrics: try: assert HttpdServerStatus.AVAILABLE_METRICS_DATA.has_key(metric) except AssertionError: raise MetricException('Metric is not available, choose out of %s' % (", ".join(HttpdServerStatus.AVAILABLE_METRICS_DATA.keys()))) try: server_status = httplib2.Http() except Exception as e: raise MetricException(e) def count_freeclients(self, value): return str(value.count('.')) def get_values_of_serverstatus(self, serverstatus_key, value): value = value.strip() serverstatus_key = serverstatus_key.strip() metricmap = {'Scoreboard' : 'FreeClients', 'ReqPerSec' : 'AvgReqPerSec', 'Total Accesses' : 'CurrentReqPerSec'} valuemap = {'Scoreboard' : self.count_freeclients, 'Total Accesses' : self.calculate_req_per_second} metric_mapper = lambda x: metricmap[x] if metricmap.has_key(x) else x metricname = metric_mapper(serverstatus_key) if not metricname in self.serverstatus_metrics: return None, None value_mapper = lambda x,y: valuemap[x](y) if valuemap.has_key(x) else y value = value_mapper(serverstatus_key, value) if value: if str(value).startswith('.'): value = '0' + value value = "%.3f" % (float(value)) return metricname, value def calculate_req_per_second(self, total_httpd_access): current_access = float(total_httpd_access) # only send results if uptime greater than 70 seconds if int(self.serverstatus_result['Uptime']) > 70: if self.tempdict.has_key('last_httpd_total_access') and current_access > self.tempdict['last_httpd_total_access']: result = abs(current_access - self.tempdict['last_httpd_total_access']) / self.interval else: # fallback to aggregated req per sec if no last_httpd_total_access value is 
available logger.info('no last state of total accesses or it\'s greater than current, falling back to apaches requests per seconds') result = self.serverstatus_result['ReqPerSec'] else: logger.info('uptime from webserver not enough (>70 seconds), still in warump phase, we dont send any data!') result = None self.tempdict['last_httpd_total_access'] = current_access return result def values(self): try: server_status = httplib2.Http() response, content = server_status.request(self.url, 'GET') result = {} self.serverstatus_result = dict([line.split(':') for line in content.splitlines()]) for k,v in self.serverstatus_result.iteritems(): metricname, value = self.get_values_of_serverstatus(k,v) if value: result[metricname] = (value, HttpdServerStatus.AVAILABLE_METRICS_DATA[metricname]) return {'results' : result } except Exception as e: raise MetricException(e)
Python
0.000006
@@ -4519,16 +4519,17 @@ split(': + ') for l @@ -4555,16 +4555,32 @@ tlines() + if ': ' in line %5D)%0A
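Decoded, the diff above only writes a server-status line into the dict when it contains ': ', and splits on ': ' rather than a bare ':'. A rough standalone sketch of that parsing pattern (sample input invented for illustration):

```python
# Sketch of the parsing change: keep only lines that contain ': ' and split
# on that two-character separator instead of a bare ':'.
# maxsplit=1 is an extra safeguard added here; the original splits without it.
sample = """Total Accesses: 1234
Uptime: 900
BusyWorkers: 3
Scoreboard: __W.._K.."""

serverstatus_result = dict(
    line.split(': ', 1) for line in sample.splitlines() if ': ' in line
)
print(serverstatus_result)
```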
56151ad549e82206c3894e8c945c00d4bac24ab5
Complete error message.
source/memex/rest.py
source/memex/rest.py
from rest_framework import routers, serializers, viewsets, parsers, filters from django.core.exceptions import ValidationError from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile from base.models import Project from apps.crawl_space.models import Crawl, CrawlModel class SlugModelSerializer(serializers.ModelSerializer): slug = serializers.SlugField(required=False, read_only=True) class ProjectSerializer(SlugModelSerializer): url = serializers.CharField(read_only=True) class Meta: model = Project class CrawlSerializer(SlugModelSerializer): # Expose these fields, but only as read only. id = serializers.ReadOnlyField() seeds_list = serializers.FileField(use_url=False) status = serializers.CharField(read_only=True) config = serializers.CharField(read_only=True) index_name = serializers.CharField(read_only=True) url = serializers.CharField(read_only=True) pages_crawled = serializers.IntegerField(read_only=True) harvest_rate = serializers.FloatField(read_only=True) location = serializers.CharField(read_only=True) def validate_crawler(self, value): if value == "ache" and not self.initial_data.get("crawl_model"): raise serializers.ValidationError("Ache crawls require a Crawl Model.") return value class Meta: model = Crawl class CrawlModelSerializer(SlugModelSerializer): model = serializers.FileField(use_url=False) features = serializers.FileField(use_url=False) url = serializers.CharField(read_only=True) class Meta: model = CrawlModel """ Viewset Classes. Filtering is provided by django-filter. Backend settings are in common_settings.py under REST_FRAMEWORK. Setting is: 'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',) This backend is supplied to every viewset by default. Alter query fields by adding or removing items from filter_fields """ class ProjectViewSet(viewsets.ModelViewSet): queryset = Project.objects.all() serializer_class = ProjectSerializer filter_fields = ('id', 'slug', 'name',) class CrawlViewSet(viewsets.ModelViewSet): queryset = Crawl.objects.all() serializer_class = CrawlSerializer filter_fields = ('id', 'slug', 'name', 'description', 'status', 'project', 'crawl_model', 'crawler',) def create(self, request): if request.data.get('textseeds', False) and not request.FILES.get("seeds_list", False): request.data["seeds_list"] = SimpleUploadedFile( 'seeds', bytes(request.data.get("textseeds")), 'utf-8' ) return super(CrawlViewSet, self).create(request) class CrawlModelViewSet(viewsets.ModelViewSet): queryset = CrawlModel.objects.all() serializer_class = CrawlModelSerializer filter_fields = ('id', 'slug', 'name', 'project',) def destroy(self, request, pk=None): crawls = Crawl.objects.all().filter(crawl_model=pk) if crawls: raise serializers.ValidationError("Cannot delete") return super(CrawlModelViewSet, self).destroy(request) router = routers.DefaultRouter() router.register(r"projects", ProjectViewSet) router.register(r"crawls", CrawlViewSet) router.register(r"crawl_models", CrawlModelViewSet)
Python
0
@@ -3052,57 +3052,179 @@ -raise serializers.ValidationError(%22Cannot delete%22 +message = %22The Crawl Model is being used by the following Crawls and cannot be deleted: %22%0A raise serializers.ValidationError(%7Bmessage: %5Bx.name for x in crawls%5D%7D )%0A
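Decoded, this diff replaces the bare `ValidationError("Cannot delete")` with a payload that names the crawls still using the model. A framework-free sketch of building that payload (class and names are stand-ins; the real code passes the dict to `serializers.ValidationError`):

```python
# Sketch of the richer error payload: name the crawls that block deletion.
class Crawl:
    def __init__(self, name):
        self.name = name

crawls = [Crawl("news-sites"), Crawl("forums")]  # stand-ins for the queryset

if crawls:
    message = ("The Crawl Model is being used by the following Crawls "
               "and cannot be deleted: ")
    payload = {message: [x.name for x in crawls]}
    print(payload)
```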
74e64f740769363b05b2987baa776ab783af8f40
add is_published if blogpost is already published in update form
homepage/blogs/views.py
homepage/blogs/views.py
import logging from django.views.generic import ( ListView, DetailView, CreateView, UpdateView, ) from django.contrib.syndication.views import Feed from django.contrib.auth.mixins import LoginRequiredMixin from django.core.urlresolvers import reverse from django.shortcuts import get_object_or_404 from .forms import BlogPostForm from .models import ( Blog, BlogPost, ) from .viewmixins import ( RenderPostMixin, AddRequestUserMixin, PostChangeMixin, ) logger = logging.getLogger(__name__) class BlogsListView(ListView): model = Blog template_name = 'blogs/blog_list.html' context_object_name = 'blogs' class BlogDetailView(DetailView): model = BlogPost template_name = 'blogs/blog_detail.html' context_object_name = 'blog' class PostsListView(RenderPostMixin, ListView): model = BlogPost template_name = 'blogs/blogpost_list.html' context_object_name = 'blogposts' paginate_by = 5 def get_queryset(self): self.blog = get_object_or_404(Blog, slug=self.kwargs['slug']) if not self.request.user.is_authenticated(): queryset = BlogPost.published.filter(blog=self.blog).order_by('-pub_date') else: queryset = BlogPost.objects.filter(blog=self.blog).order_by('-created') return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['blog'] = self.blog for blogpost in context[self.context_object_name]: self.render_post(blogpost) return context class LatestEntriesFeed(RenderPostMixin, Feed): def get_object(self, request, *args, **kwargs): self.object = get_object_or_404(Blog, slug=kwargs['slug']) def title(self): return self.object.title def description(self): return self.object.description def link(self): return reverse('blogs:blogpost_feed', kwargs={'slug': self.object.slug}) def items(self): queryset = (BlogPost.published .filter(blog=self.object) .order_by('-pub_date')) return queryset[:5] def item_title(self, item): return item.title def item_description(self, item): self.render_post(item, javascript=False) return item.description class PostDetailView(RenderPostMixin, DetailView): model = BlogPost template_name = 'blogs/blogpost_detail.html' context_object_name = 'blogpost' slug_url_kwarg = 'slug' query_pk_and_slug = True def get_queryset(self): if not self.request.user.is_authenticated(): queryset = BlogPost.published.order_by('-pub_date') else: queryset = BlogPost.objects.order_by('-created') return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) blogpost = context[self.context_object_name] self.render_post(blogpost) return context class PostCreateView(LoginRequiredMixin, PostChangeMixin, AddRequestUserMixin, CreateView): model = BlogPost form_class = BlogPostForm template_name = 'blogs/blogpost_edit.html' user_field_name = 'author' success_msg = "Blogentry created!" def form_valid(self, form): self.blog_slug = self.kwargs['slug'] return super().form_valid(form) class PostUpdateView(LoginRequiredMixin, PostChangeMixin, AddRequestUserMixin, UpdateView): model = BlogPost form_class = BlogPostForm template_name = 'blogs/blogpost_edit.html' user_field_name = 'author' success_msg = "Blogentry updated!" def form_valid(self, form): self.blog_slug = self.kwargs['blog_slug'] return super().form_valid(form)
Python
0
@@ -3692,32 +3692,182 @@ ntry updated!%22%0A%0A + def get_initial(self):%0A initial = super().get_initial()%0A initial%5B'is_published'%5D = self.object.is_published%0A return initial%0A%0A def form_val
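Decoded, the diff adds a `get_initial()` override to `PostUpdateView` so the edit form is pre-populated with the post's current published state. A stripped-down sketch of that pattern without Django (class names are stand-ins):

```python
# Sketch of the get_initial() pattern: start from the parent's initial data
# and seed it with the object's current is_published flag.
class BaseUpdateView:
    def get_initial(self):
        return {}

class PostUpdateView(BaseUpdateView):
    def __init__(self, obj):
        self.object = obj

    def get_initial(self):
        initial = super().get_initial()
        initial['is_published'] = self.object.is_published
        return initial

class BlogPost:
    is_published = True

print(PostUpdateView(BlogPost()).get_initial())  # {'is_published': True}
```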
25c242ed3352bcf52683454e12c3e9ae7e51622b
Check line endings for all modified files with 'binary' attr not set.
hooks.d/line_endings.py
hooks.d/line_endings.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim:ts=4:sw=4:expandtab # # ================================================================== # # Copyright (c) 2016, Parallels IP Holdings GmbH # Released under the terms of MIT license (see LICENSE for details) # # ================================================================== # ''' line_endings: A hook to deny commiting files with mixed line endings ''' import logging import hookutil class Hook(object): def __init__(self, repo_dir, settings, params): self.repo_dir = repo_dir self.settings = settings self.params = params def check(self, branch, old_sha, new_sha): logging.debug("branch='%s', old_sha='%s', new_sha='%s', params='%s'", branch, old_sha, new_sha, self.params) permit = True # Do not run the hook if the branch is being deleted if new_sha == '0' * 40: logging.debug("Deleting the branch, skip the hook") return True, [] # Before the hook is run git has already created # a new_sha commit object log = hookutil.parse_git_log(self.repo_dir, branch, old_sha, new_sha) messages = [] for commit in log: modfiles = hookutil.parse_git_show(self.repo_dir, commit['commit']) def has_mixed_le(file_contents): ''' Check if file contains both lf and crlf file_contents = open(file).read() ''' if ('\r\n' in file_contents and '\n' in file_contents.replace('\r\n', '')): return True return False for modfile in modfiles: # Skip deleted files if modfile['status'] == 'D': logging.debug("Deleted '%s', skip", modfile['path']) continue text_attr = hookutil.get_attr( self.repo_dir, new_sha, modfile['path'], 'text') # Attr 'text' enables eol normalization, so # the file won't have crlf when the attr is set if text_attr == 'unspecified': cmd = ['git', 'show', modfile['new_blob']] _, file_contents, _ = hookutil.run(cmd, self.repo_dir) permit_file = not has_mixed_le(file_contents) if not permit_file: messages.append({'at': commit['commit'], 'text': "Error: file '%s' has mixed line endings (CRLF/LF)" % modfile['path']}) permit = permit and permit_file logging.debug("modfile='%s', permit='%s'", modfile['path'], permit) return permit, messages
Python
0
@@ -1911,20 +1911,22 @@ -text +binary _attr = @@ -2006,20 +2006,22 @@ ath'%5D, ' -text +binary ')%0A%0A @@ -2036,164 +2036,33 @@ -# Attr 'text' enables eol normalization, so%0A # the file won't have crlf when the attr is set%0A if text +if binary _attr -== 'unspecified +!= 'set ':%0A -%0A
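Decoded, the diff swaps the `text` attribute check for `binary`: the mixed line-ending scan now runs for every modified file whose `binary` attribute is not set, instead of only files with `text` unspecified. A standalone sketch of the resulting control flow (attribute lookup stubbed out):

```python
# Sketch of the new guard: scan any file whose 'binary' gitattribute is not set.
def has_mixed_le(file_contents):
    """True if the text mixes CRLF and bare LF line endings."""
    return '\r\n' in file_contents and '\n' in file_contents.replace('\r\n', '')

def check_file(path, contents, binary_attr):
    # binary_attr would come from something like `git check-attr binary -- <path>`
    if binary_attr != 'set' and has_mixed_le(contents):
        return "Error: file '%s' has mixed line endings (CRLF/LF)" % path
    return None

print(check_file("a.txt", "one\r\ntwo\nthree\n", "unspecified"))
print(check_file("blob.bin", "one\r\ntwo\n", "set"))  # skipped: binary is set
```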
19d81520a7fe9dd8098bd1603b455f08e465c5f7
add getspire to init
hsadownload/__init__.py
hsadownload/__init__.py
__all__ = ['access', 'getpacs'] from hsadownload import access, getpacs
Python
0
@@ -24,16 +24,28 @@ getpacs' +, 'getspire' %5D%0Afrom h @@ -77,9 +77,19 @@ getpacs +, getspire %0A
278215d3afb04e37b1e5ad3072038d359309000a
Remove humanfriendly.compat.unittest (#53)
humanfriendly/compat.py
humanfriendly/compat.py
# Human friendly input/output in Python. # # Author: Peter Odding <peter@peterodding.com> # Last Change: December 10, 2020 # URL: https://humanfriendly.readthedocs.io """ Compatibility with Python 2 and 3. This module exposes aliases and functions that make it easier to write Python code that is compatible with Python 2 and Python 3. .. data:: basestring Alias for :func:`python2:basestring` (in Python 2) or :class:`python3:str` (in Python 3). See also :func:`is_string()`. .. data:: HTMLParser Alias for :class:`python2:HTMLParser.HTMLParser` (in Python 2) or :class:`python3:html.parser.HTMLParser` (in Python 3). .. data:: interactive_prompt Alias for :func:`python2:raw_input()` (in Python 2) or :func:`python3:input()` (in Python 3). .. data:: StringIO Alias for :class:`python2:StringIO.StringIO` (in Python 2) or :class:`python3:io.StringIO` (in Python 3). .. data:: unicode Alias for :func:`python2:unicode` (in Python 2) or :class:`python3:str` (in Python 3). See also :func:`coerce_string()`. .. data:: monotonic Alias for :func:`python3:time.monotonic()` (in Python 3.3 and higher) or `monotonic.monotonic()` (a `conditional dependency <https://pypi.org/project/monotonic/>`_ on older Python versions). """ __all__ = ( 'HTMLParser', 'StringIO', 'basestring', 'coerce_string', 'interactive_prompt', 'is_string', 'is_unicode', 'monotonic', 'name2codepoint', 'on_macos', 'on_windows', 'unichr', 'unicode', # This export remains here so as not to break my dozen or so other Python # projects using 'from humanfriendly.compat import unittest' from good old # times (when Python 2.6 was still a thing). It will eventually be removed. 'unittest', 'which', ) # Standard library modules. import sys import unittest # Differences between Python 2 and 3. try: # Python 2. unicode = unicode unichr = unichr basestring = basestring interactive_prompt = raw_input from distutils.spawn import find_executable as which from HTMLParser import HTMLParser from StringIO import StringIO from htmlentitydefs import name2codepoint except (ImportError, NameError): # Python 3. unicode = str unichr = chr basestring = str interactive_prompt = input from shutil import which from html.parser import HTMLParser from io import StringIO from html.entities import name2codepoint try: # Python 3.3 and higher. from time import monotonic except ImportError: # A replacement for older Python versions: # https://pypi.org/project/monotonic/ try: from monotonic import monotonic except (ImportError, RuntimeError): # We fall back to the old behavior of using time.time() instead of # failing when {time,monotonic}.monotonic() are both missing. from time import time as monotonic def coerce_string(value): """ Coerce any value to a Unicode string (:func:`python2:unicode` in Python 2 and :class:`python3:str` in Python 3). :param value: The value to coerce. :returns: The value coerced to a Unicode string. """ return value if is_string(value) else unicode(value) def is_string(value): """ Check if a value is a :func:`python2:basestring` (in Python 2) or :class:`python3:str` (in Python 3) object. :param value: The value to check. :returns: :data:`True` if the value is a string, :data:`False` otherwise. """ return isinstance(value, basestring) def is_unicode(value): """ Check if a value is a :func:`python2:unicode` (in Python 2) or :class:`python2:str` (in Python 3) object. :param value: The value to check. :returns: :data:`True` if the value is a Unicode string, :data:`False` otherwise. 
""" return isinstance(value, unicode) def on_macos(): """ Check if we're running on Apple MacOS. :returns: :data:`True` if running MacOS, :data:`False` otherwise. """ return sys.platform.startswith('darwin') def on_windows(): """ Check if we're running on the Microsoft Windows OS. :returns: :data:`True` if running Windows, :data:`False` otherwise. """ return sys.platform.startswith('win')
Python
0
@@ -102,25 +102,26 @@ ge: -Dec +Sept ember 1 -0 +7 , 202 -0 +1 %0A# U @@ -1521,261 +1521,8 @@ e',%0A - # This export remains here so as not to break my dozen or so other Python%0A # projects using 'from humanfriendly.compat import unittest' from good old%0A # times (when Python 2.6 was still a thing). It will eventually be removed.%0A 'unittest',%0A @@ -1575,24 +1575,8 @@ sys -%0Aimport unittest %0A%0A#
0d05cc9f76b97b8c4e39c96b2126854080ce9963
Make the config update slightly safer
ssdeploy.py
ssdeploy.py
#!/usr/bin/env python3 #SuPeRMiNoR2, 2015 #MIT License import sys, os, requests, hashlib, json, shutil, zipfile, argparse, platform print("Running on python version {0}".format(platform.python_version())) major = platform.python_version_tuple()[0] if not major == "3": print("Only python 3.x is supported.") sys.exit() parser = argparse.ArgumentParser() parser.add_argument("-f", "--force", help="Forces the update, even if you already have that version installed", action="store_true") parser.add_argument("-c", "--clean", help="Clean all downloaded mods (WIP)", action="store_true") args = parser.parse_args() #Check to make sure we are running from the right directory if not os.path.exists("dlib"): print("Error, please run this script from its base directory!.") sys.exit() from dlib import files from dlib import tqdm files.checkstructure() #Eventually refactor so that everything uses fullconfig, then rename it to config data, config, fullconfig = files.loadconfig() files.checkupdate(config) mod_database = config["moddbdir"] modcachedir = config["cachedir"] servermoddir = config["servermoddir"] #Who needs error detection anyway print("\nStarting SuperSolderDeploy") print("Using solder instance: {0}".format(config["solderurl"])) print("Modpack name: {0}".format(config["modpackname"])) print("Server mods folder: {0}".format(config["servermoddir"])) print("Currently installed modpack version: {0}".format(data["last"])) print("\nChecking solder for new version...") index = requests.get(config["modpackurl"]) index = index.json() mpversion = index["recommended"] print("\nNewest modpack version: {}\n".format(mpversion)) if mpversion == data["last"] and args.force == False: print("Already updated to this version, use -f to force update") sys.exit() if args.force: print("Force mode enabled, force updating server mods...\n") modindex = requests.get(config["modpackurl"] + index["recommended"]) modindex = modindex.json() modinfo = {} for i in tqdm.tqdm(modindex["mods"], desc="Downloading Mod Info", leave=True): mod = requests.get(config["modsurl"] + i["name"]) modinfo[i["name"]] = mod.json() def generate_filename(i): st = "{name}-{version}.zip".format(name=i["name"], version=i["version"]) return st def download_file(url, filename): r = requests.get(url, stream=True) with open(filename, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) f.flush() def md5(filename, blocksize=2**20): m = hashlib.md5() with open(filename, "rb") as f: while True: buf = f.read(blocksize) if not buf: break m.update( buf ) return m.hexdigest() msgs = [] for i in tqdm.tqdm(modindex["mods"], desc="Downloading Mods", leave=True): info = modinfo[i["name"]] if not "#clientonly" in info["description"]: if not os.path.exists(os.path.join(mod_database, generate_filename(i))): download_file(i["url"], os.path.join(mod_database, generate_filename(i))) dlhash = md5(os.path.join(mod_database, generate_filename(i))) if not dlhash == i["md5"]: msgs.append("Warning, {0} does not match the hash".format(info["pretty_name"])) zipf = zipfile.ZipFile(os.path.join(mod_database, generate_filename(i)), "r") zipf.extractall(modcachedir) else: msgs.append("Skipped client only mod: "+info["pretty_name"]) for i in msgs: print(i) modlocation = os.path.join(modcachedir, "mods") modfiles = os.listdir(modlocation) oldmpversion = data["last"] data["last"] = mpversion data["filelists"][mpversion] = modfiles if oldmpversion == False: for i in modfiles: fl = os.path.join(modcachedir, "mods", i) if not 
i == "1.7.10": shutil.copy(fl, servermoddir) else: oldfiles = data["filelists"][oldmpversion] print("Cleaning up old mods from server dir") for i in oldfiles: l = os.path.join(servermoddir, i) if not os.path.exists(l): print("Failed to remove file: "+l) else: os.remove(os.path.join(servermoddir, i)) for i in modfiles: fl = os.path.join(modcachedir, "mods", i) if not i == "1.7.10": shutil.copy(fl, servermoddir) #Config Update Section if fullconfig["system"]["configupdate"] == "true": updatemode = fullconfig["configupdate"]["configupdatemode"] configupdatedir = fullconfig["configupdate"]["configdir"] print("Config Update enabled, mode: {mode}, Config dir: {cdir}".format(mode=updatemode, cdir=configupdatedir)) if updatemode == "overwrite": if not configupdatedir == "/": print("Deleting current config files") shutil.rmtree(configupdatedir) print("Updating config files") shutil.copytree(os.path.join(modcachedir, "config"), configupdatedir) files.saveconfig(data)
Python
0.000001
@@ -4742,17 +4742,16 @@ tedir))%0A -%0A @@ -4757,31 +4757,65 @@ if +not config update -mode +dir == %22 -overwrit +/%22 or configupdatedir == %22changem e%22:%0A @@ -4833,33 +4833,32 @@ if -not config update -dir +mode == %22 -/ +overwrite %22:%0A @@ -4997,32 +4997,32 @@ config files%22)%0A - @@ -5090,16 +5090,159 @@ datedir) +%0A else:%0A print(%22Error, please change configdir in the config.ini to the path to the config folder in your minecraft server.%22) %0A%0Afiles.
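Decoded, the diff reworks the guards so the destructive config wipe only happens when `configupdatedir` is neither `/` nor the `changeme` placeholder, and prints an error otherwise. A condensed sketch that makes that intent explicit (paths invented, early return used for clarity):

```python
import os
import shutil

def update_configs(configupdatedir, updatemode, modcachedir):
    # Refuse to touch anything while configdir is the filesystem root or the
    # "changeme" placeholder; this mirrors the intent of the added guard.
    if configupdatedir in ("/", "changeme"):
        print("Error, please change configdir in the config.ini to the path "
              "to the config folder in your minecraft server.")
        return
    if updatemode == "overwrite":
        print("Deleting current config files")
        shutil.rmtree(configupdatedir, ignore_errors=True)
    print("Updating config files")
    shutil.copytree(os.path.join(modcachedir, "config"), configupdatedir)

update_configs("changeme", "overwrite", "modcache")  # only prints the error
```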
54b40488a7b0baefba3ada33cf9b792af1c2ca4d
fix bug with api v1
people/api.py
people/api.py
# -*- coding: utf-8 -*- from django.contrib.auth.models import User from tastypie import fields from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS from common.api import WebsiteResource from .models import Artist, Staff, Organization class UserResource(ModelResource): class Meta: queryset = User.objects.exclude(pk=-1) # Exclude anonymous user detail_uri_name = 'username' resource_name = 'people/user' fields = ['username', 'first_name', 'last_name', 'id'] filtering = { 'first_name': ALL, 'last_name': ALL } def dehydrate(self, bundle): bundle.data['photo'] = bundle.obj.profile.photo bundle.data['birthdate'] = bundle.obj.profile.birthdate bundle.data['birthplace'] = bundle.obj.profile.birthplace bundle.data['cursus'] = bundle.obj.profile.cursus bundle.data['gender'] = bundle.obj.profile.gender # Nationality : country code separated by commas bundle.data['nationality'] = bundle.obj.profile.nationality bundle.data['homeland_country'] = bundle.obj.profile.homeland_country bundle.data['birthplace_country'] = bundle.obj.profile.birthplace_country return bundle class ArtistResource(ModelResource): class Meta: queryset = Artist.objects.all() resource_name = 'people/artist' filtering = { 'user': ALL_WITH_RELATIONS, 'resource_uri': ALL } fields = ['id', 'nickname', 'bio_short_fr', 'bio_short_en', 'bio_fr', 'bio_en', 'twitter_account', 'facebook_profile'] websites = fields.ToManyField(WebsiteResource, 'websites', full=True) user = fields.ForeignKey(UserResource, 'user', full=True) artworks = fields.ToManyField('production.api.ArtworkResource', 'artworks', full=False, null=True, use_in=['detail']) class StaffResource(ModelResource): class Meta: queryset = Staff.objects.all() resource_name = 'people/staff' fields = ('user',) user = fields.ForeignKey(UserResource, 'user', full=True) class OrganizationResource(ModelResource): class Meta: queryset = Organization.objects.all() resource_name = 'people/organization'
Python
0
@@ -632,24 +632,71 @@ f, bundle):%0A + if hasattr(bundle.obj, 'profile'):%0A bund @@ -739,32 +739,36 @@ e.photo%0A + + bundle.data%5B'bir @@ -807,16 +807,20 @@ rthdate%0A + @@ -877,32 +877,36 @@ thplace%0A + bundle.data%5B'cur @@ -939,32 +939,36 @@ .cursus%0A + + bundle.data%5B'gen @@ -1009,16 +1009,20 @@ + + # Nation @@ -1062,32 +1062,36 @@ commas%0A + + bundle.data%5B'nat @@ -1134,16 +1134,20 @@ onality%0A + @@ -1216,16 +1216,20 @@ country%0A +
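Decoded, the diff wraps the profile lookups in `dehydrate()` with `if hasattr(bundle.obj, 'profile'):`, so users without a profile no longer break the v1 API. The guard pattern reduced to plain Python (classes are stand-ins):

```python
# Sketch of the guard: only copy profile fields when the user has a profile.
class Profile:
    photo = "me.jpg"
    birthdate = "1990-01-01"

class UserWithProfile:
    profile = Profile()

class UserWithoutProfile:
    pass

def dehydrate(obj, data):
    if hasattr(obj, 'profile'):
        data['photo'] = obj.profile.photo
        data['birthdate'] = obj.profile.birthdate
    return data

print(dehydrate(UserWithProfile(), {}))     # profile fields copied
print(dehydrate(UserWithoutProfile(), {}))  # left untouched, no AttributeError
```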
94716665743e80f789644938769abdfb09654d0f
use itervalues
salt/states/ansiblegate.py
salt/states/ansiblegate.py
# -*- coding: utf-8 -*- # # Author: Bo Maryniuk <bo@suse.de> # # Copyright 2017 SUSE LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r''' :codeauthor: :email:`Bo Maryniuk <bo@suse.de>` Execution of Ansible modules from within states =============================================== With `ansible.call` these states allow individual Ansible module calls to be made via states. To call an Ansible module function use a :mod:`module.run <salt.states.ansible.call>` state: .. code-block:: yaml some_set_of_tasks: ansible: - system.ping - packaging.os.zypper - name: emacs - state: installed ''' from __future__ import absolute_import, print_function, unicode_literals import sys try: import ansible except ImportError as err: ansible = None # Import salt modules import salt.fileclient import salt.ext.six as six __virtualname__ = 'ansible' class AnsibleState(object): ''' Ansible state caller. ''' def get_args(self, argset): ''' Get args and kwargs from the argset. :param argset: :return: ''' args = [] kwargs = {} for element in argset or []: if isinstance(element, dict): kwargs.update(element) else: args.append(element) return args, kwargs def __call__(self, **kwargs): ''' Call Ansible module. :return: ''' ret = { 'name': kwargs.pop('name'), 'changes': {}, 'comment': '', 'result': True, } for mod_name, mod_params in kwargs.items(): args, kwargs = self.get_args(mod_params) try: ans_mod_out = __salt__['ansible.{0}'.format(mod_name)](**{'__pub_arg': [args, kwargs]}) except Exception as err: ans_mod_out = 'Module "{0}" failed. Error message: ({1}) {2}'.format( mod_name, err.__class__.__name__, err) ret['result'] = False ret['changes'][mod_name] = ans_mod_out return ret def __virtual__(): ''' Disable, if Ansible is not available around on the Minion. ''' setattr(sys.modules[__name__], 'call', lambda **kwargs: AnsibleState()(**kwargs)) # pylint: disable=W0108 return ansible is not None def _client(): ''' Get a fileclient ''' return salt.fileclient.get_file_client(__opts__) def _changes(plays): changes = {} for play in plays['plays']: task_changes = {} for task in play['tasks']: host_changes = {} for host, data in six.iteritems(task['hosts']): if data['changed'] is True: host_changes[host] = data.get('diff', data.get('changes', {})) if host_changes: task_changes[task['task']['name']] = host_changes if task_changes: changes[play['play']['name']] = task_changes return changes def playbooks(name, rundir=None, gitrepo=None, git_kwargs=None, ansible_kwargs=None): ''' Run Ansible Playbooks :param name: path to playbook. This can be relative to rundir or the git repo :param rundir: location to run ansible-playbook from. :param gitrepo: gitrepo to clone for ansible playbooks. 
This is cloned using the `git.latest` state, and is cloned to the `rundir` if specified, otherwise it is clone to the `cache_dir` :param git_kwargs: extra kwargs to pass to `git.latest` state module besides the `name` and `target` :param ansible_kwargs: extra kwargs to pass to `ansible.playbooks` execution module besides the `name` and `target` :return: Ansible playbook output. .. code-block:: yaml run nginx install: ansible.playbooks: - name: install.yml - gitrepo: git://github.com/gituser/playbook.git - git_kwargs: rev: master ''' ret = { 'result': False, 'changes': {}, 'comment': 'Running playbook {0}'.format(name), 'name': name, } if gitrepo is not None: if rundir is None: rundir = _client()._extrn_path(gitrepo, 'base') if git_kwargs is None: git_kwargs = {} __states__['git.latest']( name=gitrepo, target=rundir, **git_kwargs, ) if ansible_kwargs is None: ansible_kwargs = {} checks = __salt__['ansible.playbooks'](name, rundir=rundir, check=True, diff=True, **ansible_kwargs) if all(not check['changed'] for _, check in six.iteritems(checks['stats'])): ret['comment'] = 'No changes to be made from playbook {0}'.format(name) ret['result'] = True elif __opts__['test']: ret['comment'] = 'Changes will be made from playbook {0}'.format(name) ret['result'] = None ret['changes'] = _changes(checks) else: results = __salt__['ansible.playbooks'](name, rundir=rundir, diff=True, **ansible_kwargs) ret['comment'] = 'Changes were made by playbook {0}'.format(name) ret['changes'] = _changes(results) ret['result'] = all(not check['failures'] for _, check in six.iteritems(checks['stats'])) return ret
Python
0.000003
@@ -5192,27 +5192,24 @@ hanged'%5D for - _, check in si @@ -5206,36 +5206,37 @@ heck in six.iter -item +value s(checks%5B'stats' @@ -5808,11 +5808,8 @@ for - _, che @@ -5822,20 +5822,21 @@ six.iter -item +value s(checks
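The diff swaps `for _, check in six.iteritems(...)` for `for check in six.itervalues(...)`, since the keys were never used. A tiny sketch of the difference (requires the `six` package):

```python
import six

stats = {
    'web1': {'changed': 0, 'failures': 0},
    'web2': {'changed': 2, 'failures': 0},
}

# Before: iterate key/value pairs and throw the key away.
unchanged_old = all(not check['changed'] for _, check in six.iteritems(stats))

# After: iterate values directly.
unchanged_new = all(not check['changed'] for check in six.itervalues(stats))

print(unchanged_old, unchanged_new)  # same result, less noise
```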
59b2d0418c787066c37904816925dad15b0b45cf
Use author display name in document list_filter
scanblog/scanning/admin.py
scanblog/scanning/admin.py
from django.contrib import admin from scanning.models import PendingScan, Document, DocumentPage, Scan, ScanPage, Transcription class ScanPageInline(admin.TabularInline): model = ScanPage class ScanAdmin(admin.ModelAdmin): model = Scan inlines = [ScanPageInline] admin.site.register(Scan, ScanAdmin) class PendingScanAdmin(admin.ModelAdmin): model = PendingScan list_display = ('author', 'editor', 'code', 'created', 'completed') search_fields = ('code',) admin.site.register(PendingScan, PendingScanAdmin) class DocumentAdmin(admin.ModelAdmin): list_display = ['title', 'author', 'status', 'created'] search_fields = ['title', 'author__profile__display_name', 'body', 'transcription__revisions__body'] date_hierarchy = 'created' list_filter = ['type', 'status', 'author', 'author__profile__managed'] admin.site.register(Document, DocumentAdmin) admin.site.register(DocumentPage) admin.site.register(Transcription)
Python
0.000001
@@ -828,18 +828,55 @@ 'author -', +__profile__managed',%0A 'author @@ -886,23 +886,28 @@ rofile__ -managed +display_name '%5D%0Aadmin
3bc11eea2d629b316eb9a8bdf4d9c2a2c801ddf5
Remove unused imports
whylog/tests/tests_front/tests_whylog_factory.py
whylog/tests/tests_front/tests_whylog_factory.py
from whylog.config.investigation_plan import LineSource from whylog.front.whylog_factory import whylog_factory from whylog.front.utils import FrontInput from whylog.log_reader import LogReader from whylog.teacher import Teacher from whylog.tests.utils import TestRemovingSettings class TestWhylogFactory(TestRemovingSettings): def tests_whylog_factory(self): log_reader, teacher_generator = whylog_factory() teacher = teacher_generator() front_input = FrontInput(1, 'line content', LineSource('host', 'path')) log_reader.get_causes(front_input) teacher.add_line(0, front_input, True)
Python
0.000001
@@ -150,83 +150,8 @@ put%0A -from whylog.log_reader import LogReader%0Afrom whylog.teacher import Teacher%0A from
e098c13a4c94abe00aa6c8ce2ea67900ea14f239
revert array license author info
mne/io/array/array.py
mne/io/array/array.py
"""Tools for creating MNE objects from numpy arrays""" # Authors: Eric Larson <larson.eric.d@gmail.com> # Denis Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) import numpy as np from ...constants import FIFF from ..meas_info import Info from ..base import _BaseRaw from ...utils import verbose, logger from ...externals.six import string_types _kind_dict = dict( eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T), grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M), misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE), stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), ) def create_info(ch_names, sfreq, ch_types=None): """Create a basic Info instance suitable for use with create_raw Parameters ---------- ch_names : list of str Channel names. sfreq : float Sample rate of the data. ch_types : list of str Channel types. If None, data are assumed to be misc. Currently supported fields are "mag", "grad", "eeg", and "misc". Notes ----- The info dictionary will be sparsely populated to enable functionality within the rest of the package. Advanced functionality such as source localization can only be obtained through substantial, proper modifications of the info structure (not recommended). """ if not isinstance(ch_names, (list, tuple)): raise TypeError('ch_names must be a list or tuple') sfreq = float(sfreq) if sfreq <= 0: raise ValueError('sfreq must be positive') nchan = len(ch_names) if ch_types is None: ch_types = [_kind_dict['misc']] * nchan if len(ch_types) != nchan: raise ValueError('ch_types and ch_names must be the same length') info = Info() info['meas_date'] = [0, 0] info['sfreq'] = sfreq for key in ['bads', 'projs', 'comps']: info[key] = list() for key in ['meas_id', 'file_id', 'highpass', 'lowpass', 'acq_pars', 'acq_stim', 'filename', 'dig']: info[key] = None info['ch_names'] = ch_names info['nchan'] = nchan info['chs'] = list() loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32) for ci, (name, kind) in enumerate(zip(ch_names, ch_types)): if not isinstance(name, string_types): raise TypeError('each entry in ch_names must be a string') if not isinstance(kind, string_types): raise TypeError('each entry in ch_names must be a string') if kind not in _kind_dict: raise KeyError('kind must be one of %s, not %s' % (list(_kind_dict.keys()), kind)) kind = _kind_dict[kind] chan_info = dict(loc=loc, eeg_loc=None, unit_mul=0, range=1., cal=1., coil_trans=None, kind=kind[0], coil_type=kind[1], unit=kind[2], coord_frame=FIFF.FIFFV_COORD_UNKNOWN, ch_name=name, scanno=ci + 1, logno=ci + 1) info['chs'].append(chan_info) info['dev_head_t'] = None info['dev_ctf_t'] = None info['ctf_head_t'] = None return info class RawArray(_BaseRaw): """Raw object from numpy array Parameters ---------- data : array, shape (n_channels, n_times) The channels' time series. info : instance of Info Info dictionary. Consider using ``create_info`` to populate this structure. 
""" @verbose def __init__(self, data, info, verbose=None): dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 data = np.asanyarray(data, dtype=dtype) if data.ndim != 2: raise ValueError('data must be a 2D array') logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s' % (dtype.__name__, data.shape[0], data.shape[1])) if len(data) != len(info['ch_names']): raise ValueError('len(data) does not match len(info["ch_names"])') assert len(info['ch_names']) == info['nchan'] cals = np.zeros(info['nchan']) for k in range(info['nchan']): cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal'] self.verbose = verbose self.cals = cals self.rawdir = None self.proj = None self.comp = None self._filenames = list() self._preloaded = True self.info = info self._data = data self.first_samp, self.last_samp = 0, self._data.shape[1] - 1 self._times = np.arange(self.first_samp, self.last_samp + 1) / info['sfreq'] self._projectors = list() logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( self.first_samp, self.last_samp, float(self.first_samp) / info['sfreq'], float(self.last_samp) / info['sfreq'])) logger.info('Ready.')
Python
0
@@ -20,11 +20,11 @@ ing -MNE +Raw obj @@ -103,61 +103,8 @@ om%3E%0A -# Denis Engemann %3Cdenis.engemann@gmail.com%3E%0A #%0A#
67fd00b47278dbc1bb8958cbe00f9aa0f29910b8
Fix test after solr_token_to_entity -> plural
src/adhocracy/tests/lib/test_pager.py
src/adhocracy/tests/lib/test_pager.py
from unittest import TestCase class TestVisiblePages(TestCase): ''' Test the functionality of :func:`adhocracy.lib.pager.visible_pages` ''' def test_few_pages(self): from adhocracy.lib.pager import visible_pages visible, seperators = visible_pages(1, 3) self.assertEqual(visible, [1, 2, 3]) self.assertEqual(seperators, []) visible, seperators = visible_pages(2, 3) self.assertEqual(visible, [1, 2, 3]) self.assertEqual(seperators, []) visible, seperators = visible_pages(3, 3) self.assertEqual(visible, [1, 2, 3]) self.assertEqual(seperators, []) def test_max_displayed_pages(self): ''' If we have the maximum number (11)of pages, we don't need seperators ''' from adhocracy.lib.pager import visible_pages visible, seperators = visible_pages(1, 11) self.assertEqual(visible, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) self.assertEqual(seperators, []) visible, seperators = visible_pages(5, 11) self.assertEqual(visible, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) self.assertEqual(seperators, []) visible, seperators = visible_pages(11, 11) self.assertEqual(visible, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) self.assertEqual(seperators, []) def test_gt_max_displayed_pages(self): ''' If we have the maximum number (11)of pages, we don't need seperators ''' from adhocracy.lib.pager import visible_pages visible, seperators = visible_pages(1, 20) self.assertEqual(visible, [1, 2, 3, 4, 5, 6, 7, 8, 9, 20]) self.assertEqual(seperators, [10]) visible, seperators = visible_pages(7, 20) self.assertEqual(visible, [1, 2, 3, 4, 5, 6, 7, 8, 9, 20]) self.assertEqual(seperators, [10]) visible, seperators = visible_pages(11, 20) self.assertEqual(visible, [1, 8, 9, 10, 11, 12, 13, 14, 20]) self.assertEqual(seperators, [2, 20]) visible, seperators = visible_pages(12, 20) self.assertEqual(visible, [1, 9, 10, 11, 12, 13, 14, 15, 20]) self.assertEqual(seperators, [2, 20]) visible, seperators = visible_pages(13, 20) self.assertEqual(visible, [1, 12, 13, 14, 15, 16, 17, 18, 19, 20]) self.assertEqual(seperators, [2]) class TestSolrTokenHelpers(TestCase): def test_entity_to_solr_token_with_hierachy(self): from adhocracy.model import CategoryBadge from adhocracy.lib.pager import entity_to_solr_token badge0 = CategoryBadge.create('testbadge0', '#ccc', True, 'descr') badge11 = CategoryBadge.create('testbadge11', '#ccc', True, 'descr') badge12 = CategoryBadge.create('testbadge12', '#ccc', True, 'descr') badge121 = CategoryBadge.create('testbadge121', '#ccc', True, 'descr') badge11.parent = badge0 badge12.parent = badge0 badge121.parent = badge12 result = entity_to_solr_token(badge121) shouldbe = u"%s/%s/%s" % (str(badge0.id), str(badge12.id), str(badge121.id)) self.assertEqual(result, shouldbe) def test_entity_to_solr_token_no_hierachy(self): from adhocracy.model import UserBadge from adhocracy.lib.pager import entity_to_solr_token badge = UserBadge.create('testbadge', '#ccc', True, 'description') result = entity_to_solr_token(badge) shouldbe = u"%s" % str(badge.id) self.assertEqual(result, shouldbe) def test_solr_token_to_entity_with_hierachy(self): from adhocracy.model import CategoryBadge from adhocracy.lib.pager import solr_token_to_entity badge = CategoryBadge.create('testbadge', '#ccc', True, 'description') token = u"1/2/%s" % str(badge.id) self.assertEqual(solr_token_to_entity(token, CategoryBadge), badge) def test_solr_token_to_entity_no_hierachy(self): from adhocracy.model import CategoryBadge from adhocracy.lib.pager import solr_token_to_entity badge = CategoryBadge.create('testbadge', '#ccc', True, 'description') token = u"%s" % str(badge.id) 
self.assertEqual(solr_token_to_entity(token, CategoryBadge), badge) wrongtoken = "1A" self.assertEqual(solr_token_to_entity(wrongtoken, CategoryBadge), None)
Python
0.000004
@@ -3714,32 +3714,33 @@ mport solr_token +s _to_entity%0A @@ -3724,33 +3724,35 @@ _tokens_to_entit -y +ies %0A badge = @@ -3884,32 +3884,33 @@ Equal(solr_token +s _to_entity(token @@ -3894,39 +3894,43 @@ _tokens_to_entit -y( +ies(%5B token +%5D , CategoryBadge) @@ -3931,21 +3931,23 @@ Badge), +%5B badge +%5D )%0A%0A d @@ -4086,32 +4086,33 @@ mport solr_token +s _to_entity%0A @@ -4104,17 +4104,19 @@ to_entit -y +ies %0A @@ -4252,32 +4252,33 @@ Equal(solr_token +s _to_entity(token @@ -4274,15 +4274,19 @@ ntit -y( +ies(%5B token +%5D , Ca @@ -4299,21 +4299,23 @@ Badge), +%5B badge +%5D )%0A @@ -4361,32 +4361,33 @@ Equal(solr_token +s _to_entity(wrong @@ -4379,18 +4379,21 @@ to_entit -y( +ies(%5B wrongtok @@ -4394,16 +4394,17 @@ ongtoken +%5D , Catego @@ -4417,10 +4417,8 @@ e), -None +%5B%5D )%0A
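Decoded, the diff renames `solr_token_to_entity` to `solr_tokens_to_entities`: the helper now takes a list of tokens and returns a list of matched badges (an empty list for unknown tokens) instead of a single badge or `None`. A schematic, database-free sketch of that signature change (the lookup table is invented for illustration):

```python
# Sketch of the plural helper: map many solr tokens to entities at once,
# silently dropping tokens that do not resolve.
BADGES_BY_ID = {7: "testbadge"}  # stand-in for a badge lookup by id

def solr_tokens_to_entities(tokens, badges):
    entities = []
    for token in tokens:
        last_part = token.rsplit('/', 1)[-1]
        if last_part.isdigit() and int(last_part) in badges:
            entities.append(badges[int(last_part)])
    return entities

print(solr_tokens_to_entities(["1/2/7"], BADGES_BY_ID))  # ['testbadge']
print(solr_tokens_to_entities(["1A"], BADGES_BY_ID))     # []
```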
b875084e74ee03c6b251a79f04f0db340bb356b8
Fix #604
scout/constants/indexes.py
scout/constants/indexes.py
from pymongo import (IndexModel, ASCENDING, DESCENDING) INDEXES = { 'hgnc_collection': [IndexModel( [('build', ASCENDING), ('chromosome', ASCENDING)], name="build_chromosome"), ], 'variant_collection': [ IndexModel([('case_id', ASCENDING),('rank_score', DESCENDING)], name="caseid_rankscore"), IndexModel([('case_id', ASCENDING),('variant_rank', ASCENDING)], name="caseid_variantrank") ] }
Python
0.000001
@@ -86,16 +86,25 @@ tion': %5B +%0A IndexMod @@ -106,25 +106,29 @@ exModel( -%0A +%5B%0A %5B('build @@ -119,17 +119,17 @@ -%5B + ('build' @@ -142,16 +142,29 @@ NDING), +%0A ('chromo @@ -179,24 +179,37 @@ SCENDING)%5D, +%0A name=%22build_ @@ -269,32 +269,45 @@ IndexModel(%5B +%0A ('case_id', ASCE @@ -313,16 +313,29 @@ ENDING), +%0A ('rank_s @@ -347,32 +347,45 @@ , DESCENDING)%5D, +%0A name=%22caseid_ran @@ -414,16 +414,29 @@ xModel(%5B +%0A ('case_i @@ -450,16 +450,29 @@ ENDING), +%0A ('varian @@ -493,16 +493,29 @@ DING)%5D, +%0A name=%22ca @@ -532,17 +532,695 @@ ntrank%22) +, %0A -%5D + IndexModel(%5B%0A ('case_id', ASCENDING), %0A ('category', ASCENDING), %0A ('variant_type', ASCENDING), %0A ('rank_score', DESCENDING)%5D,%0A name=%22caseid_category_varianttype_rankscore%22),%0A IndexModel(%5B%0A ('case_id', ASCENDING), %0A ('variant_id', ASCENDING)%5D,%0A name=%22caseid_variantid%22),%0A IndexModel(%5B%0A ('case_id', ASCENDING), %0A ('variant_type', ASCENDING), %0A ('variant_rank', ASCENDING), %0A ('panels', ASCENDING), %0A ('thousand_genomes_frequency', ASCENDING)%5D,%0A name=%22caseid_varianttype_variantrank_panels_thousandg%22)%0A %5D, %0A%7D%0A
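Decoded, the diff expands `variant_collection` with three additional compound indexes (case/category/variant_type/rank_score, case/variant_id, and case/variant_type/variant_rank/panels/thousand_genomes_frequency). A hedged sketch of declaring such compound indexes with `pymongo` (no server is needed just to build the models):

```python
from pymongo import IndexModel, ASCENDING, DESCENDING

# IndexModel objects are plain declarations; they only take effect when
# passed to Collection.create_indexes() against a live database.
variant_indexes = [
    IndexModel(
        [('case_id', ASCENDING),
         ('category', ASCENDING),
         ('variant_type', ASCENDING),
         ('rank_score', DESCENDING)],
        name="caseid_category_varianttype_rankscore"),
    IndexModel(
        [('case_id', ASCENDING),
         ('variant_id', ASCENDING)],
        name="caseid_variantid"),
]

for model in variant_indexes:
    print(model.document['name'], list(model.document['key']))
```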
87371774ad332a3adbe927e2609d73710f4a7678
change method name
tests/graph/test_dag.py
tests/graph/test_dag.py
from pybbn.graph.edge import Edge, EdgeType from pybbn.graph.node import Node from pybbn.graph.dag import Dag from nose import with_setup def setup(): pass def teardown(): pass @with_setup(setup, teardown) def test_graph_creation(): n0 = Node(0) n1 = Node(1) n2 = Node(2) e0 = Edge(n0, n1, EdgeType.DIRECTED) e1 = Edge(n1, n2, EdgeType.DIRECTED) e2 = Edge(n2, n0, EdgeType.DIRECTED) g = Dag() g.add_node(n0) g.add_node(n1) g.add_edge(e0) g.add_edge(e1) g.add_edge(e2) print(g) assert len(g.get_nodes()) == 3 assert len(g.get_edges()) == 2 assert len(list(g.get_neighbors(0))) == 1 assert len(list(g.get_neighbors(1))) == 2 assert len(list(g.get_neighbors(2))) == 1 assert 1 in g.get_neighbors(0) assert 0 in g.get_neighbors(1) assert 2 in g.get_neighbors(1) assert 1 in g.get_neighbors(2) assert g.edge_exists(0, 1) == 1 assert g.edge_exists(1, 2) == 1 assert g.edge_exists(0, 2) == 0 assert len(g.get_parents(0)) == 0 assert len(g.get_parents(1)) == 1 assert len(g.get_parents(2)) == 1 assert 0 in g.get_parents(1) assert 1 in g.get_parents(2) assert len(g.get_children(0)) == 1 assert len(g.get_children(1)) == 1 assert len(g.get_children(2)) == 0 assert 1 in g.get_children(0) assert 2 in g.get_children(1)
Python
0.000021
@@ -222,21 +222,19 @@ ef test_ -graph +dag _creatio
a3dea467c373e9f4a8a5dcf61693696d98a51800
Add a default timeout of 90 (#1912)
iap/make_iap_request.py
iap/make_iap_request.py
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example use of a service account to authenticate to Identity-Aware Proxy.""" # [START iap_make_request] import google.auth import google.auth.app_engine import google.auth.compute_engine.credentials import google.auth.iam from google.auth.transport.requests import Request import google.oauth2.credentials import google.oauth2.service_account import requests import requests_toolbelt.adapters.appengine IAM_SCOPE = 'https://www.googleapis.com/auth/iam' OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token' def make_iap_request(url, client_id, method='GET', **kwargs): """Makes a request to an application protected by Identity-Aware Proxy. Args: url: The Identity-Aware Proxy-protected URL to fetch. client_id: The client ID used by Identity-Aware Proxy. method: The request method to use ('GET', 'OPTIONS', 'HEAD', 'POST', 'PUT', 'PATCH', 'DELETE') **kwargs: Any of the parameters defined for the request function: https://github.com/requests/requests/blob/master/requests/api.py Returns: The page body, or raises an exception if the page couldn't be retrieved. """ # Figure out what environment we're running in and get some preliminary # information about the service account. bootstrap_credentials, _ = google.auth.default( scopes=[IAM_SCOPE]) if isinstance(bootstrap_credentials, google.oauth2.credentials.Credentials): raise Exception('make_iap_request is only supported for service ' 'accounts.') elif isinstance(bootstrap_credentials, google.auth.app_engine.Credentials): requests_toolbelt.adapters.appengine.monkeypatch() # For service account's using the Compute Engine metadata service, # service_account_email isn't available until refresh is called. bootstrap_credentials.refresh(Request()) signer_email = bootstrap_credentials.service_account_email if isinstance(bootstrap_credentials, google.auth.compute_engine.credentials.Credentials): # Since the Compute Engine metadata service doesn't expose the service # account key, we use the IAM signBlob API to sign instead. # In order for this to work: # # 1. Your VM needs the https://www.googleapis.com/auth/iam scope. # You can specify this specific scope when creating a VM # through the API or gcloud. When using Cloud Console, # you'll need to specify the "full access to all Cloud APIs" # scope. A VM's scopes can only be specified at creation time. # # 2. The VM's default service account needs the "Service Account Actor" # role. This can be found under the "Project" category in Cloud # Console, or roles/iam.serviceAccountActor in gcloud. signer = google.auth.iam.Signer( Request(), bootstrap_credentials, signer_email) else: # A Signer object can sign a JWT using the service account's key. signer = bootstrap_credentials.signer # Construct OAuth 2.0 service account credentials using the signer # and email acquired from the bootstrap credentials. 
service_account_credentials = google.oauth2.service_account.Credentials( signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={ 'target_audience': client_id }) # service_account_credentials gives us a JWT signed by the service # account. Next, we use that to obtain an OpenID Connect token, # which is a JWT signed by Google. google_open_id_connect_token = get_google_open_id_connect_token( service_account_credentials) # Fetch the Identity-Aware Proxy-protected URL, including an # Authorization header containing "Bearer " followed by a # Google-issued OpenID Connect token for the service account. resp = requests.request( method, url, headers={'Authorization': 'Bearer {}'.format( google_open_id_connect_token)}, **kwargs) if resp.status_code == 403: raise Exception('Service account {} does not have permission to ' 'access the IAP-protected application.'.format( signer_email)) elif resp.status_code != 200: raise Exception( 'Bad response from application: {!r} / {!r} / {!r}'.format( resp.status_code, resp.headers, resp.text)) else: return resp.text def get_google_open_id_connect_token(service_account_credentials): """Get an OpenID Connect token issued by Google for the service account. This function: 1. Generates a JWT signed with the service account's private key containing a special "target_audience" claim. 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1 has a target_audience claim, that endpoint will respond with an OpenID Connect token for the service account -- in other words, a JWT signed by *Google*. The aud claim in this JWT will be set to the value from the target_audience claim in #1. For more information, see https://developers.google.com/identity/protocols/OAuth2ServiceAccount . The HTTP/REST example on that page describes the JWT structure and demonstrates how to call the token endpoint. (The example on that page shows how to get an OAuth2 access token; this code is using a modified version of it to get an OpenID Connect token.) """ service_account_jwt = ( service_account_credentials._make_authorization_grant_assertion()) request = google.auth.transport.requests.Request() body = { 'assertion': service_account_jwt, 'grant_type': google.oauth2._client._JWT_GRANT_TYPE, } token_response = google.oauth2._client._token_endpoint_request( request, OAUTH_TOKEN_URI, body) return token_response['id_token'] # [END iap_make_request]
Python
0
@@ -1652,16 +1652,87 @@ s/api.py +%0A If no timeout is provided, it is set to 90 by default. %0A%0A Re @@ -1825,16 +1825,130 @@ %22%22%22%0A + # Set the default timeout, if missing%0A if 'timeout' not in kwargs:%0A kwargs%5B'timeout'%5D = 90%0A %0A # Fi
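Decoded, the diff inserts `if 'timeout' not in kwargs: kwargs['timeout'] = 90` at the top of `make_iap_request`, so callers who pass no timeout get a 90-second one instead of none at all. The pattern in isolation (request call stubbed out):

```python
def make_request(url, method='GET', **kwargs):
    # Set the default timeout, if missing; callers can still override it.
    if 'timeout' not in kwargs:
        kwargs['timeout'] = 90
    # A real implementation would now call requests.request(method, url, **kwargs);
    # here we just return the effective keyword arguments for inspection.
    return kwargs

print(make_request("https://example.com"))             # {'timeout': 90}
print(make_request("https://example.com", timeout=5))  # {'timeout': 5}
```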
40972c542e7018f925167fa79ccbc2c7b241b1b3
revert unneded change on testBuildWarningTokens
tests/lib/srxlo_test.py
tests/lib/srxlo_test.py
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest for Srxlo rendering module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest from capirca.lib import naming from capirca.lib import policy from capirca.lib import srxlo import mock GOOD_HEADER_1 = """ header { comment:: "this is a test acl" target:: srxlo test-filter inet6 } """ GOOD_TERM_1 = """ term good-term-1 { protocol:: icmpv6 action:: accept } """ GOOD_TERM_2 = """ term good-term-2 { protocol:: icmpv6 icmp-type:: destination-unreachable action:: accept } """ GOOD_TERM_3 = """ term good-term-3 { protocol:: icmpv6 action:: accept option:: inactive } """ SUPPORTED_TOKENS = { 'action', 'address', 'comment', 'counter', 'destination_address', 'destination_address_exclude', 'destination_port', 'destination_prefix', 'destination_prefix_except', 'dscp_except', 'dscp_match', 'dscp_set', 'ether_type', 'expiration', 'forwarding_class', 'forwarding_class_except', 'fragment_offset', 'hop_limit', 'icmp_code', 'icmp_type', 'stateless_reply', 'logging', 'loss_priority', 'name', 'next_ip', 'option', 'owner', 'packet_length', 'platform', 'platform_exclude', 'policer', 'port', 'precedence', 'protocol', 'protocol_except', 'qos', 'routing_instance', 'source_address', 'source_address_exclude', 'source_port', 'source_prefix', 'source_prefix_except', 'traffic_class_count', 'traffic_type', 'translated', 'ttl', 'verbatim', } SUPPORTED_SUB_TOKENS = { 'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'}, 'icmp_type': { 'alternate-address', 'certification-path-advertisement', 'certification-path-solicitation', 'conversion-error', 'destination-unreachable', 'echo-reply', 'echo-request', 'mobile-redirect', 'home-agent-address-discovery-reply', 'home-agent-address-discovery-request', 'icmp-node-information-query', 'icmp-node-information-response', 'information-request', 'inverse-neighbor-discovery-advertisement', 'inverse-neighbor-discovery-solicitation', 'mask-reply', 'mask-request', 'information-reply', 'mobile-prefix-advertisement', 'mobile-prefix-solicitation', 'multicast-listener-done', 'multicast-listener-query', 'multicast-listener-report', 'multicast-router-advertisement', 'multicast-router-solicitation', 'multicast-router-termination', 'neighbor-advertisement', 'neighbor-solicit', 'packet-too-big', 'parameter-problem', 'redirect', 'redirect-message', 'router-advertisement', 'router-renumbering', 'router-solicit', 'router-solicitation', 'source-quench', 'time-exceeded', 'timestamp-reply', 'timestamp-request', 'unreachable', 'version-2-multicast-listener-report', }, 'option': {'established', 'first-fragment', 'inactive', 'is-fragment', '.*', # not actually a lex token! 'sample', 'tcp-established', 'tcp-initial'} } # Print a info message when a term is set to expire in that many weeks. # This is normally passed from command line. 
EXP_INFO = 2 class SRXloTest(unittest.TestCase): def setUp(self): self.naming = mock.create_autospec(naming.Naming) def testIcmpv6(self): output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO)) self.failUnless('next-header icmp6;' in output, 'missing or incorrect ICMPv6 specification') def testIcmpv6Type(self): output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_2, self.naming), EXP_INFO)) self.failUnless('next-header icmp6;' in output, 'missing or incorrect ICMPv6 specification') self.failUnless('icmp-type 1;' in output, 'missing or incorrect ICMPv6 type specification') def testBuildTokens(self): # self.naming.GetServiceByProto.side_effect = [['25'], ['26']] pol1 = srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.maxDiff = None self.assertEquals(st, SUPPORTED_TOKENS) self.assertEquals(sst, SUPPORTED_SUB_TOKENS) def testBuildWarningTokens(self): pol1 = srxlo.SRXlo(policy.ParsePolicy( GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO) st, sst = pol1._BuildTokens() self.assertEquals(st, SUPPORTED_TOKENS) self.assertEquals(sst, SUPPORTED_SUB_TOKENS) def testInactiveTerm(self): output = str(srxlo.SRXlo(policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_3, self.naming), EXP_INFO)) self.failUnless('inactive: term good-term-3 {' in output, output) if __name__ == '__main__': unittest.main()
Python
0
@@ -5501,17 +5501,17 @@ OD_TERM_ -3 +1 , self.n
99784ea1c0f24787ff9c4e6ffc1cf6c5b5e0e04c
Add binding_required to Campo
importasol/db/fields.py
importasol/db/fields.py
import types from decimal import Decimal from datetime import date from ..exceptions import ValidationError, ProgrammingError class Campo(object): field_name = None size = None required = None base_type = None default = None def __init__(self, nombre, size, default=None, required=False): self.nombre = nombre self.size = size self.required = required self.default = default def is_valid(self, obj, value): raise ProgrammingError("Hay que implementar is_valid!!") def get_valor(self, obj): """ Devolver el valor que debe almacenarse en la Salida """ return getattr(obj, self.field_name) def from_valor(self, obj, value): """ De un valor que viene de un archivo o BBDD ponerlo en el objeto. """ return setattr(obj, self.field_name, value) def contribute_to_class(self, cls, field_name): self.field_name = field_name setattr(cls, field_name, None) cls._meta.add_field(field_name, self) def bind(self, obj, entorno): pass def unbind(self, obj, entorno): pass class CampoA(Campo): base_type = types.UnicodeType truncate = None def __init__(self, nombre, truncate=True, **kwargs): self.truncate = truncate return super(CampoA, self).__init__(nombre, **kwargs) def is_valid(self, obj): val = self.get_valor(obj) if len(val) > self.size: raise ValidationError("El texto es mayor de lo permitido y truncate=False") return True def get_valor(self, obj): val = super(CampoA, self).get_valor(obj) if self.truncate and val: return val[:self.size] else: return val class CampoT(CampoA): def __init__(self, nombre, **kwargs): if kwargs.has_key('size'): raise ValueError("El CampoT siempre tiene un largo de 255!") kwargs.update({'size': 255}) return super(CampoT, self).__init__(nombre, **kwargs) class CampoND(Campo): base_type = Decimal pass class CampoN(CampoND): base_type = types.IntType pass class CampoF(Campo): base_type = date def __init__(self, *args, **kwargs): kwargs.update({'size': 0}) super(CampoF, self).__init__(*args, **kwargs) class CampoAlias(object): alias_de = None field_name = None def __init__(self, alias_de): self.alias_de = alias_de def contribute_to_class(self, cls, field_name): self.field_name = field_name p = property(self.getvalue, self.setvalue) setattr(cls, field_name, p) pass def getvalue(self, obj): return getattr(obj, self.alias_de) def setvalue(self, obj, value): return setattr(obj, self.alias_de, value) class CampoV(Campo): getter = None setter = None parametros = None def __init__(self, nombre, getter=None, setter=None , parametros=tuple(), **kwargs): self.getter = getter self.setter = setter self.parametros = parametros if not kwargs.has_key('size'): kwargs.update({'size': 0}) return super(CampoV, self).__init__(nombre, **kwargs) def getvalue(self, obj): if self.getter is not None: val = self.getter(obj, *self.parametros) return val else: return None def setvalue(self, obj, value): if self.setter is not None: return self.setter(obj, value, *self.parametros) else: return None def contribute_to_class(self, cls, field_name): self.field_name = field_name p = property(self.getvalue, self.setvalue) setattr(cls, field_name, p)
Python
0
@@ -235,24 +235,123 @@ fault = None +%0A binding_required = None # indica si get_valor y from_valor requieren estar vinculado a entorno %0A%0A def __ @@ -391,16 +391,40 @@ t=None, +required=False, binding_ required @@ -549,16 +549,65 @@ default +%0A self.binding_required = binding_required %0A%0A de @@ -796,24 +796,164 @@ Salida %22%22%22%0A + if self.binding_required and not obj.is_bound:%0A raise ProgrammingError(%22%25s tiene que estar enlazado a un entorno%22 %25 obj)%0A retu @@ -1101,24 +1101,164 @@ objeto. %22%22%22%0A + if self.binding_required and not obj.is_bound:%0A raise ProgrammingError(%22%25s tiene que estar enlazado a un entorno%22 %25 obj)%0A retu
2cb3f20bfd6482b0418ee75556417b7229d765aa
Fix scene editor referencing graphics indefinitely
src/opencmiss/neon/ui/editors/sceneeditorwidget.py
src/opencmiss/neon/ui/editors/sceneeditorwidget.py
''' Copyright 2015 University of Auckland Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' """ Zinc Scene Editor Widget Allows a Zinc Scene object to be edited in Qt / Python. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. """ from PySide import QtCore, QtGui from opencmiss.zinc.field import Field from opencmiss.zinc.graphics import Graphics from opencmiss.neon.ui.editors.ui_sceneeditorwidget import Ui_SceneEditorWidget class SceneEditorWidget(QtGui.QWidget): def __init__(self, parent=None): ''' Call the super class init functions ''' QtGui.QWidget.__init__(self, parent) self._scene = None # Using composition to include the visual element of the GUI. self.ui = Ui_SceneEditorWidget() self.ui.setupUi(self) def getScene(self): ''' Get the scene currently in the editor ''' return self._scene def setScene(self, scene): ''' Set the current scene in the editor ''' if not (scene and scene.isValid()): self._scene = None else: self._scene = scene self.ui.graphics_editor.setScene(scene) if self._scene: self.ui.graphics_editor.setGraphics(self._scene.getFirstGraphics()) self._buildGraphicsList() def _getDefaultCoordinateField(self): ''' Get the first coordinate field from the current scene ''' if self._scene: fielditer = self._scene.getRegion().getFieldmodule().createFielditerator() field = fielditer.next() while field.isValid(): if field.isTypeCoordinate() and (field.getValueType() == Field.VALUE_TYPE_REAL) and \ (field.getNumberOfComponents() <= 3) and field.castFiniteElement().isValid(): return field field = fielditer.next() return None def _getGraphicsDisplayName(self, graphics): ''' Build a display name from the graphics graphics_type and domain ''' graphics_type = graphics.getType() fieldDomainType = graphics.getFieldDomainType() if graphics_type == Graphics.TYPE_POINTS: if fieldDomainType == Field.DOMAIN_TYPE_POINT: return "point" if fieldDomainType == Field.DOMAIN_TYPE_NODES: return "node points" if fieldDomainType == Field.DOMAIN_TYPE_DATAPOINTS: return "data points" return "element points" elif graphics_type == Graphics.TYPE_LINES: return "lines" elif graphics_type == Graphics.TYPE_SURFACES: return "surfaces" elif graphics_type == Graphics.TYPE_CONTOURS: return "contours" elif graphics_type == Graphics.TYPE_STREAMLINES: return "streamlines" def _buildGraphicsList(self): ''' Fill the graphics list view with the list of graphics for current region/scene ''' self._graphicsItems = QtGui.QStandardItemModel(self.ui.graphics_listview) selectedIndex = None if self._scene: selectedGraphics = self.ui.graphics_editor.getGraphics() graphics = self._scene.getFirstGraphics() while graphics and graphics.isValid(): name = self._getGraphicsDisplayName(graphics) item = QtGui.QStandardItem(name) item.setData(graphics) item.setCheckable(True) item.setEditable(False) visible = graphics.getVisibilityFlag() item.setCheckState(QtCore.Qt.Checked if visible else QtCore.Qt.Unchecked) 
self._graphicsItems.appendRow(item) if graphics == selectedGraphics: selectedIndex = self._graphicsItems.indexFromItem(item) graphics = self._scene.getNextGraphics(graphics) self.ui.graphics_listview.setModel(self._graphicsItems) # self.ui.graphics_listview.setMovement(QtGui.QListView.Snap) # self.ui.graphics_listview.setDragDropMode(QtGui.QListView.InternalMove) # self.ui.graphics_listview.setDragDropOverwriteMode(False) # self.ui.graphics_listview.setDropIndicatorShown(True) if selectedIndex: self.ui.graphics_listview.setCurrentIndex(selectedIndex) self.ui.graphics_listview.show() def graphicsListItemClicked(self, modelIndex): ''' Either changes visibility flag or selects current graphics ''' model = modelIndex.model() item = model.item(modelIndex.row()) graphics = item.data() visibilityFlag = item.checkState() == QtCore.Qt.Checked graphics.setVisibilityFlag(visibilityFlag) selectedModelIndex = self.ui.graphics_listview.currentIndex() selectedItem = model.item(selectedModelIndex.row()) selectedGraphics = selectedItem.data() if graphics == selectedGraphics: self.ui.graphics_editor.setGraphics(selectedGraphics) def addGraphicsEntered(self, name): ''' Add a new chosen graphics type ''' if not self._scene: return graphicsType = Graphics.TYPE_INVALID fieldDomainType = Field.DOMAIN_TYPE_INVALID if name == "point": graphicsType = Graphics.TYPE_POINTS fieldDomainType = Field.DOMAIN_TYPE_POINT elif name == "node points": graphicsType = Graphics.TYPE_POINTS fieldDomainType = Field.DOMAIN_TYPE_NODES elif name == "data points": graphicsType = Graphics.TYPE_POINTS fieldDomainType = Field.DOMAIN_TYPE_DATAPOINTS elif name == "element points": graphicsType = Graphics.TYPE_POINTS fieldDomainType = Field.DOMAIN_TYPE_MESH_HIGHEST_DIMENSION elif name == "lines": graphicsType = Graphics.TYPE_LINES elif name == "surfaces": graphicsType = Graphics.TYPE_SURFACES elif name == "contours": graphicsType = Graphics.TYPE_CONTOURS elif name == "streamlines": graphicsType = Graphics.TYPE_STREAMLINES else: pass if graphicsType != Graphics.TYPE_INVALID: self._scene.beginChange() graphics = self._scene.createGraphics(graphicsType) if fieldDomainType != Field.DOMAIN_TYPE_INVALID: graphics.setFieldDomainType(fieldDomainType) if fieldDomainType != Field.DOMAIN_TYPE_POINT: coordinateField = self._getDefaultCoordinateField() if coordinateField is not None: graphics.setCoordinateField(coordinateField) self._scene.endChange() self.ui.graphics_editor.setGraphics(graphics) self._buildGraphicsList() self.ui.add_graphics_combobox.setCurrentIndex(0) def deleteGraphicsClicked(self): ''' Delete the current graphics type ''' if not self._scene: return graphics = self.ui.graphics_editor.getGraphics() if graphics: nextGraphics = self._scene.getNextGraphics(graphics) if not (nextGraphics and nextGraphics.isValid()): nextGraphics = self._scene.getPreviousGraphics(graphics) if not (nextGraphics and nextGraphics.isValid()): nextGraphics = self._scene.getFirstGraphics() if nextGraphics == graphics: nextGraphics = None self.ui.graphics_editor.setGraphics(nextGraphics) self._scene.removeGraphics(graphics) self._buildGraphicsList()
Python
0
@@ -1405,24 +1405,59 @@ torWidget()%0A + self._graphicsItems = None%0A self @@ -3699,32 +3699,165 @@ ene%0A '''%0A + if self._graphicsItems is not None:%0A self._graphicsItems.clear() # Must clear or holds on to graphics references%0A self._gr
0230de965fbc4247bf1f087f8454d09f30a6b0f3
Fix naming convention of array references
angr/engines/soot/expressions/newArray.py
angr/engines/soot/expressions/newArray.py
from .base import SimSootExpr from ..values import SimSootValue_ArrayRef import logging l = logging.getLogger('angr.engines.soot.expressions.newarray') class SimSootExpr_NewArray(SimSootExpr): def __init__(self, expr, state): super(SimSootExpr_NewArray, self).__init__(expr, state) def _execute(self): base_type = self.expr.base_type size = self._translate_expr(self.expr.size).expr self.expr = self.new_array(self.state, base_type, size) @staticmethod def new_array(state, base_type, size): """ Allocates a new array in memory and returns the reference of the base element. """ size_bounded = SimSootExpr_NewArray._bound_array_size(state, size) # arrays are stored on the heap # => create a unique reference javavm_memory = state.get_javavm_view_of_plugin('memory') heap_alloc_id = "{type}_array_{uuid}".format(type=base_type, uuid=javavm_memory.get_new_uuid()) # return the reference of the base element # => elements as such getting lazy initialized in the javavm memory return SimSootValue_ArrayRef(heap_alloc_id, 0, base_type, size_bounded) @staticmethod def _bound_array_size(state, array_size): # check if array size can exceed MAX_ARRAY_SIZE javavm_memory = state.get_javavm_view_of_plugin('memory') max_array_size = state.solver.BVV(javavm_memory.max_array_size, 32) size_exceeds_maximum = state.solver.eval_upto( max_array_size.SGE(array_size), 2 ) # overwrite size, if it *always* exceeds the maximum if not True in size_exceeds_maximum: l.warning("Array size %s always execeeds maximum. " "It gets overwritten with the maximum %s." % (array_size, max_array_size)) return max_array_size # bound size, if it *can* exceeds the maximum if True in size_exceeds_maximum and\ False in size_exceeds_maximum: l.warning("Array size %s can execeed maximum. " "It gets bounded with the maximum %s." % (array_size, max_array_size)) state.solver.add(max_array_size.SGE(array_size)) return array_size
Python
0.000289
@@ -897,16 +897,28 @@ _id = %22%7B +uuid%7D.%7Bbase_ type%7D_ar @@ -924,15 +924,8 @@ rray -_%7Buuid%7D %22.fo @@ -929,16 +929,21 @@ .format( +base_ type=bas @@ -950,16 +950,21 @@ e_type,%0A +
063acbeb5c95bf8e02aff7cf7f238606d814567a
Change Billet creation payload values
totvserprm/financial.py
totvserprm/financial.py
# -*- coding: utf-8 -*- from datetime import datetime from baseapi import BaseApi class Client(BaseApi): dataservername = 'FinCFODataBR' def create(self,**kwargs): return super(Client, self).create({ 'NewDataSet': { 'FCFO': { 'ATIVO': kwargs.get('ativo'), # enviar -1 para que sejá criado de forma incremental 'CODCFO': -1, 'IDCFO': -1, 'CODEXTERNO': kwargs.get('codexterno'), 'CODCOLIGADA': kwargs.get('codcoligada'), 'CGCCFO': kwargs.get('cpf_cnpj'), 'TIPORUA': kwargs.get('tipo_rua'), 'TIPOBAIRRO': kwargs.get('tipo_bairro'), 'BAIRRO': kwargs.get('bairro'), 'RUA': kwargs.get('rua'), 'NUMERO': kwargs.get('numero'), 'CEP': kwargs.get('cep'), 'CODETD': kwargs.get('estado'), 'CIDADE': kwargs.get('cidade'), 'CODMUNICIPIO': kwargs.get('codigo_municipio'), 'PAIS': kwargs.get('cod_pais'), 'DTNASCIMENTO': '{:%Y-%m-%d}T03:00:00.000'.format(kwargs.get('data_nascimento')), 'NOME': kwargs.get('nome'), 'NOMEFANTASIA': kwargs.get('nome'), 'PAGREC': kwargs.get('classificacao'), 'PESSOAFISOUJUR': kwargs.get('categoria'), } } }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada'))) class Billet(BaseApi): dataservername = 'FinLanBoletoData' def create(self,**kwargs): return super(Billet, self).create({ 'NewDataSet': { 'FLAN': { 'CODCOLIGADA': kwargs.get('codcoligada'), 'IDLAN': kwargs.get('id_lancamento'), 'NUMERODOCUMENTO': -1, 'NFOUDUP': 0, 'CLASSIFICACAO': 0, 'PAGREC': 1, 'STATUSLAN': 0, 'CODTDO': kwargs.get('tipo_documento'), 'DATAVENCIMENTO': kwargs.get('data_vencimento'), 'DATAEMISSAO': "{:%d/%m/%Y %H:%M:%S}".format(datetime.now()), 'VALORORIGINAL': kwargs.get('valor'), 'CODCOLCFO': kwargs.get('codcoligada'), 'CODCFO': kwargs.get('codcliente'), 'CODFILIAL': kwargs.get('codfilial'), 'SERIEDOCUMENTO': kwargs.get('serie_documento'), 'CODCXA': kwargs.get('conta'), 'CODMOEVALORORIGINAL': 'R$', 'NUMLOTECONTABIL': 0, 'NUMEROCONTABIL': 0, 'NUMCONTABILBX': 0, 'TIPOCONTABILLAN': 0, 'FILIALCONTABIL': 1, 'HISTORICO': kwargs.get('historico'), 'CODCCUSTO': kwargs.get('centro_custo'), 'CODTCF': '000', 'CODCOLSACADO': '0' } } }, 'CODCOLIGADA={}'.format(kwargs.get('codcoligada')))
Python
0.000001
@@ -75,16 +75,28 @@ BaseApi%0A +import uuid%0A %0A%0Aclass @@ -1752,34 +1752,30 @@ ' -NewDataSet +FinLAN ': %7B%0A @@ -1888,37 +1888,11 @@ N': -kwargs.get('id_lancamento'), +-1, %0A @@ -1923,26 +1923,41 @@ DOCUMENTO': --1 +str(uuid.uuid4()) ,%0A @@ -2395,33 +2395,9 @@ O': -kwargs.get('codcoligada') +1 ,%0A @@ -2550,37 +2550,13 @@ O': -kwargs.get('serie_documento') +'@@@' ,%0A @@ -2982,57 +2982,8 @@ '),%0A -%09%09 'CODTCF': '000',%0A%09%09 'CODCOLSACADO': '0'%0A @@ -2987,32 +2987,33 @@ %7D +, %0A %7D%0A
e4e10ee0ae5a18cfec0e15b7b85986b7f4fc4f9d
Fix prefetched fields in Institutions in API
feder/institutions/viewsets.py
feder/institutions/viewsets.py
import django_filters from rest_framework import filters, viewsets from teryt_tree.rest_framework_ext.viewsets import custom_area_filter from .models import Institution, Tag from .serializers import InstitutionSerializer, TagSerializer class InstitutionFilter(filters.FilterSet): jst = django_filters.CharFilter(method=custom_area_filter) def __init__(self, *args, **kwargs): super(InstitutionFilter, self).__init__(*args, **kwargs) self.filters['name'].lookup_expr = 'icontains' class Meta: model = Institution fields = ['name', 'tags', 'jst', 'regon'] class InstitutionViewSet(viewsets.ModelViewSet): queryset = (Institution.objects. select_related('jst'). prefetch_related('tags'). all()) serializer_class = InstitutionSerializer filter_backends = (filters.DjangoFilterBackend,) filter_class = InstitutionFilter class TagViewSet(viewsets.ModelViewSet): queryset = Tag.objects.all() serializer_class = TagSerializer
Python
0.000007
@@ -762,16 +762,26 @@ d('tags' +,'parents' ).%0A
9bdaf963843a9f0b44487ea3b258b50b328153d8
Remove redis connection logic from each view, make it global, keep it threadsafe
firetower/web/firetower_web.py
firetower/web/firetower_web.py
from calendar import timegm import datetime import time from flask import Flask, render_template from firetower import redis_util REDIS_HOST = "localhost" REDIS_PORT = 6379 app = Flask(__name__) def timestamp(dttm): return timegm(dttm.utctimetuple()) @app.route("/") def root(): lines = [] redis = redis_util.Redis(REDIS_HOST, REDIS_PORT) categories = redis.get_categories() for cat in categories: lines.append("<li>%s</li>" % cat) return "<ul>%s</ul>" % "\n".join(lines) @app.route("/default/") def default(): redis = redis_util.Redis(REDIS_HOST, REDIS_PORT) cat_dict = redis.conn.hgetall("category_ids") end = datetime.datetime.now() start = end - datetime.timedelta(hours=1) results = [] for cat_id in cat_dict: cat = cat_dict[cat_id] time_series = redis.get_timeseries(cat, timestamp(start), timestamp(end)) items = [(int(x)*1000, int(y)) for x,y in time_series.items()] items.sort(lambda x,y: cmp(x[0], y[0])) results.append( (cat_id, cat, items) ) return render_template( "last_5_index.html", categories = cat_dict.items(), results = results ) @app.route("/aggregate") def aggregate(): redis = redis_util.Redis(REDIS_HOST, REDIS_PORT) cat_dict = redis.conn.hgetall("category_ids") start = end - 300 error_totals = {} for cat_id in cat_dict: cat = cat_dict[cat_id] time_series = redis.get_timeseries(cat, start, end) for time_point in time_series: error_totals[cat_id] = error_totals.get(cat_id, 0) + int(time_point[1]) totals = [] print error_totals for i in error_totals.items(): totals.append((i[0], cat_dict[i[0]], i[1])) return render_template( "aggregate.html", totals = totals) def main(): app.run(debug=True, use_evalex=False, host='0.0.0.0')
Python
0
@@ -170,16 +170,65 @@ T = 6379 +%0AREDIS = redis_util.Redis(REDIS_HOST, REDIS_PORT) %0A%0Aapp = @@ -356,61 +356,8 @@ %5B%5D%0A - redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)%0A @@ -365,29 +365,29 @@ ategories = -redis +REDIS .get_categor @@ -550,61 +550,8 @@ ():%0A - redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)%0A @@ -553,37 +553,37 @@ %0A cat_dict = -redis +REDIS .conn.hgetall(%22c @@ -768,37 +768,37 @@ time_series = -redis +REDIS .get_timeseries( @@ -1182,61 +1182,8 @@ ():%0A - redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)%0A @@ -1193,21 +1193,21 @@ _dict = -redis +REDIS .conn.hg @@ -1355,21 +1355,21 @@ eries = -redis +REDIS .get_tim
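The commit above creates the Redis client once at module import instead of building a new redis_util.Redis inside every Flask view. A small, dependency-free sketch of that "shared module-level client" pattern follows; the FakeStore class, its methods, and the view function are invented for illustration and are not part of firetower or redis-py (whose clients are already safe to share across threads).

import threading
from collections import Counter


class FakeStore:
    # Stand-in for a connection-like client; the lock keeps this toy store threadsafe.
    def __init__(self, host, port):
        self._lock = threading.Lock()
        self._data = Counter()

    def incr(self, key):
        with self._lock:
            self._data[key] += 1
            return self._data[key]

    def get(self, key):
        with self._lock:
            return self._data[key]


# Build the shared client once at import time, not once per request handler.
STORE = FakeStore("localhost", 6379)


def view_hit():
    return STORE.incr("hits")


if __name__ == "__main__":
    threads = [threading.Thread(target=view_hit) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(STORE.get("hits"))  # -> 10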
447a8bd2ba1dc448d19ca1cafa27f917b4aae18e
Bump version to 1.1.3dev
toughradius/__init__.py
toughradius/__init__.py
#!/usr/bin/env python __version__ = '1.1.2' __license__ = 'AGPL' __auther__ = 'support@toughstruct.com'
Python
0.000001
@@ -39,9 +39,12 @@ 1.1. -2 +3dev '%0A__
2b45ccfa122e9b197de1ff8a30d4b6bf9e0abe52
Save figure as results/analyze_results.png and adjust z-axis label
scripts/analyze_results.py
scripts/analyze_results.py
"""Tools for viewing and analyzing prediction results .. moduleauthor:: Jan Van Bruggen <jancvanbruggen@gmail.com> .. moduleauthor:: Quinn Osha <qosha@caltech.edu> """ from fnmatch import fnmatch from matplotlib import cm from matplotlib.ticker import MaxNLocator import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import proj3d from os.path import abspath, dirname, join from os import listdir import sys sys.path.append(abspath(dirname(dirname(__file__)))) from utils.data_paths import RESULTS_DIR_PATH def graph_rmse_surface(algorithm_name, train_name, test_name, max_epochs, min_features, max_features): desired_file_paths = [] file_name_pattern = ('{alg}_{train}_*epochs_*features_rmse_{test}_*.txt' .format(alg=algorithm_name, train=train_name, test=test_name)) for file_name in listdir(RESULTS_DIR_PATH): if fnmatch(file_name, file_name_pattern): file_name_parts = split_results_file_name(file_name) file_name_epochs = file_name_parts['epochs'] file_name_features = file_name_parts['features'] result = file_name_parts['result'] if (result == 'rmse' and file_name_epochs == max_epochs and min_features <= file_name_features <= max_features): file_path = join(RESULTS_DIR_PATH, file_name) desired_file_paths.append(file_path) points = [] for file_path in desired_file_paths: file_name_parts = split_results_file_name(file_path) features = file_name_parts['features'] rmse_values = [] with open(file_path) as rmse_file: for line in rmse_file: rmse_value = float(line.strip()) rmse_values.append(rmse_value) if len(rmse_values) == 1: points.append((max_epochs, features, rmse_values[0])) else: epoch = 0 for rmse_value in rmse_values: epoch += 1 points.append((epoch, features, rmse_value)) figure = plt.figure() axes = figure.add_subplot(111, projection='3d') x = [value[0] for value in points] y = [value[1] for value in points] z = [value[2] for value in points] axes.set_xlim(min(x), max(x)) axes.set_ylim(min(y), max(y)) axes.set_zlim(int(min(z) * 100) / 100, int(max(z) * 100 + 1) / 100) axes.get_xaxis().set_major_locator(MaxNLocator(integer=True)) axes.get_yaxis().set_major_locator(MaxNLocator(integer=True)) min_rmse_index = z.index(min(z)) min_rmse_x = x[min_rmse_index] min_rmse_y = y[min_rmse_index] min_rmse_z = z[min_rmse_index] min_rmse_color = '#00DD00' axes.plot([min_rmse_x] * 2, axes.get_ylim(), zs=[axes.get_zlim()[0]] * 2, color=min_rmse_color, ls=':') axes.plot(axes.get_xlim(), [min_rmse_y] * 2, zs=[axes.get_zlim()[0]] * 2, color=min_rmse_color, ls=':') axes.plot([min_rmse_x] * 2, [min_rmse_y] * 2, zs=[axes.get_zlim()[0], min_rmse_z], color=min_rmse_color, ls=':') if len(set(x)) == 1 or len(set(y)) == 1: axes.plot(x, y, z) else: axes.plot_trisurf(x, y, z, cmap=cm.CMRmap, linewidth=0) axes.set_title('Results from training on {train}, testing on {test}' .format(train=train_name, test=test_name)) axes.set_xlabel('Number of Epochs') axes.set_ylabel('Number of Features') axes.set_zlabel('RMSE') xp, yp, _ = proj3d.proj_transform(min_rmse_x, min_rmse_y, min_rmse_z, axes.get_proj()) label = axes.annotate( '{:.3g}'.format(min_rmse_z), xy=(xp, yp), xytext = (-20, 40), textcoords = 'offset points', ha = 'right', va = 'bottom', bbox = dict(boxstyle='round,pad=0.5', fc=min_rmse_color, alpha=0.5), arrowprops = dict(arrowstyle='->', connectionstyle='arc3,rad=0', color=min_rmse_color)) def update_position(e): xp, yp, _ = proj3d.proj_transform(min_rmse_x, min_rmse_y, min_rmse_z, axes.get_proj()) label.xy = xp, yp label.update_positions(figure.canvas.renderer) figure.canvas.draw() 
figure.canvas.mpl_connect('button_release_event', update_position) plt.show() def lowest_rmse(file_name): rmse_values = [] rmse_file_path = join(RESULTS_DIR_PATH, file_name) with open(rmse_file_path, 'r') as rmse_file: for line in rmse_file: rmse_value = line.strip(); rmse_values.append(rmse_value) return min(rmse_values) def split_results_file_name(file_name): algorithm, train, epochs, features, result, test, *_ = file_name.split('_') parts = {'algorithm': algorithm, 'epochs': int(epochs[:epochs.index('e')]), 'features': int(features[:features.index('f')]), 'result': result, 'test': test, 'train': train} return parts if __name__ == '__main__': graph_rmse_surface(algorithm_name='svd', train_name='base', test_name='valid', max_epochs=8, min_features=40, max_features=60)
Python
0
@@ -257,16 +257,33 @@ NLocator +, MultipleLocator %0Aimport @@ -3501,16 +3501,17 @@ el('RMSE + ')%0A%0A @@ -4345,16 +4345,79 @@ sition)%0A + plt.savefig(join(RESULTS_DIR_PATH, 'analyze_results.png'))%0A plt.
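The diff above adds a plt.savefig call so the RMSE plot is written to results/analyze_results.png before plt.show(). A standalone sketch of saving a figure to disk, assuming matplotlib is available; the sample data and temporary output directory below are made up for the demo.

import os
import tempfile

import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs without a display
import matplotlib.pyplot as plt

results_dir = tempfile.mkdtemp()  # stand-in for a real results/ directory
fig, axes = plt.subplots()
axes.plot([1, 2, 3, 4], [0.95, 0.92, 0.90, 0.91])
axes.set_xlabel("Number of Epochs")
axes.set_ylabel("RMSE")

out_path = os.path.join(results_dir, "analyze_results.png")
fig.savefig(out_path)  # write the image before (or instead of) calling plt.show()
print("figure saved to", out_path)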
1c48f9ad2c2a66d7c15c9216665b7f802d3498b4
Set deprecation_summary_result so we can summarize deprecations and they get written to the report plist if specified.
SharedProcessors/DeprecationWarning.py
SharedProcessors/DeprecationWarning.py
#!/usr/bin/python # # Copyright 2019 Greg Neagle # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor that outputs a warning message. Intended to alert recipe users of upcoming removal of a recipe.""" from autopkglib import Processor __all__ = ["DeprecationWarning"] class DeprecationWarning(Processor): """This processor outputs a warning that the recipe has been deprecated.""" input_variables = { "warning_message": { "required": False, "description": "Warning message to output.", }, } output_variables = { } description = __doc__ def main(self): warning_message = self.env.get( "warning_message", "### This recipe has been deprecated. It may be removed soon. ###" ) self.output(warning_message, verbose_level=0) if __name__ == '__main__': PROCESSOR = DeprecationWarning() PROCESSOR.execute_shell()
Python
0
@@ -701,16 +701,27 @@ pe.%22%22%22%0A%0A +import os%0A%0A from aut @@ -1078,24 +1078,139 @@ riables = %7B%0A + %22deprecation_summary_result%22: %7B%0A %22description%22: %22Description of interesting results.%22%0A %7D%0A %7D%0A de @@ -1450,26 +1450,483 @@ sage -, verbose_level=0) +)%0A recipe_name = os.path.basename(self.env%5B'RECIPE_PATH'%5D)%0A if recipe_name.endswith('.recipe'):%0A recipe_name = os.path.splitext(recipe_name)%5B0%5D%0A self.env%5B%22deprecation_summary_result%22%5D = %7B%0A 'summary_text': 'The following recipes have deprecation warnings:',%0A 'report_fields': %5B'name', 'warning'%5D,%0A 'data': %7B%0A 'name': recipe_name,%0A 'warning': warning_message%0A %7D%0A %7D %0A%0A%0Ai
1e139567767a98914df90ec152d543bb8bfde38c
Add test for GET /register
basic_zappa_project/public/views_tests.py
basic_zappa_project/public/views_tests.py
from basic_zappa_project.test_utils import BaseTestCase class TestViews(BaseTestCase): def test_status(self): expected = {'status': 'ok'} response = self.client.get('/status') self.assert200(response) self.assertEqual(response.json, expected) def test_about(self): response = self.client.get('/about') self.assert200(response) def test_home_get(self): response = self.client.get('/') self.assert200(response)
Python
0.000002
@@ -463,28 +463,144 @@ self.assert200(response)%0A +%0A def test_register_get(self):%0A response = self.client.get('/register')%0A self.assert200(response)%0A%0A
a82fc92938a647de620cf8a96fd5907c08060c32
Fix mistake: rename install functions' 'what' parameter to 'fname'
scripts/install/install.py
scripts/install/install.py
import os import subprocess import os.path def apt_get_install(what): with open(fname, 'r') as f: items = f.readlines() for item in items: os.system('sudo apt-get install -y %s' % (item)) def npm_global_install(what): with open(fname, 'r') as f: items = f.readlines() for item in items: os.system('sudo npm -g install %s' % (item)) def pip_install(what): with open(fname, 'r') as f: items = f.readlines() for item in items: os.system('sudo pip install %s' % (item)) def cmd_exists(cmd): # this is from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python return subprocess.call("type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 functions_to_handle_requirements = {} functions_to_handle_requirements['apt_get'] = apt_get_install functions_to_handle_requirements['npm'] = npm_global_install functions_to_handle_requirements['pip'] = pip_install order_of_files_to_handle = ['apt_get_requirements.txt', 'npm_requirements.txt', 'pip_requirements.txt'] for fname in order_of_files_to_handle: if os.path.isfile(fname): # assume fname endswith _requirements.txt l = len('_requirements.txt') fname_first_part = fname[:-l] functions_to_handle_requirements[fname_first_part](fname)
Python
0.999908
@@ -49,36 +49,37 @@ apt_get_install( -what +fname ):%0A with open @@ -223,36 +223,37 @@ _global_install( -what +fname ):%0A with open @@ -398,12 +398,13 @@ all( -what +fname ):%0A
5179172b4a6d61ea60fec9cd7624725031017482
Make use of the sqlite3 package
dump.py
dump.py
#!/usr/bin/env python import glob, os, subprocess, sys sys.path.append(os.path.abspath("csv2sqlite")) import csv2sqlite setup_sql = { "job_events": """ DROP TABLE IF EXISTS `job_events`; CREATE TABLE `job_events` ( `time` INTEGER NOT NULL, `missing info` INTEGER, `job ID` INTEGER NOT NULL, `event type` INTEGER NOT NULL, `user` TEXT, `scheduling class` INTEGER, `job name` TEXT, `logical job name` TEXT ); """, "task_events": """ DROP TABLE IF EXISTS `task_events`; CREATE TABLE `task_events` ( `time` INTEGER NOT NULL, `missing info` INTEGER, `job ID` INTEGER NOT NULL, `task index` INTEGER NOT NULL, `machine ID` INTEGER, `event type` INTEGER NOT NULL, `user` TEXT, `scheduling class` INTEGER, `priority` INTEGER NOT NULL, `CPU request` REAL, `memory request` REAL, `disk space request` REAL, `different machines restriction` INTEGER ); """, "task_usage": """ DROP TABLE IF EXISTS `task_usage`; CREATE TABLE `task_usage` ( `start time` INTEGER NOT NULL, `end time` INTEGER NOT NULL, `job ID` INTEGER NOT NULL, `task index` INTEGER NOT NULL, `machine ID` INTEGER NOT NULL, `CPU rate` REAL, `canonical memory usage` REAL, `assigned memory usage` REAL, `unmapped page cache` REAL, `total page cache` REAL, `maximum memory usage` REAL, `disk IO time` REAL, `local disk space usage` REAL, `maximum CPU rate` REAL, `maximum disk IO time` REAL, `cycles per instruction` REAL, `memory accesses per instruction` REAL, `sample portion` REAL, `aggregation type` INTEGER, `sampled CPU usage` REAL ); """ } def fail(message): print(message) sys.exit(1) def setup_sqlite(table): filename = 'google.sqlite3' if not table in setup_sql: fail('the table is unknown') sql = setup_sql[table] p = subprocess.Popen(['sqlite3', filename], stdin=subprocess.PIPE) p.communicate(input=bytes(sql)) p.wait() if p.returncode != 0: fail('cannot set up the database') return filename def find_parts(table): return sorted(glob.glob(os.path.join(table, '*.csv.gz'))) for table in sys.argv[1:]: sqlite = setup_sqlite(table) headers = 'headers/%s.csv' % table types = 'types/%s.csv' % table for csv in find_parts(table): print('Processing %s...' % csv) csv2sqlite.convert(csv, sqlite, table, headers, 'gzip', types)
Python
0.000001
@@ -34,25 +34,22 @@ b, os, s -ubprocess +qlite3 , sys%0A%0As @@ -2201,73 +2201,94 @@ -if not table in setup_sql: fail('the table is unknown' +connection = sqlite3.connect(filename)%0A cursor = connection.cursor( )%0A +for sql -= +in set @@ -2304,188 +2304,94 @@ ble%5D -%0A p = subprocess.Popen(%5B'sqlite3', filename%5D, stdin=subprocess.PIPE)%0A p.communicate(input=bytes(sql))%0A p.wait()%0A if p.returncode != 0: fail('cannot set up the database' +.split(';'):%0A cursor.execute(sql)%0A connection.commit()%0A connection.close( )%0A
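The change above drops the shell call to the sqlite3 command-line tool and runs the setup SQL through the standard-library sqlite3 module instead. A self-contained sketch of that approach; the table and column names below are invented for the example.

import sqlite3

setup_sql = """
DROP TABLE IF EXISTS events;
CREATE TABLE events (time INTEGER NOT NULL, job_id INTEGER NOT NULL);
"""

connection = sqlite3.connect(":memory:")  # a file path such as "google.sqlite3" works the same way
cursor = connection.cursor()
for statement in setup_sql.split(";"):  # same splitting idea the commit uses
    if statement.strip():  # skip the empty chunk after the final semicolon
        cursor.execute(statement)
connection.commit()

cursor.execute("INSERT INTO events VALUES (?, ?)", (0, 42))
connection.commit()
print(cursor.execute("SELECT COUNT(*) FROM events").fetchone()[0])  # -> 1
connection.close()

connection.executescript(setup_sql) would also execute the whole multi-statement block in a single call.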
72169058552ffd783c2ab309e5a77ff817928186
Make sub-directories for fastq_screen output
ehive/runnable/process/RunFastqscreen.py
ehive/runnable/process/RunFastqscreen.py
import os, subprocess,fnmatch from shutil import copytree from ehive.runnable.IGFBaseProcess import IGFBaseProcess from igf_data.utils.fileutils import get_temp_dir,remove_dir class RunFastqscreen(IGFBaseProcess): def param_defaults(self): params_dict=super(IGFBaseProcess,self).param_defaults() params_dict.update({ 'force_overwrite':True, 'tag':None, 'fastqscreen_dir_label':'fastqscreen', 'fastqscreen_exe':'fastqscreen', 'fastqscreen_conf':None, 'fastqscreen_options':{'--aligner':'bowtie2', '--force':'', '--quiet':'', '--subset':'100000', '--threads':'1'}, }) return params_dict def run(self): try: fastq_file=self.param_required('fastq_file') seqrun_igf_id=self.param_required('seqrun_igf_id') base_results_dir=self.param_required('base_results_dir') project_name=self.param_required('project_name') seqrun_date=self.param_required('seqrun_date') flowcell_id=self.param_required('flowcell_id') fastqscreen_exe=self.param_required('fastqscreen_exe') fastqscreen_conf=self.param_required('fastqscreen_conf') tag=self.param_required('tag') fastqscreen_options=self.param('fastqscreen_options') force_overwrite=self.param('force_overwrite') fastqscreen_dir_label=self.param('fastqscreen_dir_label') lane_index_info=os.path.basename(os.path.dirname(fastq_file)) # get the lane and index length info fastq_file_label=os.path.basename(fastq_file).replace('.fastq.gz','') fastqscreen_result_dir=os.path.join(base_results_dir, \ project_name, \ fastqscreen_dir_label, \ seqrun_date, \ flowcell_id, \ lane_index_info,\ tag,\ fastq_file_label) # result dir path is generic if os.path.exists(fastqscreen_result_dir) and force_overwrite: remove_dir(fastqscreen_result_dir) # remove existing output dir if force_overwrite is true if not os.path.exists(fastqscreen_result_dir): os.mkdir(fastqscreen_result_dir) # create output dir if its not present temp_work_dir=get_temp_dir() # get a temp work dir if not os.path.exists(fastq_file): raise IOError('fastq file {0} not readable'.format(fastq_file)) # raise if fastq file path is not readable filename=os.path.basename(fastq_file).replace('.fastq.gz','') # get fastqfile base name and remove file ext fastqscreen_output=os.path.join(temp_work_dir,filename) os.mkdir(fastqscreen_output) # create fastqc output dir fastqscreen_param=self.format_tool_options(fastqscreen_options) # format fastqc params fastqscreen_cmd=[fastqscreen_exe, '-conf',fastqscreen_conf, '--outdir',fastqscreen_output, ] # fastqscreen base parameters fastqscreen_cmd.extend(fastqscreen_param) # add additional parameters fastqscreen_cmd.extend(fastq_file) # fastqscreen input file subprocess.check_call(fastqscreen_cmd) # run fastqscreen copytree(fastqscreen_output,fastqscreen_result_dir) # copy fastqscreen output files fastqscreen_stat=None fastqscreen_html=None fastqscreen_png=None for root,dirs,files in os.walk(top=fastqscreen_result_dir): for file in files: if fnmatch.fnmatch(file, '*.txt'): fastqscreen_stat=os.path.join(root,file) if fnmatch.fnmatch(file, '*.html'): fastqscreen_html=os.path.join(root,file) if fnmatch.fnmatch(file, '*.png'): fastqscreen_png=os.path.join(root,file) self.param('dataflow_params',{'fastqscreen_html':fastqscreen_html, \ 'fastqscreen': \ {'fastqscreen_path':fastqscreen_result_dir, 'fastqscreen_stat':fastqscreen_stat, 'fastqscreen_html':fastqscreen_html, 'fastqscreen_png':fastqscreen_png}}) # set dataflow params except Exception as e: message='seqrun: {2}, Error in {0}: {1}'.format(self.__class__.__name__, \ e, \ seqrun_igf_id) self.warning(message) 
self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs raise
Python
0
@@ -2450,36 +2450,39 @@ r):%0A os.m -k +ake dir +s (fastqscreen_res @@ -2484,39 +2484,36 @@ n_result_dir -) +,mode=0o775)
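The diff above replaces os.mkdir with os.makedirs(..., mode=0o775) so that any missing parent directories of the result dir are created too. A minimal runnable comparison, using a throwaway temporary path invented for the demo (the effective permissions are still filtered by the process umask).

import os
import tempfile

base = tempfile.mkdtemp()
nested = os.path.join(base, "project", "fastqscreen", "run1")

try:
    os.mkdir(nested)  # fails: the intermediate directories do not exist yet
except OSError as err:
    print("mkdir failed:", err)

os.makedirs(nested, mode=0o775)  # creates every missing level in one call
print(os.path.isdir(nested))  # -> True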
7f22812917846dbc420eee8c80cf3a4ee7d2fc1c
Fix typo in tag (#618)
scripts/publish_release.py
scripts/publish_release.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Insert a TOML header into the latest release note.""" from __future__ import absolute_import, print_function import sys from datetime import date from glob import glob from builtins import open from os.path import join, basename from shutil import copy def insert_break(lines, break_pos=9): """ Insert a <!-- more --> tag for larger release notes. Parameters ---------- lines : list of str The content of the release note. break_pos : int Line number before which a break should approximately be inserted. Returns ------- list of str The text with the inserted tag or no modification if it was sufficiently short. """ def line_filter(line): if len(line) == 0: return True return any(line.startswith(c) for c in "-*+") if len(lines) <= break_pos: return lines newlines = [ i for i, line in enumerate(lines[break_pos:], start=break_pos) if line_filter(line.strip())] if len(newlines) > 0: break_pos = newlines[0] lines.insert(break_pos, "<!-- more -->\n") return lines def build_hugo_md(filename, tag, bump): """ Build the markdown release notes for Hugo. Inserts the required TOML header with specific values and adds a break for long release notes. Parameters ---------- filename : str, path The release notes file. tag : str The tag, following semantic versioning, of the current release. bump : {"major", "minor", "patch", "alpha", "beta"} The type of release. """ header = [ '+++\n', 'date = "{}"\n'.format(date.today().isoformat()), 'title = "{}"\n'.format(tag), 'author = "The COBRApy Team"\n', 'release = "{}"\n'.format(bump), '+++\n', '\n' ] with open(filename, "r") as file_h: content = insert_break(file_h.readlines()) header.extend(content) with open(filename, "w") as file_h: file_h.writelines(header) def intify(filename): """ Turn a release note filename into something sortable. Parameters ---------- filename : str A release note of expected filename format '<major>.<minor>.<patch>.md'. Returns ------- tuple A pair of the major and minor versions as integers. """ tmp = filename[:-3].split(".") return int(tmp[0]), int(tmp[1]) def find_bump(target, tag): """Identify the kind of release by comparing to existing ones.""" tmp = tag.split(".") existing = [intify(basename(f)) for f in glob(join(target, "[0-9]*.md"))] latest = max(existing) if int(tmp[0]) > latest[0]: return "major" elif int(tmp[1]) > latest[1]: return "minor" else: return "patch" def main(argv): """ Identify the release type and create a new target file with TOML header. Requires three arguments. """ source, target, tag = argv if "a" in tag: bump = "alpha" if "b" in tag: bump = "beta" else: bump = find_bump(target, tag) filename = "{}.md".format(tag) destination = copy(join(source, filename), target) build_hugo_md(destination, tag, bump) if __name__ == "__main__": if len(sys.argv) != 4: print("Usage:\n{} <source dir> <target dir> <tag>" "".format(sys.argv[0])) sys.exit(2) sys.exit(main(sys.argv[1:]))
Python
0.000048
@@ -363,22 +363,20 @@ t a %3C!-- - more - --%3E tag @@ -1144,14 +1144,12 @@ %3C!-- - more - --%3E%5C
19c0e8d856049677bc7de2bc293a87a0aac306f8
Fix wsgi config file access for HTTPD
httpd/keystone.py
httpd/keystone.py
import os from paste import deploy from keystone import config from keystone.common import logging LOG = logging.getLogger(__name__) CONF = config.CONF config_files = ['/etc/keystone.conf'] CONF(config_files=config_files) conf = CONF.config_file[0] name = os.path.basename(__file__) if CONF.debug: CONF.log_opt_values(logging.getLogger(CONF.prog), logging.DEBUG) options = deploy.appconfig('config:%s' % CONF.config_file[0]) application = deploy.loadapp('config:%s' % conf, name=name)
Python
0.000001
@@ -178,16 +178,25 @@ keystone +/keystone .conf'%5D%0A @@ -200,16 +200,45 @@ '%5D%0ACONF( +project='keystone', default_ config_f
c26c944b349aaaedae2e848946b00b4b2f5f3e13
Fix bug in Podoc: assign default opener/saver in __init__ instead of as class attributes
podoc/core.py
podoc/core.py
# -*- coding: utf-8 -*- """Core functionality.""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import logging import os.path as op from .plugin import get_plugin logger = logging.getLogger(__name__) #------------------------------------------------------------------------------ # Utility functions #------------------------------------------------------------------------------ def open_text(path): with open(path, 'r') as f: return f.read() def save_text(path, contents): with open(path, 'w') as f: return f.write(contents) #------------------------------------------------------------------------------ # Main class #------------------------------------------------------------------------------ class Podoc(object): """Conversion pipeline for markup documents. This class implements the core conversion functionality of podoc. """ opener = open_text preprocessors = None reader = None filters = None writer = None postprocessors = None saver = save_text def __init__(self): if self.preprocessors is None: self.preprocessors = [] if self.filters is None: self.filters = [] if self.postprocessors is None: self.postprocessors = [] # Individual stages # ------------------------------------------------------------------------- def open(self, path): """Open a file and return an object.""" assert self.opener is not None return self.opener(path) def save(self, path, contents): """Save contents to a file.""" assert self.saver is not None return self.saver(path, contents) def preprocess(self, contents): """Apply preprocessors to contents.""" for p in self.preprocessors: contents = p(contents) return contents def read(self, contents): """Read contents to an AST.""" if self.reader is None: raise RuntimeError("No reader has been set.") assert self.reader is not None ast = self.reader(contents) return ast def filter(self, ast): """Apply filters to an AST.""" for f in self.filters: ast = f(ast) return ast def write(self, ast): """Write an AST to contents.""" if self.writer is None: raise RuntimeError("No writer has been set.") assert self.writer is not None converted = self.writer(ast) return converted def postprocess(self, contents): """Apply postprocessors to contents.""" for p in self.postprocessors: contents = p(contents) return contents # Partial conversion methods # ------------------------------------------------------------------------- def read_contents(self, contents): """Read contents and return an AST. Preprocessors -> Reader. """ contents = self.preprocess(contents) ast = self.read(contents) return ast def read_file(self, from_path): """Read a file and return an AST. Opener -> Preprocessors -> Reader. """ contents = self.open(from_path) return self.read_contents(contents) def write_contents(self, ast): """Write an AST to contents. Writer -> Postprocessors. """ converted = self.write(ast) converted = self.postprocess(converted) return converted def write_file(self, to_path, ast): """Write an AST to a file. Writer -> Postprocessors -> Saver. 
""" converted = self.write_contents(ast) return self.save(to_path, converted) if to_path else converted # Complete conversion methods # ------------------------------------------------------------------------- def convert_file(self, from_path, to_path=None): """Convert a file.""" contents = self.open(from_path) converted = self.convert_contents(contents) return self.save(to_path, converted) if to_path else converted def convert_contents(self, contents): """Convert contents without writing files.""" ast = self.read_contents(contents) ast = self.filter(ast) converted = self.write_contents(ast) return converted # Pipeline configuration # ------------------------------------------------------------------------- def set_opener(self, func): """An Opener is a function `str (path)` -> `str (or object)`. The output may be a string or another type of object, like a file handle, etc. """ self.opener = func return self def add_preprocessor(self, func): self.preprocessors.append(func) return self def set_reader(self, func): """A reader is a function `str (or object)` -> `ast`. The input corresponds to the output of the file opener. """ self.reader = func return self def add_filter(self, func): self.filters.append(func) return self def set_writer(self, func): """A reader is a function `ast` -> `str (or object)`. The output corresponds to the input of the file saver. """ self.writer = func return self def add_postprocessor(self, func): self.postprocessors.append(func) return self def set_saver(self, func): """A Saver is a function `str (path), str (or object) -> None`. The second input corresponds to the output of the writer. """ self.saver = func return self # Plugins # ------------------------------------------------------------------------- _from_steps = ('opener', 'preprocessors', 'reader') _to_steps = ('writer', 'postprocessors', 'saver') _all_steps = _from_steps + _to_steps + ('filters',) def attach(self, plugin, steps=None): """Attach a plugin with the specified steps. Parameters ---------- plugin : IPlugin class The plugin to attach to the current pipeline. steps : str or list List of pipeline steps to set with the plugin. The list of accepted steps is: `opener`, `preprocessors`, `reader`, `filters`, `writer`, `postprocessors`, `saver`. There are also two aliases: `from` refers to the first three steps, `to` to the last three. """ # By default, attach all steps. if steps is None: steps = self._all_steps if steps == 'from': steps = self._from_steps elif steps == 'to': steps = self._to_steps assert set(steps) <= set(self._all_steps) plugin().attach(self, steps) return self #------------------------------------------------------------------------------ # Misc functions #------------------------------------------------------------------------------ def open_file(path, plugin_name=None): """Open a file using a given plugin. If no plugin is specified, the file extension is used to find the appropriate plugin. """ if plugin_name is None: search = op.splitext(path)[1] else: search = plugin_name plugin = get_plugin(search) assert plugin return Podoc().attach(plugin, ['opener']).open(path)
Python
0
@@ -1018,25 +1018,20 @@ pener = -open_text +None %0A pre @@ -1141,25 +1141,20 @@ saver = -save_text +None %0A%0A de @@ -1171,16 +1171,150 @@ (self):%0A + if self.opener is None:%0A self.opener = open_text%0A if self.saver is None:%0A self.saver = save_text%0A
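A plausible reason for the fix above: a plain function assigned as a class attribute (opener = open_text) is looked up as a bound method, so self.opener(path) would pass the instance as an unwanted first argument, while assigning the default in __init__ stores a plain function on the instance. A minimal reproduction of that gotcha, with class and function names invented for the demo.

def read_upper(text):
    return text.upper()


class BadPipeline:
    opener = read_upper  # class attribute: attribute lookup turns it into a bound method

    def open(self, text):
        return self.opener(text)  # effectively read_upper(self, text) -> TypeError


class GoodPipeline:
    def __init__(self):
        self.opener = read_upper  # instance attribute: stays a plain function

    def open(self, text):
        return self.opener(text)


try:
    BadPipeline().open("hello")
except TypeError as err:
    print("class attribute default:", err)

print("instance attribute default:", GoodPipeline().open("hello"))  # -> HELLO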
6a1aa8e6b640e0c6504cdbb9635f88ec9929cd7d
Update to latest config format.
CheckSTARTTLS.py
CheckSTARTTLS.py
#!/usr/bin/python import sys import os import errno import smtplib import socket import subprocess import re import json import dns.resolver from M2Crypto import X509 from publicsuffix import PublicSuffixList public_suffix_list = PublicSuffixList() def mkdirp(path): try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def extract_names(pem): """Return a list of DNS subject names from PEM-encoded leaf cert.""" leaf = X509.load_cert_string(pem, X509.FORMAT_PEM) subj = leaf.get_subject() # Certs have a "subject" identified by a Distingushed Name (DN). # Host certs should also have a Common Name (CN) with a DNS name. common_names = subj.get_entries_by_nid(subj.nid['CN']) common_names = [name.get_data().as_text() for name in common_names] try: # The SAN extension allows one cert to cover multiple domains # and permits DNS wildcards. # http://www.digicert.com/subject-alternative-name.htm # The field is a comma delimited list, e.g.: # >>> twitter_cert.get_ext('subjectAltName').get_value() # 'DNS:www.twitter.com, DNS:twitter.com' alt_names = leaf.get_ext('subjectAltName').get_value() alt_names = alt_names.split(', ') alt_names = [name.partition(':') for name in alt_names] alt_names = [name for prot, _, name in alt_names if prot == 'DNS'] except: alt_names = [] return set(common_names + alt_names) def tls_connect(mx_host, mail_domain): """Attempt a STARTTLS connection with openssl and save the output.""" if supports_starttls(mx_host): # smtplib doesn't let us access certificate information, # so shell out to openssl. try: output = subprocess.check_output( """openssl s_client \ -starttls smtp -connect %s:25 -showcerts </dev/null \ 2>/dev/null """ % mx_host, shell = True) except subprocess.CalledProcessError: print "Failed s_client" return # Save a copy of the certificate for later analysis with open(os.path.join(mail_domain, mx_host), "w") as f: f.write(output) def valid_cert(filename): """Return true if the certificate is valid. Note: CApath must have hashed symlinks to the trust roots. TODO: Include the -attime flag based on file modification time.""" if open(filename).read().find("-----BEGIN CERTIFICATE-----") == -1: return False try: # The file contains both the leaf cert and any intermediates, so we pass it # as both the cert to validate and as the "untrusted" chain. output = subprocess.check_output("""openssl verify -CApath /home/jsha/mozilla/ -purpose sslserver \ -untrusted "%s" \ "%s" """ % (filename, filename), shell = True) return True except subprocess.CalledProcessError: return False def check_certs(mail_domain): names = set() for mx_hostname in os.listdir(mail_domain): filename = os.path.join(mail_domain, mx_hostname) if not valid_cert(filename): return "" else: new_names = extract_names_from_openssl_output(filename) new_names = map(lambda n: public_suffix_list.get_public_suffix(n), new_names) names.update(new_names) if len(names) >= 1: # Hack: Just pick an arbitrary suffix for now. Do something cleverer later. 
return names.pop() else: return "" def common_suffix(hosts): num_components = min(len(h.split(".")) for h in hosts) longest_suffix = "" for i in range(1, num_components + 1): suffixes = set(".".join(h.split(".")[-i:]) for h in hosts) if len(suffixes) == 1: longest_suffix = suffixes.pop() else: return longest_suffix return longest_suffix def extract_names_from_openssl_output(certificates_file): openssl_output = open(certificates_file, "r").read() cert = re.findall("-----BEGIN CERTIFICATE-----.*?-----END CERTIFICATE-----", openssl_output, flags = re.DOTALL) return extract_names(cert[0]) def supports_starttls(mx_host): try: smtpserver = smtplib.SMTP(mx_host, 25, timeout = 2) smtpserver.ehlo() smtpserver.starttls() return True print "Success: %s" % mx_host except socket.error as e: print "Connection to %s failed: %s" % (mx_host, e.strerror) return False except smtplib.SMTPException: print "No STARTTLS support on %s" % mx_host return False def min_tls_version(mail_domain): protocols = [] for mx_hostname in os.listdir(mail_domain): filename = os.path.join(mail_domain, mx_hostname) contents = open(filename).read() protocol = re.findall("Protocol : (.*)", contents)[0] protocols.append(protocol) return min(protocols) def collect(mail_domain): print "Checking domain %s" % mail_domain mkdirp(mail_domain) answers = dns.resolver.query(mail_domain, 'MX') for rdata in answers: mx_host = str(rdata.exchange).rstrip(".") tls_connect(mx_host, mail_domain) if __name__ == '__main__': """Consume a target list of domains and output a configuration file for those domains.""" if len(sys.argv) == 1: print("Usage: CheckSTARTTLS.py list-of-domains.txt > output.json") config = { "address-domains": { }, "mx-domains": { } } for domain in open(sys.argv[1]).readlines(): domain = domain.strip() if not os.path.exists(domain): collect(domain) if len(os.listdir(domain)) == 0: continue suffix = check_certs(domain) min_version = min_tls_version(domain) if suffix != "": suffix_match = "." + suffix config["address-domains"][domain] = { "accept-mx-domains": [suffix_match] } config["mx-domains"][suffix_match] = { "require-tls": True, "min-tls-version": min_version } print json.dumps(config, indent=2, sort_keys=True)
Python
0
@@ -113,16 +113,35 @@ ort json +%0Aimport collections %0A%0Aimport @@ -5288,71 +5288,38 @@ g = -%7B%0A %22address-domains%22: %7B%0A %7D,%0A %22mx-domains%22: %7B%0A %7D%0A %7D +collections.defaultdict(dict)%0A %0A f @@ -5648,21 +5648,20 @@ g%5B%22a -ddress-domain +cceptable-mx s%22%5D%5B @@ -5738,25 +5738,27 @@ config%5B%22 -mx-domain +tls-policie s%22%5D%5Bsuff
b7e3901411059bbfa8ab83ec1f6fbf135aa50172
Update UserTime.py: add clock emoji helper and clock option
Cogs/UserTime.py
Cogs/UserTime.py
import datetime import pytz from Cogs import FuzzySearch def setup(bot): # This module isn't actually a cog return def getUserTime(member, settings, time = None, strft = "%Y-%m-%d %I:%M %p"): # Returns a dict representing the time from the passed member's perspective offset = settings.getGlobalUserStat(member, "TimeZone") if offset == None: offset = settings.getGlobalUserStat(member, "UTCOffset") if offset == None: # No offset or tz - return UTC return { "zone" : 'UTC', "time" : time.strftime(strft) } # At this point - we need to determine if we have an offset - or possibly a timezone passed t = getTimeFromTZ(offset, time) if t == None: # We did not get a zone t = getTimeFromOffset(offset, time) return t def getTimeFromOffset(offset, t = None, strft = "%Y-%m-%d %I:%M %p"): offset = offset.replace('+', '') # Split time string by : and get hour/minute values try: hours, minutes = map(int, offset.split(':')) except Exception: try: hours = int(offset) minutes = 0 except Exception: return None # await ctx.channel.send('Offset has to be in +-H:M!') # return msg = 'UTC' # Get the time if t == None: t = datetime.datetime.utcnow() # Apply offset if hours > 0: # Apply positive offset msg += '+{}'.format(offset) td = datetime.timedelta(hours=hours, minutes=minutes) newTime = t + td elif hours < 0: # Apply negative offset msg += '{}'.format(offset) td = datetime.timedelta(hours=(-1*hours), minutes=(-1*minutes)) newTime = t - td else: # No offset newTime = t return { "zone" : msg, "time" : newTime.strftime(strft) } def getTimeFromTZ(tz, t = None, strft = "%Y-%m-%d %I:%M %p"): # Assume sanitized zones - as they're pulled from pytz # Let's get the timezone list tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3) if not tz_list[0]['Ratio'] == 1: # We didn't find a complete match return None zone = pytz.timezone(tz_list[0]['Item']) if t == None: zone_now = datetime.datetime.now(zone) else: zone_now = pytz.utc.localize(t, is_dst=None).astimezone(zone) #zone_now = t.astimezone(zone) return { "zone" : tz_list[0]['Item'], "time" : zone_now.strftime(strft) }
Python
0.000001
@@ -121,24 +121,444 @@ return%0D%0A%0D%0A +def getClockForTime(time_string):%0D%0A%09# Assumes a HH:MM PP format%0D%0A%09try:%0D%0A%09%09time = time_string.split()%0D%0A%09%09time = time%5B0%5D.split(%22:%22)%0D%0A%09%09hour = int(time%5B0%5D)%0D%0A%09%09minute = int(time%5B1%5D)%0D%0A%09except:%0D%0A%09%09return %22%22%0D%0A%09clock_string = %22%22%0D%0A%09if min %3E 44:%0D%0A%09%09clock_string = str(hour + 1) if hour %3C 12 else %221%22%0D%0A%09elif min %3E 14:%0D%0A%09%09clock_string = str(hour) + %2230%22%0D%0A%09else:%0D%0A%09%09clock_string = str(hour)%0D%0A%09return %22:clock%22 + clock_string + %22:%22%0D%0A%0D%0A def getUserT @@ -615,24 +615,38 @@ %25d %25I:%25M %25p%22 +, clock = True ):%0D%0A%09# Retur @@ -909,16 +909,97 @@ rn UTC%0D%0A +%09%09t = getClockForTime(time.strftime(strft)) if clock, else time.strftime(strft)%0D%0A %09%09return @@ -1027,35 +1027,16 @@ ime%22 : t -ime.strftime(strft) %7D%0D%0A%09%09%0D%0A @@ -1156,24 +1156,31 @@ offset, time +, clock )%0D%0A%09if t == @@ -1249,16 +1249,23 @@ et, time +, clock )%0D%0A%09retu @@ -1337,24 +1337,38 @@ %25d %25I:%25M %25p%22 +, clock = True ):%0D%0A%09offset @@ -2134,63 +2134,129 @@ t%0D%0A%09 -return %7B %22zone%22 : msg, %22time%22 : newTime.strftime(strft) +ti = getClockForTime(newTime.strftime(strft)) if clock, else newTime.strftime(strft)%0D%0A%09return %7B %22zone%22 : msg, %22time%22 : ti %7D%0D%0A @@ -2318,16 +2318,30 @@ I:%25M %25p%22 +, clock = True ):%0D%0A%09# A @@ -2784,16 +2784,105 @@ (zone)%0D%0A +%09ti = getClockForTime(zone_now.strftime(strft)) if clock, else zone_now.strftime(strft)%0D%0A %09return @@ -2925,32 +2925,9 @@ %22 : -zone_now.strftime(strft) +ti %7D%0D%0A
878d690cad9bac7009deab554fe070c1dfc3a1d8
Update UserTime.py: rename local 'time' variable in getClockForTime
Cogs/UserTime.py
Cogs/UserTime.py
import datetime import pytz from Cogs import FuzzySearch def setup(bot): # This module isn't actually a cog return def getClockForTime(time_string): # Assumes a HH:MM PP format print("Clock called: " + time_string) try: time = time_string.split(" ") if len(time) == 2: time = time[0].split(":") elif len(time) == 3: time = time[1].split(":") else: return time_string hour = int(time[0]) minute = int(time[1]) except: return time_string print(hour, minute) clock_string = "" if min > 44: clock_string = str(hour + 1) if hour < 12 else "1" elif min > 14: clock_string = str(hour) + "30" else: clock_string = str(hour) print(clock_string) return time_string +" :clock" + clock_string + ":" def getUserTime(member, settings, time = None, strft = "%Y-%m-%d %I:%M %p", clock = True): # Returns a dict representing the time from the passed member's perspective offset = settings.getGlobalUserStat(member, "TimeZone") if offset == None: offset = settings.getGlobalUserStat(member, "UTCOffset") if offset == None: # No offset or tz - return UTC if clock: t = getClockForTime(time.strftime(strft)) else: t = time.strftime(strft) return { "zone" : 'UTC', "time" : t } # At this point - we need to determine if we have an offset - or possibly a timezone passed t = getTimeFromTZ(offset, time, strft, clock) if t == None: # We did not get a zone t = getTimeFromOffset(offset, time, strft, clock) return t def getTimeFromOffset(offset, t = None, strft = "%Y-%m-%d %I:%M %p", clock = True): offset = offset.replace('+', '') # Split time string by : and get hour/minute values try: hours, minutes = map(int, offset.split(':')) except Exception: try: hours = int(offset) minutes = 0 except Exception: return None # await ctx.channel.send('Offset has to be in +-H:M!') # return msg = 'UTC' # Get the time if t == None: t = datetime.datetime.utcnow() # Apply offset if hours > 0: # Apply positive offset msg += '+{}'.format(offset) td = datetime.timedelta(hours=hours, minutes=minutes) newTime = t + td elif hours < 0: # Apply negative offset msg += '{}'.format(offset) td = datetime.timedelta(hours=(-1*hours), minutes=(-1*minutes)) newTime = t - td else: # No offset newTime = t if clock: ti = getClockForTime(newTime.strftime(strft)) else: ti = newTime.strftime(strft) return { "zone" : msg, "time" : ti } def getTimeFromTZ(tz, t = None, strft = "%Y-%m-%d %I:%M %p", clock = True): # Assume sanitized zones - as they're pulled from pytz # Let's get the timezone list tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3) if not tz_list[0]['Ratio'] == 1: # We didn't find a complete match return None zone = pytz.timezone(tz_list[0]['Item']) if t == None: zone_now = datetime.datetime.now(zone) else: zone_now = pytz.utc.localize(t, is_dst=None).astimezone(zone) #zone_now = t.astimezone(zone) if clock: ti = getClockForTime(zone_now.strftime(strft)) else: ti = zone_now.strftime(strft) return { "zone" : tz_list[0]['Item'], "time" : ti}
Python
0.000001
@@ -240,19 +240,16 @@ ry:%0D%0A%09%09t -ime = time_ @@ -273,27 +273,24 @@ %0D%0A%09%09if len(t -ime ) == 2:%0D%0A%09%09%09 @@ -290,26 +290,20 @@ 2:%0D%0A%09%09%09t -ime = t -ime %5B0%5D.spli @@ -322,19 +322,16 @@ if len(t -ime ) == 3:%0D @@ -339,18 +339,12 @@ %09%09%09t -ime = t -ime %5B1%5D. @@ -401,19 +401,16 @@ = int(t -ime %5B0%5D)%0D%0A%09%09 @@ -423,19 +423,16 @@ = int(t -ime %5B1%5D)%0D%0A%09e
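Decoded from the URL-encoded hunks above, this commit only renames the local variable time to t inside getClockForTime, so the list returned by split() no longer reuses a name that reads like the original time string. A minimal sketch of the resulting parsing step, with indentation approximated from the record's flattened old_contents (the clock-string formatting further down is untouched by the commit):

def getClockForTime(time_string):
    # Assumes a "HH:MM PP"-style string; pull out the HH:MM part first.
    try:
        t = time_string.split(" ")
        if len(t) == 2:
            t = t[0].split(":")
        elif len(t) == 3:
            t = t[1].split(":")
        else:
            return time_string
        hour = int(t[0])
        minute = int(t[1])
    except Exception:
        return time_string
    # ...clock-string formatting continues unchanged in the record above.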
8eea594e684053a7fbfe1f2f946343cf809be058
Rename server tests
treecat/serving_test.py
treecat/serving_test.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np import pytest from treecat.serving import TreeCatServer from treecat.testutil import TINY_CONFIG from treecat.testutil import TINY_DATA from treecat.testutil import TINY_MASK from treecat.training import train_model @pytest.fixture(scope='module') def model(): return train_model(TINY_DATA, TINY_MASK, TINY_CONFIG) def test_server_init(model): server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG) server._get_session(7) def test_server_sample(model): N, V = TINY_DATA.shape server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG) # Sample all possible mask patterns. factors = [[True, False]] * V for mask in itertools.product(*factors): mask = np.array(mask, dtype=np.bool_) samples = server.sample(TINY_DATA, mask) assert samples.shape == TINY_DATA.shape assert samples.dtype == TINY_DATA.dtype assert np.allclose(samples[:, mask], TINY_DATA[:, mask]) def test_server_logprob(model): N, V = TINY_DATA.shape server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG) # Sample all possible mask patterns. factors = [[True, False]] * V for mask in itertools.product(*factors): mask = np.array(mask, dtype=np.bool_) logprob = server.logprob(TINY_DATA, mask) assert logprob.shape == (N, ) assert np.isfinite(logprob).all() assert (logprob < 0.0).all() # Assuming features are discrete. @pytest.mark.xfail def test_server_logprob_total(model): N, V = TINY_DATA.shape C = TINY_CONFIG['num_categories'] server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG) factors = [range(C)] * V data = np.array(list(itertools.product(*factors)), dtype=np.int32) mask = np.array([True] * V, dtype=np.bool_) logprob = server.logprob(data, mask) total = np.exp(np.logaddexp.reduce(logprob)) assert abs(total - 1.0) < 1e-6, total
Python
0.000001
@@ -619,16 +619,22 @@ r_sample +_shape (model): @@ -626,32 +626,32 @@ e_shape(model):%0A - N, V = TINY_ @@ -1138,16 +1138,22 @@ _logprob +_shape (model): @@ -1669,21 +1669,29 @@ logprob_ -total +is_normalized (model): @@ -1756,16 +1756,16 @@ ories'%5D%0A - serv @@ -1826,24 +1826,87 @@ TINY_CONFIG) +%0A%0A # The total probability of all possible rows should be 1. %0A factors
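The hunks above are pure renames plus one clarifying comment. Decoded, the test module ends up exposing these names (bodies elided; they are unchanged from the record's old_contents):

import pytest

def test_server_sample_shape(model):           # was test_server_sample
    ...

def test_server_logprob_shape(model):          # was test_server_logprob
    ...

@pytest.mark.xfail
def test_server_logprob_is_normalized(model):  # was test_server_logprob_total
    # The total probability of all possible rows should be 1.
    ...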
1ba8cadf93e80107902e142c4644d03668592c5f
add global and directive specific compat options
cautodoc.py
cautodoc.py
# coding=utf-8 """Hawkmoth - Sphinx C Domain autodoc directive extension""" __author__ = "Jani Nikula <jani@nikula.org>" __copyright__ = "Copyright (c) 2016-2017, Jani Nikula <jani@nikula.org>" __version__ = '0.1' __license__ = "BSD 2-Clause, see LICENSE for details" import glob import os import re import stat import subprocess import sys from docutils import nodes, statemachine from docutils.parsers.rst import directives from docutils.statemachine import ViewList from sphinx.ext.autodoc import AutodocReporter from sphinx.util.compat import Directive # The parser bits from hawkmoth import parse # This is the part that interfaces with Sphinx. Do not depend on Clang here. class CAutoDocDirective(Directive): """Extract documentation comments from the specified file""" required_argument = 1 optional_arguments = 1 # Allow passing a variable number of file patterns as arguments final_argument_whitespace = True # FIXME: potentially need to pass clang options, such as -D etc. Should that # be per directive? Or global default and overrides? option_spec = { # FIXME: figure out passing to parser, have a global default option 'compat': directives.unchanged, } has_content = False def parse(self, viewlist, filename): comments = parse(filename) for (comment, meta) in comments: lineoffset = meta['line'] lines = statemachine.string2lines(comment, 8, convert_whitespace=True) for line in lines: viewlist.append(line, filename, lineoffset) lineoffset += 1 def run(self): env = self.state.document.settings.env result = ViewList() for pattern in self.arguments[0].split(): for filename in glob.iglob(env.config.cautodoc_root + '/' + pattern): mode = os.stat(filename).st_mode if not stat.S_ISDIR(mode): # Tell Sphinx about the dependency env.note_dependency(os.path.abspath(filename)) # FIXME: pass relevant options to parser self.parse(result, filename) node = nodes.section() node.document = self.state.document self.state.nested_parse(result, self.content_offset, node) return node.children def setup(app): app.require_sphinx('1.3') app.add_config_value('cautodoc_root', '.', 'env') app.add_directive_to_domain('c', 'autodoc', CAutoDocDirective) return dict(version = __version__, parallel_read_safe = True, parallel_write_safe = True)
Python
0
@@ -1102,84 +1102,8 @@ = %7B%0A - # FIXME: figure out passing to parser, have a global default option%0A @@ -1136,16 +1136,25 @@ nchanged +_required ,%0A %7D%0A @@ -1216,16 +1216,24 @@ filename +, compat ):%0A @@ -1252,32 +1252,47 @@ = parse(filename +, compat=compat )%0A%0A for ( @@ -1631,16 +1631,89 @@ gs.env%0A%0A + compat = self.options.get('compat', env.config.cautodoc_compat)%0A%0A @@ -2187,16 +2187,24 @@ filename +, compat )%0A%0A @@ -2471,16 +2471,73 @@ 'env')%0A + app.add_config_value('cautodoc_compat', None, 'env')%0A app.
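Decoded, the hunks register a global default (cautodoc_compat) with Sphinx and let each directive override it through a required-value compat option that is then threaded into the hawkmoth parse() call. A condensed sketch of the touched pieces; the pieces below normally live inside the CAutoDocDirective class and setup() shown in the record, everything else is trimmed:

from docutils.parsers.rst import directives

option_spec = {
    'compat': directives.unchanged_required,   # per-directive override
}

def run(self):
    env = self.state.document.settings.env
    # Directive option wins; otherwise fall back to the global config value.
    compat = self.options.get('compat', env.config.cautodoc_compat)
    ...

def setup(app):
    app.add_config_value('cautodoc_compat', None, 'env')   # global default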
fa21acc470d9c32619b3c67dcce54c7b0a69a07a
Fix inadvertent requirement of hg, svn, git, etc.
lib/spack/spack/test/__init__.py
lib/spack/spack/test/__init__.py
############################################################################## # Copyright (c) 2013, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://scalability-llnl.github.io/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License (as published by # the Free Software Foundation) version 2.1 dated February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import sys import unittest import llnl.util.tty as tty from llnl.util.tty.colify import colify import spack import spack.test.install """Names of tests to be included in Spack's test suite""" test_names = ['versions', 'url_parse', 'url_substitution', 'packages', 'stage', 'spec_syntax', 'spec_semantics', 'spec_dag', 'concretize', 'multimethod', 'install', 'package_sanity', 'config', 'directory_layout', 'python_version', 'git_fetch', 'svn_fetch', 'hg_fetch', 'mirror', 'url_extrapolate'] def list_tests(): """Return names of all tests that can be run for Spack.""" return test_names def run(names, verbose=False): """Run tests with the supplied names. Names should be a list. If it's empty, run ALL of Spack's tests.""" verbosity = 1 if not verbose else 2 if not names: names = test_names else: for test in names: if test not in test_names: tty.error("%s is not a valid spack test name." % test, "Valid names are:") colify(test_names, indent=4) sys.exit(1) runner = unittest.TextTestRunner(verbosity=verbosity) testsRun = errors = failures = 0 for test in names: module = 'spack.test.' + test print module suite = unittest.defaultTestLoader.loadTestsFromName(module) tty.msg("Running test: %s" % test) result = runner.run(suite) testsRun += result.testsRun errors += len(result.errors) failures += len(result.failures) succeeded = not errors and not failures tty.msg("Tests Complete.", "%5d tests run" % testsRun, "%5d failures" % failures, "%5d errors" % errors) if not errors and not failures: tty.info("OK", format='g') else: tty.info("FAIL", format='r') sys.exit(1)
Python
0.000001
@@ -1327,36 +1327,8 @@ ck%0A%0A -import spack.test.install%0A%0A%0A %22%22%22N
671ca30892e3ebeb0a9140f95690853b4b92dc02
Fix reverse since we deprecated post_object_list
post/views.py
post/views.py
from django.core.urlresolvers import reverse from django.utils.translation import ugettext as _ from post.models import Post from jmbo.generic.views import GenericObjectDetail, GenericObjectList from jmbo.view_modifiers import DefaultViewModifier class ObjectList(GenericObjectList): def get_extra_context(self, *args, **kwargs): return {'title': _('Posts')} def get_view_modifier(self, request, *args, **kwargs): return DefaultViewModifier(request, *args, **kwargs) def get_paginate_by(self, *args, **kwargs): return 12 def get_queryset(self, *args, **kwargs): return Post.permitted.all() object_list = ObjectList() class ObjectDetail(GenericObjectDetail): def get_queryset(self, *args, **kwargs): return Post.permitted.all() def get_extra_context(self, *args, **kwargs): return {'title': 'Posts'} def get_view_modifier(self, request, *args, **kwargs): return DefaultViewModifier( request, base_url=reverse("post_object_list"), ignore_defaults=True, *args, **kwargs ) object_detail = ObjectDetail()
Python
0.000014
@@ -1025,13 +1025,8 @@ se(%22 -post_ obje @@ -1033,16 +1033,39 @@ ct_list%22 +, args=%5B'post', 'post'%5D ),%0A
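Decoded, the single hunk swaps the removed post_object_list URL name for the generic object_list route, parameterised with two 'post' arguments (presumably the app label and model name). A sketch under that assumption; the helper function here is only for illustration, in the record the call sits inline in ObjectDetail.get_view_modifier:

from django.core.urlresolvers import reverse

def _post_list_url():
    # Was: reverse("post_object_list")
    return reverse("object_list", args=['post', 'post'])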
a03c61430abac8cac5e522a3bf391175cd261cec
fix tests
gammafit/tests/test_onezone.py
gammafit/tests/test_onezone.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy import units as u import numpy as np from numpy.testing import assert_approx_equal electronozmpars={ 'seedspec':'CMB', 'index':2.0, 'cutoff':1e13, 'beta':1.0, 'ngamd':100, 'gmin':1e4, 'gmax':1e10, } def test_electronozm(): from ..onezone import ElectronOZM ozm = ElectronOZM( np.logspace(0,15,1000), 1, **electronozmpars) ozm.calc_outspec() lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene) assert_approx_equal(lsy,0.016769058688230903) lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene) assert_approx_equal(lic,214080823.28721327) def test_electronozm_evolve(): from ..onezone import ElectronOZM ozm = ElectronOZM( np.logspace(0,15,1000), 1, evolve_nelec=True, **electronozmpars) ozm.calc_outspec() lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene) assert_approx_equal(lsy,5718447729.5694494) lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene) assert_approx_equal(lic,1.0514223815442389e+20) def test_protonozm(): from ..onezone import ProtonOZM ozm = ProtonOZM( np.logspace(8,15,100), 1, index=2.0,cutoff=1e13,beta=1.0) ozm.calc_outspec() lpp=np.trapz(ozm.specpp*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene) assert_approx_equal(lpp,3.2800627079738687e+23, significant=5)
Python
0.000001
@@ -744,16 +744,17 @@ 21327)%0A%0A +# def test @@ -772,32 +772,33 @@ m_evolve():%0A +# from ..onezone i @@ -812,32 +812,33 @@ lectronOZM%0A%0A +# ozm = ElectronOZ @@ -901,32 +901,33 @@ ronozmpars)%0A +# ozm.calc_outspec @@ -926,32 +926,33 @@ _outspec()%0A%0A +# lsy=np.trapz(ozm @@ -1004,32 +1004,33 @@ outspecene)%0A +# assert_approx_eq @@ -1057,24 +1057,25 @@ 694494)%0A +# lic=np.trapz @@ -1131,32 +1131,33 @@ outspecene)%0A +# assert_approx_eq @@ -1466,24 +1466,23 @@ 2800 -627079738687e+23 +253974151616e-4 , si
a5878b1f1ff016a2337557a7d55502b9046a2894
add russia url
scripts/import/prepare_countries.py
scripts/import/prepare_countries.py
# This script generates Makefile that can be used to import countries # into libosmscout and generate geocoder-nlp database import pycountry, os postal_countries_file = "../postal/countries_languages.txt" postal_countries = [] for i in open(postal_countries_file, "r"): postal_countries.append(i.split(":")[0]) # print pycountry.countries.lookup("MK") Name2Country = { "azores": "Portugal", "bosnia herzegovina": "Bosnia and Herzegovina", "great britain": "United Kingdom", "macedonia": "MK", "russia": "RU", } Countries = { "europe": [ "albania", "andorra", "austria", "azores", "belarus", "belgium", "bosnia-herzegovina", "bulgaria", "croatia", "cyprus", "czech-republic", "denmark", "estonia", "faroe-islands", "finland", "france", "georgia", "germany", "great-britain", "greece", "hungary", "iceland", #"ireland-and-northern-ireland", "isle-of-man", "italy", #"kosovo", "latvia", "liechtenstein", "lithuania", "luxembourg", "macedonia", "malta", "moldova", "monaco", "montenegro", "netherlands", "norway", "poland", "portugal", "romania", "russia", "serbia", "slovakia", "slovenia", "spain", "sweden", "switzerland", "turkey", "ukraine" ] } fmake = open("Makefile", "w") fmake.write("# This Makefile is generated by script\n\n") fmake.write("BUILDER=./build.sh\n") fmake.write("WORLD_DIR=world\n") fmake.write("DOWNLOADS_DIR=downloads\n") fmake.write("\nall: $(DOWNLOADS_DIR)/.directory $(WORLD_DIR)/all_countries_done\n\techo All Done\n\n") fmake.write("$(DOWNLOADS_DIR)/.directory:\n\tmkdir -p $(DOWNLOADS_DIR)\n\ttouch $(DOWNLOADS_DIR)/.directory\n\n") all_countries = "" def pbfname(continent, country): return continent + "-" + country + ".pbf" def pbfurl(continent, country): return "http://download.geofabrik.de/%s/%s-latest.osm.pbf" % (continent, country) for continent in Countries.keys(): fmake.write("$(WORLD_DIR)/" + continent + "/.directory:\n\tmkdir -p $(WORLD_DIR)/" + continent + "\n\ttouch $(WORLD_DIR)/" + continent + "/.directory\n\n") for country in Countries[continent]: country_spaces = country.replace('-', ' ') if country_spaces in Name2Country: c = pycountry.countries.lookup(Name2Country[country_spaces]) else: c = pycountry.countries.lookup(country_spaces) code2 = c.alpha_2 name = c.name print continent, code2, name, (code2.lower() in postal_countries) sql = "$(WORLD_DIR)/" + os.path.join(continent, country + ".sqlite.bz2") pbf = "$(DOWNLOADS_DIR)/" + pbfname(continent, country) all_countries += sql + " " fmake.write(sql + ": $(WORLD_DIR)/" + continent + "/.directory " + pbf + "\n\t$(BUILDER) $(DOWNLOADS_DIR)/" + pbfname(continent, country) + " $(WORLD_DIR) " + continent + " " + country + " " + code2 + "\n\n") fmake.write(pbf + ":$(DOWNLOADS_DIR)/.directory\n\twget %s -O$(DOWNLOADS_DIR)/%s || (rm -f $(DOWNLOADS_DIR)/%s && exit 1)\n\ttouch $(DOWNLOADS_DIR)/%s\n" % (pbfurl(continent, country), pbfname(continent, country), pbfname(continent, country), pbfname(continent, country)) ) fmake.write("$(WORLD_DIR)/all_countries_done: " + all_countries + "\n\techo > $(WORLD_DIR)/all_countries_done\n\n") print "\nExamine generated Makefile and run make using it. See build.sh and adjust the used executables first\n"
Python
0.000025
@@ -529,16 +529,101 @@ RU%22,%0A%7D%0A%0A +SpecialURL = %7B%0A %22russia%22: %22http://download.geofabrik.de/russia-latest.osm.pbf%22%0A%7D%0A%0A Countrie @@ -2565,32 +2565,89 @@ nent, country):%0A + if country in SpecialURL: return SpecialURL%5Bcountry%5D%0A return %22http
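Decoded, the hunks add a lookup table for countries that Geofabrik serves outside the continent directories and consult it before building the default URL:

SpecialURL = {
    # Russia lives at the top level of the Geofabrik download tree,
    # not under a continent directory.
    "russia": "http://download.geofabrik.de/russia-latest.osm.pbf"
}

def pbfurl(continent, country):
    if country in SpecialURL:
        return SpecialURL[country]
    return "http://download.geofabrik.de/%s/%s-latest.osm.pbf" % (continent, country)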
424be3c0fda49c1111410d6eff719a36219640b6
Replace --include-sequence with --include_sequence for consistency.
genbank/convert_gff3_to_gbk.py
genbank/convert_gff3_to_gbk.py
#!/usr/bin/env python3 """ Converts GFF3 representing gene models to Genbank flat-file format. GFF3 specification: http://www.sequenceontology.org/gff3.shtml Genbank flat file specification: https://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html --molecule_type: http://www.ncbi.nlm.nih.gov/Sequin/sequin.hlp.html#Molecule --genbank_division: http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html#GenBankDivisionB """ import argparse import biocodegenbank import biocodegff import os import sys from jinja2 import Environment, FileSystemLoader PATH = os.path.dirname(os.path.abspath(__file__)) TEMPLATE_ENVIRONMENT = Environment( autoescape=False, loader=FileSystemLoader(os.path.join(PATH, '../data')), trim_blocks=False) def main(): parser = argparse.ArgumentParser( description='Converts GFF3 into a GenBank flat file') ## output file to be written parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input GFF3 file to be read' ) parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to a Genbank flat file to be created' ) parser.add_argument('-mt', '--molecule_type', type=str, required=False, default='DNA', help='Molecule type' ) parser.add_argument('-gbd', '--genbank_division', type=str, required=False, default='.', help='GenBank Division (3-letter abbreviation)' ) parser.add_argument('-md', '--modification_date', type=str, required=False, default='DD-MMM-YYYY', help='The modification date for header in format like 21-JUN-1999' ) parser.add_argument('-org', '--organism', type=str, required=False, default='.', help='Full organism name (including strain)' ) parser.add_argument('-str', '--strain', type=str, required=False, help="Only the strain designation, which is written to the FEATURES.source element" ) parser.add_argument('-d', '--definition', type=str, required=False, default='.', help='Brief description of sequence; includes information such as source organism, gene name/protein name, or some description of the sequence\'s function.' ) parser.add_argument('-s', '--source', type=str, required=False, default='.', help='Free-format information including an abbreviated form of the organism name, sometimes followed by a molecule type.' ) parser.add_argument('-t', '--taxon_id', type=int, required=False, help='NCBI taxon ID, if known' ) parser.add_argument('-l', '--lineage', type=str, required=False, default='Unknown', help='Semicolon-delimited lineage of the organism e.g., "Eukaryota; Alveolata; Apicomplexa; Aconoidasida; Piroplasmida; Theileriidae; Theileria"' ) parser.add_argument('-seq', '--include-sequence', action='store_true', help='Include sequence (if present) in the output GenBank flat file(s).' 
) args = parser.parse_args() # line-wrap lineage to stay below 79 character GenBank flat file width lineage = biocodegenbank.line_wrap_lineage_string( args.lineage ) (assemblies, features) = biocodegff.get_gff3_features( args.input_file ) ofh = sys.stdout if args.output_file is not None: ofh = open(args.output_file, 'wt') for assembly_id in assemblies: assembly = assemblies[assembly_id] context = { 'locus':assembly_id, 'molecule_size':assembly.length, 'molecule_type':args.molecule_type, 'division':args.genbank_division, 'modification_date':args.modification_date, 'accession':'.', 'version':'.', 'gi':'.', 'source':args.source, 'definition':args.definition, 'organism':args.organism, 'lineage':lineage } header = TEMPLATE_ENVIRONMENT.get_template('genbank_flat_file_header.template').render(context) ofh.write(header) ofh.write("\nFEATURES Location/Qualifiers\n") ofh.write(" source 1..{0}\n".format(assembly.length)) ofh.write(" /organism=\"{0}\"\n".format(args.organism)) ofh.write(" /mol_type=\"genomic DNA\"\n") if args.strain is not None: ofh.write(" /strain=\"{0}\"\n".format(args.strain)) if args.taxon_id is not None: ofh.write(" /db_xref=\"taxon:{0}\"\n".format(args.taxon_id)) for gene in assemblies[assembly_id].genes(): biocodegenbank.print_biogene( gene=gene, fh=ofh, on=assembly ) if args.include_sequence: ofh.write("ORIGIN\n") biocodegenbank.print_sequence( seq=assembly.residues, fh=ofh ) ofh.write("//\n") if __name__ == '__main__': main()
Python
0.000276
@@ -2673,17 +2673,17 @@ -include -- +_ sequence
3eeeb844b3936063f4f0192d46577e3f9397c107
Fix ordering in cursor test
search/tests/test_query.py
search/tests/test_query.py
import datetime import unittest from google.appengine.api import search as search_api from ..indexes import DocumentModel, Index from ..fields import TZDateTimeField, TextField from ..query import SearchQuery from ..ql import Q from .. import timezone from .base import AppengineTestCase class FakeDocument(DocumentModel): foo = TextField() created = TZDateTimeField() class TestSearchQueryClone(unittest.TestCase): def test_clone_keywords(self): q = SearchQuery("dummy", document_class=FakeDocument).keywords("bar") q1 = q.filter(foo="baz") self.assertEqual( u"bar", unicode(q.query) ) self.assertEqual( u'bar AND (foo:"baz")', unicode(q1.query) ) def test_clone_filters(self): q = SearchQuery("dummy", document_class=FakeDocument).filter( (Q(foo="bar") | Q(foo="baz")) & ~Q(foo="neg") ) q1 = q.filter(~Q(foo="neg2")) self.assertEqual( u'(((foo:"bar") OR (foo:"baz")) AND NOT (foo:"neg"))', unicode(q.query) ) self.assertEqual( u'(' '(((foo:"bar") OR (foo:"baz")) AND NOT (foo:"neg")) ' 'AND NOT (foo:"neg2")' ')', unicode(q1.query) ) class TestSearchQueryFilter(unittest.TestCase): def test_filter_on_datetime_field(self): xmas = datetime.datetime(2016, 12, 31, 12, tzinfo=timezone.utc) q = SearchQuery('dummy', document_class=FakeDocument) q = q.filter(created__gt=xmas) self.assertEqual(unicode(q.query), u'(created > 1483185600)') class TestCursor(AppengineTestCase): def test_cursor(self): idx = Index('dummy', FakeDocument) idx.put(FakeDocument(foo='thing')) idx.put(FakeDocument(foo='thing2')) idx.get_range() q = idx.search().set_cursor()[:1] list(q) self.assertTrue(q.next_cursor) q2 = idx.search().set_cursor(cursor=q.next_cursor) self.assertEqual(2, len(q2)) # still returns full count results = list(q2) self.assertEqual(1, len(results)) # but only one document self.assertEqual('thing2', results[0].foo) self.assertFalse(q2.next_cursor)
Python
0.000002
@@ -1903,16 +1903,32 @@ cursor() +.order_by('foo') %5B:1%5D%0A @@ -2039,16 +2039,32 @@ _cursor) +.order_by('foo') %0A
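Decoded, the fix adds an explicit order_by('foo') to both search queries so the cursor pages over a deterministic ordering and the second page reliably holds the remaining document. Sketch of the adjusted test, assuming the record's Index and FakeDocument fixtures:

def test_cursor(self):
    idx = Index('dummy', FakeDocument)
    idx.put(FakeDocument(foo='thing'))
    idx.put(FakeDocument(foo='thing2'))
    idx.get_range()
    # The explicit ordering is what makes the cursor hand back 'thing2' next.
    q = idx.search().set_cursor().order_by('foo')[:1]
    list(q)
    q2 = idx.search().set_cursor(cursor=q.next_cursor).order_by('foo')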
74e4e5e507d908950d4458dff5ba4aa5c712866f
Allow localization of "Self Informations"
searx/plugins/self_info.py
searx/plugins/self_info.py
''' searx is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. searx is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with searx. If not, see < http://www.gnu.org/licenses/ >. (C) 2015 by Adam Tauber, <asciimoo@gmail.com> ''' from flask_babel import gettext import re name = "Self Informations" description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".') default_on = True # Self User Agent regex p = re.compile(b'.*user[ -]agent.*', re.IGNORECASE) # attach callback to the post search hook # request: flask request object # ctx: the whole local context of the pre search hook def post_search(request, search): if search.search_query.pageno > 1: return True if search.search_query.query == b'ip': x_forwarded_for = request.headers.getlist("X-Forwarded-For") if x_forwarded_for: ip = x_forwarded_for[0] else: ip = request.remote_addr search.result_container.answers['ip'] = {'answer': ip} elif p.match(search.search_query.query): ua = request.user_agent search.result_container.answers['user-agent'] = {'answer': ua} return True
Python
0
@@ -710,17 +710,25 @@ %0Aname = -%22 +gettext(' Self Inf @@ -736,17 +736,18 @@ rmations -%22 +') %0Adescrip
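Decoded, the only change is that the plugin's name constant now goes through gettext like the description already does, so the title can be localized too:

from flask_babel import gettext

name = gettext('Self Informations')   # previously a plain string literal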
93bdcc16101852c6390fba1eff879d13fcdc063e
Remove leftover comparison functions
primitives.py
primitives.py
import numbers import errors import lang import tokens import util # used to get generate guaranteed-unique symbol names GENSYM_COUNTER = util.ThreadSafeCounter() def add(a, b, *rest): '''Adds the all the given numbers together.''' util.ensure_type(numbers.Number, a, b) # add all the arguments together while checking type total = a + b for n in rest: util.ensure_type(numbers.Number, n) total += n return total def sub(a, b, *rest): '''Subtracts the given numbers in sequence.''' util.ensure_type(numbers.Number, a, b) # subtract all the arguments in sequence while checking type difference = a - b for n in rest: util.ensure_type(numbers.Number, n) difference -= n return difference def mul(a, b, *rest): '''Multiplies all the given numbers together.''' util.ensure_type(numbers.Number, a, b) # multiply all the arguments together while checking type product = a * b for n in rest: # stop multiplying if the product ever goes to zero if product == 0: break util.ensure_type(numbers.Number, n) product *= n return product def div(a, b, *rest): '''Divides the given numbers in sequence.''' util.ensure_type(numbers.Number, a, b) # divide all the arguments in sequence while checking type quotient = a / b for n in rest: util.ensure_type(numbers.Number, n) quotient /= n return quotient def power(a, b): '''Raises a to the power of b.''' util.ensure_type(numbers.Number, a, b) return a ** b def type_(e): '''Returns the type of an element as a string. Returns 'nil' for NIL.''' if isinstance(e, lang.Symbol): return lang.String('symbol') elif isinstance(e, lang.String): return lang.String('string') elif isinstance(e, lang.Boolean): return lang.String('boolean') elif isinstance(e, (int, long)): return lang.String('integer') elif isinstance(e, float): return lang.String('float') elif isinstance(e, complex): return lang.String('complex') elif e is lang.NIL: return lang.String('nil') elif isinstance(e, lang.Cons): return lang.String('cons') elif isinstance(e, lang.Function): return lang.String('function') elif isinstance(e, lang.Macro): return lang.String('macro') # shouldn't ever get this far raise errors.WrongArgumentTypeError('unsupported type: ' + e.__class__.__name__.lower()) def is_(a, b): '''Returns true if the two items refer to the same object in memory.''' return lang.Boolean.build(a is b) def equal(a, b): ''' Returns true if two constructs are congruent. For example, numbers are compared mathematically, cons are compared by structure and equivalent contents, etc. ''' # the same item is equal to itself if a is b: return lang.TRUE # things can't be equal if they're not the same class elif not (isinstance(a, b.__class__) and isinstance(b, a.__class__)): return lang.FALSE # compare everything else by value (numbers, Cons, symbols, etc.) return lang.Boolean.build(a == b) def gt(a, b): '''Compare two numbers using '>'.''' util.ensure_type(numbers.Number, a, b) return lang.Boolean.build(a > b) def gte(a, b): '''Compare two numbers using '>='.''' util.ensure_type(numbers.Number, a, b) return lang.Boolean.build(a >= b) def lt(a, b): '''Compare two numbers using '<'.''' util.ensure_type(numbers.Number, a, b) return lang.Boolean.build(a < b) def lte(a, b): '''Compare two numbers using '<='.''' util.ensure_type(numbers.Number, a, b) return lang.Boolean.build(a <= b) def not_(a): ''' Returns the opposite boolean of that passed in. All things that aren't #f are #t, so we return whether a is False. 
''' return lang.Boolean.build(a is lang.FALSE) def cons(a, b): '''Pair two items.''' return lang.Cons(a, b) def car(e): '''Return the first element of a pair.''' util.ensure_type(lang.Cons, e) # nil isn't allowed to be indexed into, since it has no car or cdr if e is lang.NIL: raise errors.WrongArgumentTypeError('wrong argument type for car: ' + 'expected pair, got ' + str(e)) return e.car def cdr(e): '''Return the second element of a pair.''' util.ensure_type(lang.Cons, e) if e is lang.NIL: raise errors.WrongArgumentTypeError('wrong argument type for cdr: ' + 'expected pair, got ' + str(e)) return e.cdr def read(prompt): '''Print the prompt, read input from stdin, and return it as a string.''' util.ensure_type(basestring, prompt) return String(raw_input(prompt)) def parse_(s): '''Parse a string into a list of the S-expressions it describes.''' util.ensure_type(basestring, s) return lang.Cons.build(*parse(tokens.tokenize(s))) def gensym(prefix): ''' Generate a unique symbol with the given prefix in its name. Generated symbols have names that contain syntax elements, and hence can't be entered via the reader. ''' util.ensure_type(basestring, prefix) return lang.Symbol(prefix + tokens.OPEN_PAREN + str(GENSYM_COUNTER()) + tokens.CLOSE_PAREN) # these functions serve as markers for whether the function being called is # special. we check to see if the function for the symbol is one of these # functions, and if so we evaluate it in whatever way it requires. this allows # the user to define new symbols that point to these functions, but still have # the functions work in the same way. quote = lang.PrimitiveFunction(lambda e: _, name=tokens.QUOTE_LONG) unquote = lang.PrimitiveFunction(lambda e: _, name=tokens.UNQUOTE_LONG) quasiquote = lang.PrimitiveFunction(lambda e: _, name=tokens.QUASIQUOTE_LONG) lambda_ = lang.PrimitiveFunction(lambda args, body: _, name=tokens.LAMBDA) macro = lang.PrimitiveFunction(lambda args, body: _, name=tokens.MACRO) expand = lang.PrimitiveFunction(lambda macro, *args: _, name=tokens.MACRO_EXPAND) define = lang.PrimitiveFunction(lambda symbol, value: _, name=tokens.DEFINE) cond = lang.PrimitiveFunction(lambda *e: _, name=tokens.COND) and_ = lang.PrimitiveFunction(lambda a, b, *rest: _, name=tokens.AND) or_ = lang.PrimitiveFunction(lambda a, b, *rest: _, name=tokens.OR) eval_ = lang.PrimitiveFunction(lambda sexp: _, name=tokens.EVAL) load = lang.PrimitiveFunction(lambda fname: _, name=tokens.LOAD)
Python
0.000001
@@ -3372,422 +3372,8 @@ b)%0A%0A -def gte(a, b):%0A '''Compare two numbers using '%3E='.'''%0A util.ensure_type(numbers.Number, a, b)%0A return lang.Boolean.build(a %3E= b)%0A%0Adef lt(a, b):%0A '''Compare two numbers using '%3C'.'''%0A util.ensure_type(numbers.Number, a, b)%0A return lang.Boolean.build(a %3C b)%0A%0Adef lte(a, b):%0A '''Compare two numbers using '%3C='.'''%0A util.ensure_type(numbers.Number, a, b)%0A return lang.Boolean.build(a %3C= b)%0A%0A def
98dcb50560266fdc3b475f7e0d48627e7cf9abc0
simplify provider decorator
allib/di.py
allib/di.py
import functools import inspect import typing def provider(func=None, *, singleton=False): """ Decorator to mark a function as a provider. Args: singleton (bool): The returned value should be a singleton or shared instance. If False (the default) the provider function will be invoked again for every time it's needed for injection. Example: @provider def myfunc() -> MyClass: return MyClass(args) """ def _add_provider_annotations(wrapper, func): wrapper.__di__ = getattr(func, '__di__', {}) hints = typing.get_type_hints(func) wrapper.__di__['provides'] = hints['return'] wrapper.__di__['singleton'] = singleton if func is None: def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) _add_provider_annotations(wrapper, func) return wrapper return decorator @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) _add_provider_annotations(wrapper, func) return wrapper def inject(*args, **kwargs): """ Mark a class or function for injection, meaning that a DI container knows that it should inject dependencies into it. Normally you won't need this as the injector will inject the required arguments anyway, but it can be used to inject properties into a class without having to specify it in the constructor, or to inject arguments that aren't properly type hinted. Example: @di.inject('foo', MyClass) class MyOtherClass: pass assert isinstance(injector.get(MyOtherClass).foo, MyClass) """ def wrapper(obj): if inspect.isclass(obj) or callable(obj): inject_object(obj, *args, **kwargs) return obj raise Exception("Don't know how to inject into %r" % obj) return wrapper def inject_object(obj, var_name, var_type): obj.__di__ = getattr(obj, '__di__', {}) obj.__di__.setdefault('inject', {})[var_name] = var_type return obj class Module: """ A module is a collection of providers. """ pass class Injector: """ Class that knows how to do dependency injection. """ def __init__(self): self.instances = {} self.factories = {} def register_module(self, module: Module): """ Register a module. """ if inspect.isclass(module): module = self.get(module) if isinstance(module, Module): funcs = ( item[1] for item in inspect.getmembers(module, predicate=inspect.ismethod) ) else: raise Exception("Don't know how to register module: %r" % module) for func in funcs: if hasattr(func, '__di__') and func.__di__.get('provides'): self.register_provider(func) def register_provider(self, func): """ Register a provider function. """ if 'provides' not in getattr(func, '__di__', {}): raise Exception('Function %r is not a provider' % func) self.factories[func.__di__['provides']] = func def get(self, thing: type): """ Get an instance of some type. """ if thing in self.instances: return self.instances[thing] if thing in self.factories: fact = self.factories[thing] ret = self.get(fact) if hasattr(fact, '__di__') and fact.__di__['singleton']: self.instances[thing] = ret return ret if inspect.isclass(thing): return self._call_class_init(thing) elif callable(thing): return thing(**self._guess_kwargs(thing)) raise Exception('not sure what thing is: %r' % thing) def _call_class_init(self, cls): # if this statement is true, the class or its parent class(es) does not # have an __init__ method defined and as such should not need any # constructor arguments to be instantiated. 
if cls.__init__ is object.__init__: obj = cls() else: obj = cls(**self._guess_kwargs(cls.__init__)) # extra properties defined with @di.inject if hasattr(obj, '__di__') and 'inject' in obj.__di__: for prop_name, prop_type in obj.__di__['inject'].items(): setattr(obj, prop_name, self.get(prop_type)) return obj def _guess_kwargs(self, func): kwargs = {} hints = typing.get_type_hints(func) for arg in hints: if arg == 'return': continue kwargs[arg] = self.get(hints[arg]) return kwargs
Python
0.000003
@@ -41,16 +41,233 @@ yping%0A%0A%0A +def _wrap_provider_func(func, di_props):%0A%09hints = typing.get_type_hints(func)%0A%09di_props%5B'provides'%5D = hints%5B'return'%5D%0A%0A%09if not hasattr(func, '__di__'):%0A%09%09func.__di__ = %7B%7D%0A%09func.__di__.update(di_props)%0A%0A%09return func%0A%0A%0A def prov @@ -642,202 +642,19 @@ %22%0A%09d -ef _add_provider_annotations(wrapper, func):%0A%09%09wrapper.__di__ = getattr(func, '__di__', %7B%7D)%0A%09%09hints = typing.get_type_hints(func)%0A%09%09wrapper.__di__%5B'provides'%5D = hints%5B'return'%5D%0A%09%09wrapper.__di__%5B +i_props = %7B 'sin @@ -660,19 +660,17 @@ ngleton' -%5D = +: singlet @@ -671,16 +671,17 @@ ingleton +%7D %0A%0A%09if fu @@ -722,322 +722,114 @@ %0A%09%09%09 -@functools.wraps(func)%0A%09%09%09def wrapper(*args, **kwargs):%0A%09%09%09%09return func(*args, **kwargs)%0A%09%09%09_add_provider_annotations(wrapper, func)%0A%09%09%09return wrapper%0A%09%09return decorator%0A%0A%09@functools.wraps(func)%0A%09def wrapper(*args, **kwargs):%0A%09%09return func(*args, **kwargs)%0A%09_add_provider_annotations(wrapper, func)%0A%09return wrapper +return _wrap_provider_func(func, di_props)%0A%09%09return decorator%0A%0A%09return _wrap_provider_func(func, di_props) %0A%0A%0Ad
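Decoded, the refactor drops the two functools.wraps wrappers and routes both usage forms through one helper that annotates and returns the original function. Reconstructed from the hunks, with the docstring trimmed and 4-space indentation substituted for the record's tabs:

import typing

def _wrap_provider_func(func, di_props):
    hints = typing.get_type_hints(func)
    di_props['provides'] = hints['return']

    if not hasattr(func, '__di__'):
        func.__di__ = {}
    func.__di__.update(di_props)
    return func

def provider(func=None, *, singleton=False):
    di_props = {'singleton': singleton}

    if func is None:
        # Used as @provider(singleton=...): return a decorator.
        def decorator(func):
            return _wrap_provider_func(func, di_props)
        return decorator

    # Used as bare @provider: annotate the function directly.
    return _wrap_provider_func(func, di_props)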
34b2c332b8d1209985b37f4e440954a15d4004d3
create directly tar.gz into final directory
datadownloader/views.py
datadownloader/views.py
import os import tarfile import subprocess from datetime import datetime from sendfile import sendfile from django.views.generic import View, TemplateView from django.conf import settings from django.shortcuts import redirect def get_base_path(): if hasattr(settings, 'DATA_DOWNLOADER_PATH'): base_path = settings.DATA_DOWNLOADER_PATH else: base_path = os.path.join(settings.BASE_DIR, 'project', 'protected_medias', 'datas') return base_path def get_archives_info(): info = {} project_name = settings.BASE_DIR.split("/")[-1] base_path = get_base_path() for section in ["db", "media", "data"]: file_name = "%s_%s.tar.gz" % (project_name, section) path = os.path.join(base_path, file_name) if os.path.exists(path): infos = os.stat(path) date = datetime.fromtimestamp(int(infos.st_mtime)) info["%s_info" % section] = {'date': date, 'size': infos.st_size} else: info["%s_info" % section] = {'date': None, 'size': None} return info def create_archive(data_type): folders = [] base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) if data_type == "db" or data_type == "data": folders.append("dumps") dumps_path = os.path.join(settings.BASE_DIR, "dumps") if os.path.exists(dumps_path): for dump_file in os.listdir(dumps_path): os.remove(os.path.join(dumps_path, dump_file)) else: os.makedirs(dumps_path) # We will tempory use makefile for run datadump, but we must found # other solution make = ['/usr/bin/make', '-C', settings.BASE_DIR, 'datadump'] subprocess.check_output(make) if data_type == "media" or data_type == "data": folders.append("project/media") with tarfile.open(tar_name, "w:gz") as tar: for folder in folders: tar.add(folder) os.rename(tar_name, path) def delete_archive(data_type): base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) os.remove(path) class DataDownloaderMainView(TemplateView): template_name = "admin/datadownloader/index.html" def get_context_data(self, **kwargs): context = super(DataDownloaderMainView, self).get_context_data(**kwargs) context.update(get_archives_info()) return context class DataDownloaderCreateArchiveView(View): def get(self, request, *args, **kwargs): create_archive(kwargs['data_type']) return redirect('datadownloader_index') class DataDownloaderDeleteArchiveView(View): def get(self, request, *args, **kwargs): delete_archive(kwargs['data_type']) return redirect('datadownloader_index') class DataDownloaderDownloadArchiveView(View): def get(self, request, *args, **kwargs): data_type = kwargs['data_type'] base_path = get_base_path() project_name = settings.BASE_DIR.split("/")[-1] tar_name = "%s_%s.tar.gz" % (project_name, data_type) path = os.path.join(base_path, tar_name) return sendfile(request, path, attachment=True, attachment_filename=tar_name)
Python
0
@@ -2033,24 +2033,20 @@ le.open( -tar_name +path , %22w:gz%22 @@ -2118,38 +2118,8 @@ er)%0A - os.rename(tar_name, path)%0A %0A%0Ade
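Decoded, the archive is now opened at its final destination, so the follow-up os.rename(tar_name, path) step disappears. A sketch of just that step; the helper name is illustrative only, the dump and folder setup stay as in the record:

import tarfile

def _write_archive(path, folders):
    # Write straight to the final location instead of a temporary name.
    with tarfile.open(path, "w:gz") as tar:
        for folder in folders:
            tar.add(folder)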
79ffd869214e5232d907e46c077faf653a33c114
add serialize to LogReport
chainer/training/extensions/log_report.py
chainer/training/extensions/log_report.py
import json import os import shutil import tempfile import six from chainer import reporter from chainer import serializer as serializer_module from chainer.training import extension from chainer.training import trigger as trigger_module class LogReport(extension.Extension): """Trainer extension to output the accumulated results to a log file. This extension accumulates the observations of the trainer to :class:`~chainer.DictSummary` at a regular interval specified by a supplied trigger, and writes them into a log file in JSON format. There are two triggers to handle this extension. One is the trigger to invoke this extension, which is used to handle the timing of accumulating the results. It is set to ``1, 'iteration'`` by default. The other is the trigger to determine when to emit the result. When this trigger returns True, this extension appends the summary of accumulated values to the list of past summaries, and writes the list to the log file. Then, this extension makes a new fresh summary object which is used until the next time that the trigger fires. It also adds some entries to each result dictionary. - ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the output, respectively. - ``'elapsed_time'`` is the elapsed time in seconds since the training begins. The value is taken from :attr:`Trainer.elapsed_time`. Args: keys (iterable of strs): Keys of values to accumulate. If this is None, all the values are accumulated and output to the log file. trigger: Trigger that decides when to aggregate the result and output the values. This is distinct from the trigger of this extension itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`. postprocess: Callback to postprocess the result dictionaries. Each result dictionary is passed to this callback on the output. This callback can modify the result dictionaries, which are used to output to the log file. log_name (str): Name of the log file under the output directory. It can be a format string: the last result dictionary is passed for the formatting. For example, users can use '{iteration}' to separate the log files for different iterations. If the log name is None, it does not output the log to any file. 
""" def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None, log_name='log'): self._keys = keys self._trigger = trigger_module.get_trigger(trigger) self._postprocess = postprocess self._log_name = log_name self._log = [] self._init_summary() def __call__(self, trainer): # accumulate the observations keys = self._keys observation = trainer.observation summary = self._summary if keys is None: summary.add(observation) else: summary.add({k: observation[k] for k in keys if k in observation}) if self._trigger(trainer): # output the result stats = self._summary.compute_mean() stats_cpu = {} for name, value in six.iteritems(stats): stats_cpu[name] = float(value) # copy to CPU updater = trainer.updater stats_cpu['epoch'] = updater.epoch stats_cpu['iteration'] = updater.iteration stats_cpu['elapsed_time'] = trainer.elapsed_time if self._postprocess is not None: self._postprocess(stats_cpu) self._log.append(stats_cpu) # write to the log file if self._log_name is not None: log_name = self._log_name.format(**stats_cpu) fd, path = tempfile.mkstemp(prefix=log_name, dir=trainer.out) with os.fdopen(fd, 'w') as f: json.dump(self._log, f, indent=4) new_path = os.path.join(trainer.out, log_name) shutil.move(path, new_path) # reset the summary for the next output self._init_summary() @property def log(self): """The current list of observation dictionaries.""" return self._log def serialize(self, serializer): if hasattr(self._trigger, 'serialize'): self._trigger.serialize(serializer['_trigger']) # Note that this serialization may lose some information of small # numerical differences. if isinstance(serializer, serializer_module.Serializer): log = json.dumps(self._log) serializer('_log', log) else: log = serializer('_log', '') self._log = json.loads(log) def _init_summary(self): self._summary = reporter.DictSummary()
Python
0.000001
@@ -4557,16 +4557,125 @@ ger'%5D)%0A%0A + if hasattr(self._summary, 'serialize'):%0A self._summary.serialize(serializer%5B'_summary'%5D)%0A%0A
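Decoded, serialize() additionally persists the accumulating DictSummary (when it supports serialization), alongside the trigger state it already handled. Excerpted sketch of the method after the change:

def serialize(self, serializer):
    if hasattr(self._trigger, 'serialize'):
        self._trigger.serialize(serializer['_trigger'])

    # Added by this commit: also serialize the running summary when it supports it.
    if hasattr(self._summary, 'serialize'):
        self._summary.serialize(serializer['_summary'])

    # ...the existing JSON round-trip of self._log continues below.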
634305c2366991badc0c54a797e4294b86166879
make fname_rules address family dependent
ifupdown-multi.py
ifupdown-multi.py
#!/usr/bin/env python # Copyright (c) 2013 by Farsight Security, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import logging import os import subprocess import sys process_name = os.path.basename(sys.argv[0]) required_keys = ( 'MODE', 'ADDRFAM', 'IFACE', 'IF_ADDRESS', 'IF_NETMASK', 'IF_MULTI_TABLE', 'IF_MULTI_GATEWAY', ) additional_keys = ( 'IF_MULTI_GATEWAY_WEIGHT', 'IF_MULTI_PREFERRED_PREFIXES', ) fname_prefix = '/var/run/network/ifupdown-multi.' fname_nexthop = fname_prefix + '%(IFACE)s.nexthop.%(ADDRFAM)s' fname_rules = fname_prefix + '%(IFACE)s.rules' glob_nexthop = fname_prefix + '*.nexthop.%(ADDRFAM)s' priority_magic_preferred = 25357 priority_magic = 31047 def run(cmd): logging.debug('running command %r', cmd) rc = subprocess.call(cmd, shell=True) if rc != 0: logging.critical('command %r failed with exit code %d', cmd, rc) return rc class ifupdownMulti: def __init__(self, env): self.cfg = {} for key in required_keys: if env.has_key(key): self.cfg[key] = env[key] else: raise Exception, 'missing environment variable %s' % key for key in additional_keys: if env.has_key(key): self.cfg[key] = env[key] if not self.cfg['MODE'] in ('start', 'stop'): raise Exception, 'unknown ifupdown mode %s' % self.cfg['MODE'] if self.cfg['ADDRFAM'] == 'inet': self.cfg['ip'] = 'ip' elif self.cfg['ADDRFAM'] == 'inet6': self.cfg['ip'] = 'ip -6' table_id = int(self.cfg['IF_MULTI_TABLE']) self.cfg['PRIORITY_PREFERRED'] = priority_magic_preferred + table_id self.cfg['PRIORITY'] = priority_magic + table_id self.fname_nexthop = fname_nexthop % self.cfg self.fname_rules = fname_rules % self.cfg self.glob_nexthop = glob_nexthop % self.cfg def dispatch(self): if self.cfg['ADDRFAM'] in ('inet', 'inet6'): if self.cfg['MODE'] == 'start': self.start() elif self.cfg['MODE'] == 'stop': self.stop() def flush_route_cache(self): run('%(ip)s route flush cache' % self.cfg) def start_rule(self, rule): rule = rule % self.cfg with open(self.fname_rules, 'a') as w: w.write(rule + '\n') run('%s rule add %s' % (self.cfg['ip'], rule)) def start_route(self, route): route = route % self.cfg run('%s route replace %s' % (self.cfg['ip'], route)) def start_gateway(self): self.start_route('default via %(IF_MULTI_GATEWAY)s dev %(IFACE)s table %(IF_MULTI_TABLE)s proto static') nexthop = 'nexthop via %(IF_MULTI_GATEWAY)s dev %(IFACE)s' % self.cfg weight = self.cfg.get('IF_MULTI_GATEWAY_WEIGHT') if weight: nexthop += ' weight ' + weight with open(self.fname_nexthop, 'w') as w: w.write(nexthop) w.write('\n') def restart_nexthops(self): nexthops = set() for fname in glob.glob(self.glob_nexthop): for line in open(fname): nexthops.add(line.strip()) if nexthops: nexthops = sorted(list(nexthops)) cmd = self.cfg['ip'] + ' route replace default scope global ' + ' '.join(nexthops) run(cmd) else: run('%(ip)s route delete default' % self.cfg) def start(self): self.start_rule('from %(IF_ADDRESS)s table %(IF_MULTI_TABLE)s priority %(PRIORITY)s') self.start_rule('to %(IF_ADDRESS)s table %(IF_MULTI_TABLE)s priority %(PRIORITY)s') preferred_prefixes = 
self.cfg.get('IF_MULTI_PREFERRED_PREFIXES') if preferred_prefixes: for prefix in preferred_prefixes.split(): self.cfg['PREFIX'] = prefix self.start_rule('to %(PREFIX)s table %(IF_MULTI_TABLE)s priority %(PRIORITY_PREFERRED)s') self.start_gateway() self.restart_nexthops() self.flush_route_cache() def stop_rules(self): if os.path.exists(self.fname_rules): for line in open(self.fname_rules): rule = line.strip() run(self.cfg['ip'] + ' rule delete ' + rule) try: logging.debug('unlinking %s', self.fname_rules) os.unlink(self.fname_rules) except OSError: pass def stop(self): run('%(ip)s route flush table %(IF_MULTI_TABLE)s' % self.cfg) try: logging.debug('unlinking %s', self.fname_nexthop) os.unlink(self.fname_nexthop) except OSError: pass self.restart_nexthops() self.stop_rules() self.flush_route_cache() def main(): if not 'IF_MULTI_TABLE' in os.environ: sys.exit(0) if not os.getenv('MODE') in ('start', 'stop'): sys.exit(0) if os.getenv('VERBOSITY') == '1': level = logging.DEBUG else: level = logging.CRITICAL logging.basicConfig(format=process_name+': %(levelname)s: %(message)s', level=level) ifupdownMulti(os.environ).dispatch() if __name__ == '__main__': main()
Python
0.0002
@@ -1113,16 +1113,28 @@ )s.rules +.%25(ADDRFAM)s '%0A%0Aglob_
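Decoded, the rules state file now carries the address family suffix that the nexthop file already had, presumably so the inet and inet6 runs for one interface keep separate rule lists:

fname_prefix = '/var/run/network/ifupdown-multi.'
fname_nexthop = fname_prefix + '%(IFACE)s.nexthop.%(ADDRFAM)s'
fname_rules = fname_prefix + '%(IFACE)s.rules.%(ADDRFAM)s'   # suffix added here
glob_nexthop = fname_prefix + '*.nexthop.%(ADDRFAM)s'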
7477f969cd8efd624e7f378f7838270c53c2755e
Allow make_reverb_dataset's caller to set max_in_flight_samples_per_worker. Default behavior is unchanged.
acme/datasets/reverb.py
acme/datasets/reverb.py
# python3 # Copyright 2018 DeepMind Technologies Limited. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for making TensorFlow datasets for sampling from Reverb replay.""" from typing import Optional from acme import specs from acme import types from acme.adders import reverb as adders import reverb import tensorflow as tf def make_reverb_dataset( server_address: str, batch_size: Optional[int] = None, prefetch_size: Optional[int] = None, table: str = adders.DEFAULT_PRIORITY_TABLE, num_parallel_calls: int = 12, # Deprecated kwargs. environment_spec: Optional[specs.EnvironmentSpec] = None, extra_spec: Optional[types.NestedSpec] = None, transition_adder: bool = False, convert_zero_size_to_none: bool = False, using_deprecated_adder: bool = False, sequence_length: Optional[int] = None, ) -> tf.data.Dataset: """Make a TensorFlow dataset backed by a Reverb trajectory replay service.""" if environment_spec or extra_spec: raise ValueError( 'The make_reverb_dataset factory function no longer requires specs as' ' as they should be passed as a signature to the reverb.Table when it' ' is created. Consider either updating your code or falling back to the' ' deprecated dataset factory in acme/datasets/deprecated.') # These are no longer used and are only kept in the call signature for # backward compatibility. del environment_spec del extra_spec del transition_adder del convert_zero_size_to_none del using_deprecated_adder del sequence_length # This is the default that used to be set by reverb.TFClient.dataset(). max_in_flight_samples_per_worker = 2 * batch_size if batch_size else 100 def _make_dataset(unused_idx: tf.Tensor) -> tf.data.Dataset: dataset = reverb.TrajectoryDataset.from_table_signature( server_address=server_address, table=table, max_in_flight_samples_per_worker=max_in_flight_samples_per_worker) # Finish the pipeline: batch and prefetch. if batch_size: dataset = dataset.batch(batch_size, drop_remainder=True) return dataset # Create the dataset. dataset = tf.data.Dataset.range(num_parallel_calls) dataset = dataset.interleave( map_func=_make_dataset, cycle_length=num_parallel_calls, num_parallel_calls=num_parallel_calls, deterministic=False) if prefetch_size: dataset = dataset.prefetch(prefetch_size) return dataset # TODO(b/152732834): remove this and prefer datasets.make_reverb_dataset. make_dataset = make_reverb_dataset
Python
0.000012
@@ -1068,16 +1068,76 @@ t = 12,%0A + max_in_flight_samples_per_worker: Optional%5Bint%5D = None,%0A # De @@ -2225,16 +2225,19 @@ et().%0A +if max_in_f @@ -2265,13 +2265,19 @@ ker -= 2 * +is None and bat @@ -2289,29 +2289,161 @@ ze i -f batch_size else 100 +s None:%0A max_in_flight_samples_per_worker = 100%0A elif max_in_flight_samples_per_worker is None:%0A max_in_flight_samples_per_worker = 2 * batch_size %0A%0A
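Decoded, the factory gains a max_in_flight_samples_per_worker keyword (default None) and only falls back to the old reverb.TFClient-style defaults when the caller leaves it unset. The helper function below is purely illustrative (the record keeps this logic inline); 2-space indentation follows the file:

from typing import Optional

def _resolve_max_in_flight(max_in_flight_samples_per_worker: Optional[int],
                           batch_size: Optional[int]) -> int:
  # An explicit caller value always wins.
  if max_in_flight_samples_per_worker is not None:
    return max_in_flight_samples_per_worker
  # Otherwise keep the previous defaults: 100 unbatched, 2 * batch_size batched.
  if batch_size is None:
    return 100
  return 2 * batch_size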
532935a92c576f4737dd1631c38e0ac32dd1f984
Remove hash comments from blacklist --autopull
blacklists.py
blacklists.py
from typing import Union import regex from globalvars import GlobalVars from helpers import log def load_blacklists(): GlobalVars.bad_keywords = Blacklist(Blacklist.KEYWORDS).parse() GlobalVars.blacklisted_websites = Blacklist(Blacklist.WEBSITES).parse() GlobalVars.blacklisted_usernames = Blacklist(Blacklist.USERNAMES).parse() GlobalVars.watched_keywords = Blacklist(Blacklist.WATCHED_KEYWORDS).parse() class BlacklistParser: def __init__(self, filename): self._filename = filename def parse(self): return None def add(self, item): pass def remove(self, item): pass def exists(self, item): pass class BasicListParser(BlacklistParser): def parse(self): with open(self._filename, 'r', encoding='utf-8') as f: return [line.rstrip() for line in f if len(line.rstrip()) > 0] def add(self, item: str): with open(self._filename, 'a+', encoding='utf-8') as f: last_char = f.read()[-1:] if last_char not in ['', '\n']: item = '\n' + item f.write(item + '\n') def remove(self, item: str): with open(self._filename, 'r+', encoding='utf-8') as f: items = f.readlines() items = [x for x in items if item not in x] f.seek(0) f.truncate() f.writelines(items) def exists(self, item: str): with open(self._filename, 'r', encoding='utf-8') as f: lines = f.readlines() for i, x in enumerate(lines): if item in x: return True, i + 1 return False, -1 class TSVDictParser(BlacklistParser): def parse(self): list = {} with open(self._filename, 'r', encoding='utf-8') as f: for lineno, line in enumerate(f, 1): if regex.compile('^\s*(?:#|$)').match(line): continue try: when, by_whom, what = line.rstrip().split('\t') except ValueError as err: log('error', '{0}:{1}:{2}'.format(self._filename, lineno, err)) continue list[what] = {'when': when, 'by': by_whom} return list def add(self, item: Union[str, dict]): with open(self._filename, 'a+', encoding='utf-8') as f: if isinstance(item, dict): item = '{}\t{}\t{}'.format(item[0], item[1], item[2]) last_char = f.read()[-1:] if last_char not in ['', '\n']: item = '\n' + item f.write(item + '\n') def remove(self, item: Union[str, dict]): if isinstance(item, dict): item = item[2] with open(self._filename, 'r+', encoding='utf-8') as f: items = f.readlines() items = [x for x in items if item not in x] f.seek(0) f.truncate() f.writelines(items) def exists(self, item: Union[str, dict]): if isinstance(item, dict): item = item[2] with open(self._filename, 'r', encoding='utf-8') as f: lines = f.readlines() for i, x in enumerate(lines): if item in x: return True, i + 1 return False, -1 class Blacklist: KEYWORDS = ('bad_keywords.txt', BasicListParser) WEBSITES = ('blacklisted_websites.txt', BasicListParser) USERNAMES = ('blacklisted_usernames.txt', BasicListParser) WATCHED_KEYWORDS = ('watched_keywords.txt', TSVDictParser) def __init__(self, type): self._filename = type[0] self._parser = type[1](self._filename) def parse(self): return self._parser.parse() def add(self, item): return self._parser.add(item) def remove(self, item): return self._parser.remove(item) def exists(self, item): return self._parser.exists(item)
Python
0
@@ -874,16 +874,35 @@ p()) %3E 0 + and line%5B0%5D != '#' %5D%0A%0A d
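Decoded, BasicListParser.parse() now also drops full-line # comments, not just blank lines:

def parse(self):
    with open(self._filename, 'r', encoding='utf-8') as f:
        # Skip empty lines and lines that start with '#'.
        return [line.rstrip() for line in f
                if len(line.rstrip()) > 0 and line[0] != '#']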
24b3a085471d0ccf048d23ee4f2cef1d5cdf97ec
Fix error message for unexpected operand
tt/expressions/bexpr.py
tt/expressions/bexpr.py
"""Tools for interacting with Boolean expressions.""" import re from ..operators import (CONSTANT_VALUES, DELIMITERS, OPERATOR_MAPPING, TT_NOT_OP) from ..trees import BooleanExpressionTree from .errors import (BadParenPositionError, ExpressionOrderError, UnbalancedParenError) class BooleanExpression(object): """A class for parsing and holding information about a Boolean expression. Attributes: raw_expr (str): The raw string expression, to be parsed upon initialization. symbols (List[str]): The list of unique symbols present in this expression. tokens (List[str]): A list of strings, each element indicating a different token of the parsed expression. postfix_tokens (List[str]): A list of strings, representing the ``tokens`` list converted to postfix form. expr_tree (tt.trees.BooleanExpressionTree): The expression tree representing the expression wrapped in this class, derived from the tokens parsed by this class. """ def __init__(self, raw_expr): self.raw_expr = raw_expr self.symbols = [] self._symbol_set = set() self.tokens = [] self.postfix_tokens = [] self._tokenize() self._to_postfix() self.expr_tree = BooleanExpressionTree(self.postfix_tokens) def _tokenize(self): """Make the first pass through the expression, tokenizing it. This method will populate the ``symbols`` and ``tokens`` attributes, and is the first step in the expression-processing pipeline. """ operator_strs = [k for k in OPERATOR_MAPPING.keys()] is_symbolic = {op: not op[0].isalpha() for op in operator_strs} operator_search_list = sorted(operator_strs, key=len, reverse=True) delimiters = DELIMITERS | set(k[0] for k, v in is_symbolic.items() if v) EXPECTING_OPERAND = 1 EXPECTING_OPERATOR = 2 grammar_state = EXPECTING_OPERAND idx = 0 open_paren_count = 0 num_chars = len(self.raw_expr) while idx < num_chars: c = self.raw_expr[idx].strip() if not c: # do nothing idx += 1 elif c == '(': if grammar_state != EXPECTING_OPERAND: raise BadParenPositionError('Unexpected parenthesis', self.raw_expr, idx) open_paren_count += 1 self.tokens.append(c) idx += 1 elif c == ')': if grammar_state != EXPECTING_OPERATOR: raise BadParenPositionError('Unexpected parenthesis', self.raw_expr, idx) elif not open_paren_count: raise UnbalancedParenError('Unbalanced parenthesis', self.raw_expr, idx) open_paren_count -= 1 self.tokens.append(c) idx += 1 else: is_operator = False num_chars_remaining = num_chars - idx matching_operators = [ operator for operator in operator_search_list if len(operator) <= num_chars_remaining and self.raw_expr[idx:(idx+len(operator))] == operator] if matching_operators: match = matching_operators[0] match_length = len(match) next_c_pos = idx + match_length next_c = (None if next_c_pos >= num_chars else self.raw_expr[idx + match_length]) if next_c is None: # trailing operator raise ExpressionOrderError( 'Unexpected operator "{}"'.format(match), self.raw_expr, idx) if next_c in delimiters or is_symbolic[match]: if OPERATOR_MAPPING[match] == TT_NOT_OP: if grammar_state != EXPECTING_OPERAND: raise ExpressionOrderError( 'Unexpected unary operator "{}"'.format( match), self.raw_expr, idx) else: if grammar_state != EXPECTING_OPERATOR: raise ExpressionOrderError( 'Unexpected binary operator "{}"'.format( match), self.raw_expr, idx) grammar_state = EXPECTING_OPERAND is_operator = True self.tokens.append(match) idx += match_length if not is_operator: if grammar_state != EXPECTING_OPERAND: raise ExpressionOrderError('Unexpected operator', self.raw_expr, idx) operand_end_idx = idx + 1 while (operand_end_idx < num_chars and 
self.raw_expr[operand_end_idx] not in delimiters): operand_end_idx += 1 operand = self.raw_expr[idx:operand_end_idx] self.tokens.append(operand) if operand not in (self._symbol_set | CONSTANT_VALUES): self.symbols.append(operand) self._symbol_set.add(operand) idx = operand_end_idx grammar_state = EXPECTING_OPERATOR if open_paren_count: left_paren_positions = [m.start() for m in re.finditer(r'\(', self.raw_expr)] raise UnbalancedParenError( 'Unbalanced left parenthesis', self.raw_expr, left_paren_positions[open_paren_count-1]) def _to_postfix(self): """Populate the ``postfix_tokens`` attribute.""" operand_set = self._symbol_set | CONSTANT_VALUES stack = [] for token in self.tokens: if token in operand_set: self.postfix_tokens.append(token) elif token == '(': stack.append(token) elif token in OPERATOR_MAPPING.keys(): if not stack: stack.append(token) else: while (stack and stack[-1] != '(' and OPERATOR_MAPPING[stack[-1]].precedence > OPERATOR_MAPPING[token].precedence): self.postfix_tokens.append(stack.pop()) stack.append(token) elif token == ')': while stack and stack[-1] != '(': self.postfix_tokens.append(stack.pop()) stack.pop() for token in reversed(stack): self.postfix_tokens.append(token)
Python
0.00002
@@ -1652,16 +1652,98 @@ eline.%0A%0A + Raises:%0A GrammarError: If a malformed expression is received.%0A%0A @@ -5247,19 +5247,18 @@ ed opera -tor +nd ',%0A
fc353499d00d9c9c3454de82b3ac2a4d724c485b
Add flatpages contrib to installed apps for tests
conftest.py
conftest.py
import os import logging from django.conf import settings from oscar import OSCAR_MAIN_TEMPLATE_DIR, get_core_apps location = lambda x: os.path.join( os.path.dirname(os.path.realpath(__file__)), x ) sandbox = lambda x: location("sandbox/%s" % x) logging.basicConfig(level=logging.INFO) def pytest_configure(): from oscar.defaults import OSCAR_SETTINGS from oscar_mws.defaults import OSCAR_MWS_SETTINGS DEFAULT_SETTINGS = OSCAR_SETTINGS DEFAULT_SETTINGS.update(OSCAR_MWS_SETTINGS) DEFAULT_SETTINGS['OSCAR_DEFAULT_CURRENCY'] = 'USD' settings.configure( DATABASES={ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', } }, USE_TZ=True, MEDIA_ROOT=sandbox('public/media'), MEDIA_URL='/media/', STATIC_URL='/static/', STATICFILES_DIRS=[ sandbox('static/') ], STATIC_ROOT=sandbox('public'), STATICFILES_FINDERS=( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ), TEMPLATE_LOADERS=( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ), TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.core.context_processors.request", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.contrib.messages.context_processors.messages", ), MIDDLEWARE_CLASSES=( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'oscar.apps.basket.middleware.BasketMiddleware', ), ROOT_URLCONF='sandbox.sandbox.urls', TEMPLATE_DIRS=[ sandbox('templates'), OSCAR_MAIN_TEMPLATE_DIR, ], INSTALLED_APPS=[ 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'compressor', 'south', 'oscar_mws', ] + get_core_apps(), AUTHENTICATION_BACKENDS=( 'django.contrib.auth.backends.ModelBackend', ), COMPRESS_ENABLED=True, COMPRESS_OFFLINE=False, COMPRESS_PRECOMPILERS=( ('text/less', 'lessc {infile} {outfile}'), ), LOGIN_REDIRECT_URL='/accounts/', APPEND_SLASH=True, SITE_ID=1, HAYSTACK_CONNECTIONS={ 'default': { 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine', }, }, LOGGING={ 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple', } }, 'loggers': { 'oscar_mws': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': True, }, 'oscar_mws.api': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': True, }, 'django.request': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': True, }, } }, DEBUG=True, **DEFAULT_SETTINGS )
Python
0
@@ -2629,24 +2629,64 @@ aticfiles',%0A + 'django.contrib.flatpages',%0A
d04ded85e01c4a9e0960d57a37ecd83fc92fa5cd
Add a fallback to mini_installer_tests' quit_chrome.py exit logic.
chrome/test/mini_installer/quit_chrome.py
chrome/test/mini_installer/quit_chrome.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Quits Chrome.

This script sends a WM_CLOSE message to each window of Chrome and waits until
the process terminates.
"""

import optparse
import pywintypes
import sys
import time
import win32con
import win32gui
import winerror

import chrome_helper


def CloseWindows(process_path):
  """Closes all windows owned by processes whose path is |process_path|.

  Args:
    process_path: The path to the process.

  Returns:
    A boolean indicating whether the processes successfully terminate within
    30 seconds.
  """
  start_time = time.time()
  while time.time() - start_time < 30:
    process_ids = chrome_helper.GetProcessIDs(process_path)
    if not process_ids:
      return True

    for hwnd in chrome_helper.GetWindowHandles(process_ids):
      try:
        win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
      except pywintypes.error as error:
        # It's normal that some window handles have become invalid.
        if error.args[0] != winerror.ERROR_INVALID_WINDOW_HANDLE:
          raise
    time.sleep(0.1)
  return False


def main():
  usage = 'usage: %prog chrome_path'
  parser = optparse.OptionParser(usage, description='Quit Chrome.')
  _, args = parser.parse_args()
  if len(args) != 1:
    parser.error('Incorrect number of arguments.')
  chrome_path = args[0]

  if not CloseWindows(chrome_path):
    raise Exception('Could not quit Chrome.')
  return 0


if __name__ == '__main__':
  sys.exit(main())
Python
0.003252
@@ -297,16 +297,26 @@ ptparse%0A +import os%0A import p @@ -501,16 +501,20 @@ s whose +exe path is @@ -572,23 +572,81 @@ to the -process +executable whose processes will have their%0A windows closed .%0A%0A Ret @@ -720,16 +720,17 @@ erminate +d within%0A @@ -733,18 +733,18 @@ hin%0A -30 +25 seconds @@ -817,10 +817,10 @@ e %3C -30 +25 :%0A @@ -1279,16 +1279,358 @@ False%0A%0A%0A +def KillNamedProcess(process_path):%0A %22%22%22 Kills all running exes with the same name as the exe at %7Cprocess_path%7C.%0A%0A Args:%0A process_path: The path to an executable.%0A%0A Returns:%0A True if running executables were successfully killed. False otherwise.%0A %22%22%22%0A return os.system('taskkill /f /im %25s' %25 os.path.basename(process_path)) == 0%0A%0A%0A def main @@ -1911,49 +1911,200 @@ -raise Exception('Could not quit Chrome.') +# TODO(robertshield): Investigate why Chrome occasionally doesn't shut down.%0A print 'Warning: Chrome not responding to window closure. Killing process...'%0A KillNamedProcess(chrome_path): %0A r
eba39b722d6d025ec351beeb35e7dadd55ef82f5
correctly treat hashes as little endian
blockchain.py
blockchain.py
#!/usr/bin/env python3
import binascii
import datetime


class BlockChain:
    def __init__(self, data, handler=None):
        self.data = data
        self.handler = handler
        self.index = 0
        self.block_count = 0
        while self.index < len(self.data):
            self.parse_block()
            self.block_count += 1

    def get_byte(self):
        data = self.data[self.index]
        self.index += 1
        return data

    def get_bytes(self, length=1):
        data = self.data[self.index:self.index + length]
        self.index += length
        return data

    def get_uint16(self):
        return self.get_byte() + (self.get_byte() << 8)

    def get_uint32(self):
        return self.get_uint16() + (self.get_uint16() << 16)

    def get_uint64(self):
        return self.get_uint32() + (self.get_uint32() << 32)

    def get_timestamp(self):
        return datetime.datetime.fromtimestamp(self.get_uint32())

    def get_hash(self):
        return self.get_bytes(32)

    def get_varlen_int(self):
        code = self.get_byte()
        if code < 0xFD:
            return code
        elif code == 0xFD:
            return self.get_uint16()
        elif code == 0xFE:
            return self.get_uint32()
        elif code == 0xFF:
            return self.get_uint64()

    def parse_block(self):
        magic_network_id = self.get_uint32()
        block_length = self.get_uint32()
        block_format_version = self.get_uint32()
        hash_of_previous_block = self.get_hash()
        merkle_root = self.get_hash()
        timestamp = self.get_timestamp()
        bits = self.get_uint32()
        nonce = self.get_uint32()
        transaction_count = self.get_varlen_int()
        for i in range(transaction_count):
            self.parse_transaction()

        print("{} prev_block_hash={} timestamp={} nonce={}".format(self.block_count, binascii.hexlify(hash_of_previous_block), timestamp, nonce))

    def parse_transaction(self):
        version_number = self.get_uint32()
        input_count = self.get_varlen_int()
        for i in range(input_count):
            self.parse_input()
        output_count = self.get_varlen_int()
        for i in range(output_count):
            self.parse_output()
        transaction_lock_time = self.get_uint32()

    def parse_input(self):
        transaction_hash = self.get_hash()
        transaction_index = self.get_uint32()
        script_length = self.get_varlen_int()
        script = self.get_bytes(script_length)
        sequence_number = self.get_uint32()

    def parse_output(self):
        value = self.get_uint64()
        script_length = self.get_varlen_int()
        script = self.get_bytes(script_length)


if __name__ == "__main__":
    import sys

    filename = sys.argv[1]
    with open(filename, "rb") as f:
        data = f.read()

    BlockChain(data)
Python
0.998601
@@ -569,32 +569,38 @@ return data +%5B::-1%5D %0A%0A def get_ui
202d0a199a59a0e8ca5651785aa4497b1e0047e7
Add default implementation of Language.validate_code
importkit/meta.py
importkit/meta.py
##
# Copyright (c) 2008-2012 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##


import functools
import os

from semantix import exceptions as sx_errors

from .loader import LanguageSourceFileLoader
from .import_ import finder


class LanguageMeta(type):
    languages = []

    def __new__(cls, name, bases, dct, *, register=True):
        lang = super(LanguageMeta, cls).__new__(cls, name, bases, dct)
        if register:
            LanguageMeta.languages.append(lang)
            finder.update_finders()
        return lang

    def __init__(cls, name, bases, dct, *, register=True):
        super().__init__(name, bases, dct)

    @staticmethod
    def recognize_file(filename, try_append_extension=False, is_package=False):
        result = None
        for lang in LanguageMeta.languages:
            file_ = lang.recognize_file(filename, try_append_extension, is_package)
            if file_:
                if result is not None:
                    raise ImportError('ambiguous module import: %s, languages in conflict: %s' % \
                                      (filename, (lang, result[0])))
                result = (lang, file_)
        return result

    def get_loader(cls):
        return cls.loader

    @classmethod
    def get_loaders(cls):
        for lang in LanguageMeta.languages:
            yield (functools.partial(lang.loader, language=lang),
                   ['.' + ext for ext in lang.file_extensions])


class Language(object, metaclass=LanguageMeta, register=False):
    loader = LanguageSourceFileLoader
    file_extensions = ()
    proxy_module_cls = None

    @classmethod
    def recognize_file(cls, filename, try_append_extension=False, is_package=False):
        if is_package:
            filename = os.path.join(filename, '__init__')

        if try_append_extension:
            for ext in cls.file_extensions:
                if os.path.exists(filename + '.' + ext):
                    return filename + '.' + ext
        elif os.path.exists(filename):
            for ext in cls.file_extensions:
                if filename.endswith('.' + ext):
                    return filename

    @classmethod
    def load_code(cls, stream, context):
        raise NotImplementedError

    @classmethod
    def execute_code(cls, code, context):
        raise NotImplementedError


class ObjectError(Exception):
    def __init__(self, msg, context=None, code=None, note=None):
        self.msg = msg
        self.context = context
        self.code = code
        self.note = note

    def __str__(self):
        return self.msg


class Object:
    def __sx_setstate__(self, data):
        pass


class LanguageError(sx_errors.SemantixError):
    pass
Python
0
@@ -2345,16 +2345,81 @@ dError%0A%0A + @classmethod%0A def validate_code(cls, code):%0A pass%0A%0A %0Aclass O
417c46221fbcecf22ffa7c27d6e1ade76dea74df
disable caching for replacement frontend. a) the underlying frontend will cache and b) this cache is going to be very unreliable
claripy/frontends/replacement_frontend.py
claripy/frontends/replacement_frontend.py
#!/usr/bin/env python import weakref import logging l = logging.getLogger("claripy.frontends.full_frontend") from .constrained_frontend import ConstrainedFrontend class ReplacementFrontend(ConstrainedFrontend): def __init__(self, actual_frontend, allow_symbolic=None, replacements=None, replacement_cache=None, auto_replace=None, **kwargs): ConstrainedFrontend.__init__(self, **kwargs) self._actual_frontend = actual_frontend self._allow_symbolic = True if allow_symbolic is None else allow_symbolic self._auto_replace = True if auto_replace is None else auto_replace self._replacements = { } if replacements is None else replacements self._replacement_cache = weakref.WeakKeyDictionary() if replacement_cache is None else replacement_cache def add_replacement(self, old, new, invalidate_cache=False, replace=False): if not isinstance(old, Base): return if not replace and old in self._replacements: return if not isinstance(new, Base): if not isinstance(new, (int, long)): return new = BVV(new, old.length) if invalidate_cache: self._replacements = dict(self._replacements) self._replacement_cache = weakref.WeakKeyDictionary(self._replacement_cache) self._actual_frontend = self._actual_frontend.branch() self._replacements[old.cache_key] = new self._replacement_cache[old.cache_key] = new def _replacement(self, old): if not isinstance(old, Base): return old if old.cache_key in self._replacement_cache: return self._replacement_cache[old.cache_key] else: new = old.replace_dict(self._replacement_cache) self._replacement_cache[old.cache_key] = new return new def _add_solve_result(self, e, er, r): if not self._auto_replace: return if not isinstance(e, Base) or not e.symbolic: return if er.symbolic: return self.add_replacement(e, r) # # Storable support # def _blank_copy(self): s = ReplacementFrontend(self._actual_frontend._blank_copy()) s._auto_replace = self._auto_replace s._allow_symbolic = self._allow_symbolic return s def branch(self): s = ConstrainedFrontend.branch(self) s._action_frontend = self._actual_frontend.branch() s._replacements = self._replacements s._replacement_cache = self._replacement_cache return s def downsize(self): self._actual_frontend.downsize() self._replacement_cache.clear() def _ana_getstate(self): return self._replacements, self._actual_frontend, ConstrainedFrontend._ana_getstate(self) def _ana_setstate(self, s): self._replacements, self._actual_frontend, base_state = s ConstrainedFrontend._ana_setstate(base_state) self._replacement_cache = weakref.WeakKeyDictionary() # # Replacement solving # def _replace_list(self, lst): return tuple(self._replacement(c) for c in lst) def solve(self, extra_constraints=(), exact=None, cache=None): ecr = self._replace_list(extra_constraints) return self._actual_frontend.solve(extra_constraints=ecr, exact=exact, cache=cache) def eval(self, e, n, extra_constraints=(), exact=None, cache=None): er = self._replacement(e) ecr = self._replace_list(extra_constraints) r = self._actual_frontend.eval(er, n, extra_constraints=ecr, exact=exact, cache=cache) self._add_solve_result(e, er, r[0]) return r def max(self, e, extra_constraints=(), exact=None, cache=None): er = self._replacement(e) ecr = self._replace_list(extra_constraints) r = self._actual_frontend.max(er, extra_constraints=ecr, exact=exact, cache=cache) self._add_solve_result(e, er, r) return r def min(self, e, extra_constraints=(), exact=None, cache=None): er = self._replacement(e) ecr = self._replace_list(extra_constraints) r = self._actual_frontend.min(er, extra_constraints=ecr, exact=exact, 
cache=cache) self._add_solve_result(e, er, r) return r def solution(self, e, v, extra_constraints=(), exact=None, cache=None): er = self._replacement(e) vr = self._replacement(v) ecr = self._replace_list(extra_constraints) r = self._actual_frontend.solution(er, vr, extra_constraints=ecr, exact=exact, cache=cache) if r and (not isinstance(vr, Base) or not vr.symbolic): self._add_solve_result(e, er, vr) return r def is_true(self, e, extra_constraints=(), exact=None, cache=None): er = self._replacement(e) ecr = self._replace_list(extra_constraints) return self._actual_frontend.is_true(er, extra_constraints=ecr, exact=exact, cache=cache) def is_false(self, e, extra_constraints=(), exact=None, cache=None): er = self._replacement(e) ecr = self._replace_list(extra_constraints) return self._actual_frontend.is_false(er, extra_constraints=ecr, exact=exact, cache=cache) def add(self, constraints, **kwargs): for c in constraints: if self._auto_replace and isinstance(c, Base) and c.op == '__eq__' and isinstance(c.args[0], Base) and isinstance(c.args[1], Base): if c.args[0].symbolic and not c.args[1].symbolic and c.args[0].cache_key not in self._replacements and c.args[0].cache_key not in self._replacement_cache: self.add_replacement(c.args[0], c.args[1], invalidate_cache=True) elif not c.args[0].symbolic and c.args[1].symbolic and c.args[1].cache_key not in self._replacements and c.args[1].cache_key not in self._replacement_cache: self.add_replacement(c.args[1], c.args[0], invalidate_cache=True) ConstrainedFrontend.add(self, constraints, **kwargs) cr = self._replace_list(constraints) if not self._allow_symbolic and any(c.symbolic for c in cr): raise ClaripyFrontendError("symbolic constraints made it into ReplacementFrontend with allow_symbolic=False") return self._actual_frontend.add(cr, **kwargs) #def _add_constraints(self, *args, **kwargs): #pylint:disable=unused-argument # raise Exception("this should not be called") def _solve(self, *args, **kwargs): #pylint:disable=unused-argument raise Exception("this should not be called") def _eval(self, *args, **kwargs): #pylint:disable=unused-argument raise Exception("this should not be called") def _max(self, *args, **kwargs): #pylint:disable=unused-argument raise Exception("this should not be called") def _min(self, *args, **kwargs): #pylint:disable=unused-argument raise Exception("this should not be called") def _solution(self, *args, **kwargs): #pylint:disable=unused-argument raise Exception("this should not be called") def _is_true(self, *args, **kwargs): #pylint:disable=unused-argument raise Exception("this should not be called") def _is_false(self, *args, **kwargs): #pylint:disable=unused-argument raise Exception("this should not be called") from ..ast.base import Base from ..ast.bv import BVV from ..errors import ClaripyFrontendError
Python
0.00006
@@ -333,32 +333,85 @@ one, **kwargs):%0A + kwargs%5B'cache'%5D = kwargs.get('cache', False)%0A Constrai
bde5f87e92227734d86d8caf5fcafcbf81b3a272
Add rename table
tvnamer/gui/__init__.py
tvnamer/gui/__init__.py
import sys from PySide import QtCore, QtGui from ..renamer import Renamer class VideoFileFolderDropTarget(QtGui.QLabel, QtCore.QObject): dropped = QtCore.Signal(str) def __init__(self, parent=None): super().__init__(parent) self.setAcceptDrops(True) self.setStyleSheet("background: white;") self.setAlignment(QtCore.Qt.AlignCenter) self.setPixmap("tvnamer/gui/resources/drop_target.svg") def dragEnterEvent(self, e): if e.mimeData().hasUrls() and len(e.mimeData().urls()) == 1 and \ all(map(QtCore.QUrl.isLocalFile, e.mimeData().urls())): e.accept() else: e.ignore() def dropEvent(self, e): url = e.mimeData().urls()[0] self.dropped.emit(url.path()) class SetUpRenamerDialogue(QtGui.QDialog): def __init__(self, directory, api_key, parent=None): super().__init__(parent) self.setWindowTitle(directory) self.directory = directory self.api_key = api_key self.input_regex_text = QtGui.QLineEdit() self.input_regex_text.setPlaceholderText("Input Regular Expression") self.output_format_text = QtGui.QLineEdit() self.output_format_text.setPlaceholderText("Output Format") self.extras_form = QtGui.QWidget() self.series_name_text = QtGui.QLineEdit() self.season_number_text = QtGui.QSpinBox() self.season_number_text.setMinimum(0) self.episode_number_text = QtGui.QSpinBox() self.episode_number_text.setMinimum(0) self.episode_name_text = QtGui.QLineEdit() self.extras_form_layout = QtGui.QFormLayout(self) self.extras_form_layout.addRow("Series Name", self.series_name_text) self.extras_form_layout.addRow("Season Number", self.season_number_text) self.extras_form_layout.addRow("Episode Number", self.episode_number_text) self.extras_form_layout.addRow("Episode Name", self.episode_name_text) self.extras_form.setLayout(self.extras_form_layout) self.button_box = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.Cancel, QtCore.Qt.Horizontal, self) self.button_box.accepted.connect(self.accept) self.button_box.rejected.connect(self.reject) layout = QtGui.QVBoxLayout() layout.addWidget(self.input_regex_text) layout.addWidget(self.output_format_text) layout.addWidget(self.extras_form) layout.addWidget(self.button_box) self.setLayout(layout) @staticmethod def create_left_column(text): item = QtGui.QTableWidgetItem(text) flags = QtCore.Qt.ItemFlags() flags != QtCore.Qt.ItemIsEnabled item.setFlags(flags) return item @property def renamer(self): input_regex = self.input_regex_text.text() output_format = self.output_format_text.text() default_params = { "series_name": self.series_name_text.text(), "season_number": self.season_number_text.value(), "episode_number": self.episode_number_text.value(), "episode_name": self.episode_name_text.text(), } return Renamer(self.api_key, self.directory, input_regex, output_format, default_params) class MainWindow(QtGui.QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("TV Namer") self.setMinimumSize(500, 400) self.init_menu_bar(self.menuBar()) self.drop_target = VideoFileFolderDropTarget(self) self.drop_target.dropped.connect(self.on_drop_target_dropped) self.setCentralWidget(self.drop_target) self.status_bar = QtGui.QStatusBar(self) self.setStatusBar(self.status_bar) self.show() self.settings = QtCore.QSettings() if not self.settings.contains("api_key"): self.set_api_key() def set_api_key(self): api_key, ok = QtGui.QInputDialog.getText(self, "Enter API key", "API key:", QtGui.QLineEdit.Normal, self.settings.value("api_key")) if ok: self.settings.setValue("api_key", api_key) def init_menu_bar(self, menu_bar=None): tools_menu = menu_bar.addMenu("Tools") 
api_key_action = QtGui.QAction("Set API Key", self) api_key_action.activated.connect(self.set_api_key) tools_menu.addAction(api_key_action) @QtCore.Slot(str) def on_drop_target_dropped(self, path): dialogue = SetUpRenamerDialogue(path, self.settings.value("api_key")) if dialogue.exec_() == QtGui.QDialog.DialogCode.Accepted: renamer = dialogue.renamer print(list(renamer.table)) def main(): app = QtGui.QApplication(sys.argv) app.setOrganizationName("Tom Leese") app.setOrganizationDomain("tomleese.me.uk") app.setApplicationName("TV Namer") window = MainWindow() sys.exit(app.exec_())
Python
0.000001
@@ -3191,16 +3191,119 @@ %7D%0A%0A + # remove empty values%0A default_params = %7Bk: v for k, v in default_params.items() if v%7D%0A%0A @@ -3412,24 +3412,321 @@ t_params)%0A%0A%0A +class RenameTable(QtGui.QListWidget):%0A def __init__(self, parent=None):%0A super().__init__(parent)%0A%0A def set_table(self, table):%0A self.clear()%0A print(table)%0A for old, new in table:%0A print(old, new)%0A self.addItem(%22%7B%7D %E2%86%A6 %7B%7D%22.format(old, new))%0A%0A%0A class MainWi @@ -4071,38 +4071,316 @@ elf. -setCentralWidget(self.drop_tar +rename_table = RenameTable(self)%0A self.stacked_widget = QtGui.QStackedWidget()%0A self.stacked_widget.addWidget(self.drop_target)%0A self.stacked_widget.addWidget(self.rename_table)%0A self.stacked_widget.setCurrentWidget(self.drop_target)%0A self.setCentralWidget(self.stacked_wid get) @@ -5523,33 +5523,117 @@ -print(list(renamer.table) +self.rename_table.set_table(renamer.table)%0A self.stacked_widget.setCurrentWidget(self.rename_table )%0A%0A%0A
2d55d95c623bef4848131878061887854ff8a971
Update utils.py
deeplab_resnet/utils.py
deeplab_resnet/utils.py
from PIL import Image
import numpy as np
# colour map
label_colours = [(0,0,0)
                # 0=background
                ,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128)
                # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
                ,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0)
                # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
                ,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128)
                # 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person
                ,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)]
                # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor

def decode_labels(mask):
    """Decode batch of segmentation masks.

    Args:
      label_batch: result of inference after taking argmax.

    Returns:
      An batch of RGB images of the same size
    """
    img = Image.new('RGB', (len(mask[0]), len(mask)))
    pixels = img.load()
    for j_, j in enumerate(mask):
        for k_, k in enumerate(j):
            if k < 21:
                pixels[k_,j_] = label_colours[k]
    return np.array(img)

def prepare_label(input_batch, new_size):
    """Resize masks and perform one-hot encoding.

    Args:
      input_batch: input tensor of shape [batch_size H W 1].
      new_size: a tensor with new height and width.

    Returns:
      Outputs a tensor of shape [batch_size h w 21]
      with last dimension comprised of 0's and 1's only.
    """
    with tf.name_scope('label_encode'):
        input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
        input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
        input_batch = tf.one_hot(input_batch, depth=n_classes)
    return input_batch
Python
0.000001
@@ -34,16 +34,55 @@ y as np%0A +import tensorflow as tf%0A%0An_classes = 21 %0A# colou @@ -1099,18 +1099,25 @@ if k %3C -21 +n_classes :%0A
a62b5955d9801f25736c42545191ff5a76a2e5b1
Refactor UserFactory and add CommentFactory
blog/tests.py
blog/tests.py
from django.test import TestCase
from .models import BlogPost

from django.contrib.auth.models import User


class UserFactory(object):
    def create(self):
        user = User.objects.create_user(username = "user001", email = "email@domain.com", password = "password123456")
        return user


class BlogPostFactory(object):
    def create(self, save=False):
        blogpost = BlogPost()
        blogpost.user = UserFactory().create()
        blogpost.title = "Title Test"
        blogpost.text = "Lorem ipsum tarapia tapioco..."

        if save==True:
            blogpost.save()

        return blogpost


class BlogTest(TestCase):
    def setUp(self):
        pass

    def test_post_creation(self):
        blogpost = BlogPostFactory().create(True)
        self.assertTrue(blogpost.id > 0, "BlogPost created correctly")

    def test_post_update(self):
        blogpost = BlogPostFactory().create(True)
        self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
        blogpost.title = "Title Test - modified"
        blogpost.save()
        blogpost_id = blogpost.id
        blogpost_saved = BlogPost.objects.get(id = blogpost_id)
        self.assertEquals(blogpost_saved.title, blogpost.title, "BlogPost updated correctly")

    def test_post_delete(self):
        blogpost = BlogPostFactory().create(True)
        blogpost_id = blogpost.id
        blogpost.delete()
        blogpost_saved = BlogPost.objects.filter(id = blogpost_id)
        self.assertEqual(blogpost_saved.count(), 0, "BlogPost deleted correctly")
Python
0
@@ -54,17 +54,26 @@ BlogPost +, Comment %0A - from dja @@ -147,32 +147,105 @@ def create(self +, username=%22user001%22, email=%22email@domain.com%22, password=%22password123456%22 ):%0A user @@ -282,25 +282,24 @@ rname = -%22 user -001%22 +name , email @@ -300,34 +300,21 @@ email = -%22 email -@domain.com%22 , passwo @@ -318,25 +318,24 @@ sword = -%22 password 123456%22) @@ -326,23 +326,16 @@ password -123456%22 )%0A @@ -639,16 +639,16 @@ save()%0A%0A - @@ -664,16 +664,371 @@ ogpost%0A%0A +class CommentFactory(object):%0A def create(self, blogpost, text=%22Test comment%22, save=False):%0A comment = Comment()%0A comment.post = blogpost%0A comment.user = UserFactory().create(%22user002%22, %22email002@domain.com%22, %22password123456%22)%0A comment.text = text%0A%0A if save==True:%0A comment.save()%0A%0A return comment%0A%0A class Bl
420d104d9e674b96363db5c986ea9eea4d411c92
Add updated template settings to conftests
conftest.py
conftest.py
""" Configuration file for py.test """ import django def pytest_configure(): from django.conf import settings settings.configure( DEBUG=True, USE_TZ=True, DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": "test.sqlite3", } }, INSTALLED_APPS=[ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", # The ordering here, the apps using the organization base models # first and *then* the organizations app itself is an implicit test # that the organizations app need not be installed in order to use # its base models. "test_accounts", "test_vendors", "organizations", "test_custom", ], MIDDLEWARE_CLASSES=[], SITE_ID=1, FIXTURE_DIRS=['tests/fixtures'], ORGS_SLUGFIELD='autoslug.AutoSlugField', ROOT_URLCONF="tests.urls", ) django.setup()
Python
0
@@ -1042,24 +1042,197 @@ ests.urls%22,%0A + TEMPLATES = %5B%0A %7B%0A 'BACKEND': 'django.template.backends.django.DjangoTemplates',%0A 'APP_DIRS': True,%0A %7D,%0A %5D%0A )%0A dj
8d6287397b47fcaf98cadc59349f1db68c7b2d93
Update 1.4_replace_whitespace.py
CrackingCodingInterview/1.4_replace_whitespace.py
CrackingCodingInterview/1.4_replace_whitespace.py
""" Replace all whitespace in a string with '%20' """
Python
0.000001
@@ -48,8 +48,100 @@ 0'%0A%22%22%22%0A%0A +def replace(string):%0A for i in string:%0A string.replace(%22%22, %2520)%0A return string%0A
8a1b902b729597f5c8536b235d7add887f097fdd
Drop box should be off by default. SSL should be on by default, HTTP should be off.
twistedcaldav/config.py
twistedcaldav/config.py
##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: David Reid, dreid@apple.com
##

import os

from twistedcaldav.py.plistlib import readPlist

defaultConfigFile = '/etc/caldavd/caldavd.plist'

defaultConfig = {
    'CreateAccounts': False,
    'DirectoryService': {
        'params': {'node': '/Search'},
        'type': 'twistedcaldav.directory.appleopendirectory.OpenDirectoryService'
    },
    'DocumentRoot': '/Library/CalendarServer/Documents',
    'DropBoxEnabled': True,
    'ErrorLogFile': '/var/log/caldavd/error.log',
    'ManholePort': 0,
    'MaximumAttachmentSizeBytes': 1048576,
    'NotificationsEnabled': False,
    'PIDFile': '/var/run/caldavd.pid',
    'Port': 8008,
    'ResetAccountACLs': False,
    'RunStandalone': True,
    'SSLCertificate': '/etc/certificates/Default.crt',
    'SSLEnable': False,
    'SSLOnly': False,
    'SSLPort': 8443,
    'SSLPrivateKey': '/etc/certificates/Default.key',
    'ServerLogFile': '/var/log/caldavd/server.log',
    'ServerStatsFile': '/Library/CalendarServer/Documents/stats.plist',
    'UserQuotaBytes': 104857600,
    'Verbose': False,
    'twistdLocation': '/usr/share/caldavd/bin/twistd',
    'SACLEnable': False,
    'AuthSchemes': ['Basic'],
    'AdminPrincipals': ['/principal/users/admin']
}

class Config (object):
    def __init__(self, defaults):
        self.update(defaults)

    def update(self, items):
        items = items.iteritems()
        for key, value in items:
            setattr(self, key, value)

config = Config(defaultConfig)

def parseConfig(configFile):
    if os.path.exists(configFile):
        plist = readPlist(configFile)
        config.update(plist)
Python
0.000025
@@ -1037,19 +1037,20 @@ abled': -Tru +Fals e,%0A ' @@ -1373,36 +1373,35 @@ 'SSLEnable': -Fals +Tru e,%0A 'SSLOnly' @@ -1402,20 +1402,19 @@ LOnly': -Fals +Tru e,%0A '
3e8d6e31f576fb857a1415c85a227f56225b8f06
fix database path
blogconfig.py
blogconfig.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# 博客名和简介
blogname = "I'm SErHo"
blogdesc = "SErHo's Blog, Please Call me Serho Liu."
blogcover = "//dn-serho.qbox.me/blogbg.jpg"

# Picky 目录和数据库
picky = "/home/serho/website/picky"
database = "//home/serho/website/newblog.db"

# 其他设置
# disqus = "serho"
# secret = "use random"

debug = False
Python
0.000002
@@ -236,17 +236,16 @@ ase = %22/ -/ home/ser
1630bb891bf57052984301b9dd191826ca7ba18e
Update test_biobambam.py
tests/test_biobambam.py
tests/test_biobambam.py
""" .. Copyright 2017 EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os.path import time import pytest # pylint: disable=unused-import from tool import biobambam_filter def test_biobambam(): """ Test case to ensure that BioBamBam works """ bbb = biobambam_filter.biobambam() resource_path = os.path.join(os.path.dirname(__file__), "data/") bbb.run( [resource_path + "macs2.Human.DRR000150.22.bam"], [] ) print "Start : %s" % time.ctime() time.sleep (10) print "End : %s" % time.ctime() assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22.filtered.bam") is True assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22.filtered.bam") > 0
Python
0.000002
@@ -1185,16 +1185,151 @@ is True%0A + testFile = open(resource_path + %22macs2.Human.DRR000150.22.filtered.bam%22)%0A print (%22read line: %22)%0A print (testFile.readline())%0A asse
6ab7f0144ab7115da967d00db098d06db018780f
version bump 0.1.20beta
tx_highered/__init__.py
tx_highered/__init__.py
__version__ = "0.1.19beta"
Python
0
@@ -16,10 +16,10 @@ 0.1. -19 +20 beta
1e3e6ea6c24e275a5a08f096968ae14aab2dfd22
Support custom schema classes.
muffin_rest/peewee.py
muffin_rest/peewee.py
"""Support Muffin-Peewee.""" from muffin_rest import RESTHandler, RESTNotFound, Filter, Filters, RESTOptions try: from marshmallow_peewee import ModelSchema except ImportError: import logging logging.error('Marshmallow-Peewee should be installed to use the integration.') raise class PWFilter(Filter): """Filter Peewee Queryset.""" operators = Filter.operators operators['$in'] = lambda f, v: f << v operators['$none'] = lambda f, v: f >> v operators['$like'] = lambda f, v: f % v operators['$contains'] = lambda f, v: f.contains(v) operators['$starts'] = lambda f, v: f.startswith(v) operators['$ends'] = lambda f, v: f.endswith(v) operators['$between'] = lambda f, v: f.between(*v) operators['$regexp'] = lambda f, v: f.regexp(v) list_ops = Filter.list_ops + ('$between',) def __init__(self, name, mfield=None, **kwargs): self.mfield = mfield return super(PWFilter, self).__init__(name, **kwargs) def apply(self, collection, ops, resource=None, **kwargs): """Filter given collection.""" mfield = self.mfield or resource.meta.model._meta.fields.get(self.field.attribute) if mfield: collection = collection.where(*[op(mfield, val) for op, val in ops]) return collection class PWFilters(Filters): """Bind filter class.""" FILTER_CLASS = PWFilter class PWRESTOptions(RESTOptions): """Generate schema and name.""" def __init__(self, cls, name=None, **params): """Initialize options.""" super(PWRESTOptions, self).__init__(cls, **params) cls.name = name or self.model and self.model._meta.db_table or cls.name if not self.model: return None self.model_pk = self.model_pk or self.model._meta.primary_key if not cls.Schema: meta = type('Meta', (object,), dict({'model': self.model}, **self.schema_meta)) cls.Schema = type( cls.name.title() + 'Schema', (ModelSchema,), dict({'Meta': meta}, **self.schema)) # Resetup filters if getattr(self.meta, 'filters', None): self.filters = self.filters_converter(*self.meta.filters, handler=cls) class PWRESTHandler(RESTHandler): """Support REST for Peewee.""" OPTIONS_CLASS = PWRESTOptions class Meta: """Peewee options.""" filters_converter = PWFilters model = None model_pk = None schema = {} def get_many(self, request, **kwargs): """Get collection.""" return self.meta.model.select() def get_one(self, request, **kwargs): """Load a resource.""" resource = request.match_info.get(self.name) if not resource: return None try: return self.collection.where(self.meta.model_pk == resource).get() except Exception: raise RESTNotFound(reason='Resource not found.') def sort(self, *sorting, **kwargs): """Sort resources.""" sorting_ = [] for name, desc in sorting: field = self.meta.model._meta.fields.get(name) if field is None: continue if desc: field = field.desc() sorting_.append(field) if sorting_: return self.collection.order_by(*sorting_) return self.collection def paginate(self, request, offset=0, limit=None): """Paginate queryset.""" return self.collection.offset(offset).limit(limit), self.collection.count() def get_schema(self, request, resource=None, **kwargs): """Initialize schema.""" return self.Schema(instance=resource) def save(self, request, resource=None, **kwargs): """Create a resource.""" resource.save() return resource def delete(self, request, resource=None, **kwargs): """Delete a resource.""" if resource is None: raise RESTNotFound(reason='Resource not found') resource.delete_instance()
Python
0
@@ -2008,22 +2008,42 @@ ', ( -ModelSchema,), +self.schema_cls,),%0A dic @@ -2494,16 +2494,49 @@ ema = %7B%7D +%0A schema_cls = ModelSchema %0A%0A de
331057ecf72a6f0945c0dd5b2af49eb6b3ec299f
update consume function to not use TODAY constant
consumer.py
consumer.py
# Consumer for Texas State University from __future__ import unicode_literals import os import time from dateutil.parser import parse from datetime import date, timedelta, datetime import requests from lxml import etree from nameparser import HumanName from scrapi.linter import lint from scrapi.linter.document import RawDocument, NormalizedDocument NAME = 'texasstate' TODAY = date.today() NAMESPACES = {'dc': 'http://purl.org/dc/elements/1.1/', 'oai_dc': 'http://www.openarchives.org/OAI/2.0/', 'ns0': 'http://www.openarchives.org/OAI/2.0/'} DEFAULT = datetime(1970, 01, 01) DEFAULT_ENCODING = 'UTF-8' record_encoding = None OAI_DC_BASE_URL = 'http://digital.library.txstate.edu/oai/request?verb=ListRecords' def copy_to_unicode(element): encoding = record_encoding or DEFAULT_ENCODING element = ''.join(element) if isinstance(element, unicode): return element else: return unicode(element, encoding=encoding) def consume(days_back=10): start_date = str(date.today() - timedelta(days_back)) start_date = TODAY - timedelta(days_back) # YYYY-MM-DD hh:mm:ss url = OAI_DC_BASE_URL + '&metadataPrefix=oai_dc&from=' + str(start_date) + ' 00:00:00' records = get_records(url) record_encoding = requests.get(url).encoding xml_list = [] for record in records: doc_id = record.xpath( 'ns0:header/ns0:identifier', namespaces=NAMESPACES)[0].text record = etree.tostring(record, encoding=record_encoding) xml_list.append(RawDocument({ 'doc': record, 'source': NAME, 'docID': copy_to_unicode(doc_id), 'filetype': 'xml' })) return xml_list def get_records(url): print(url) data = requests.get(url) doc = etree.XML(data.content) records = doc.xpath('//ns0:record', namespaces=NAMESPACES) token = doc.xpath('//ns0:resumptionToken/node()', namespaces=NAMESPACES) if len(token) == 1: time.sleep(0.5) base_url = OAI_DC_BASE_URL + '&resumptionToken=' url = base_url + token[0] records += get_records(url) return records def getcontributors(result): contributors = result.xpath( '//dc:contributor/node()', namespaces=NAMESPACES) or [''] creators = result.xpath( '//dc:creator/node()', namespaces=NAMESPACES) or [''] all_contributors = contributors + creators contributor_list = [] for person in all_contributors: name = HumanName(person) contributor = { 'prefix': name.title, 'given': name.first, 'middle': name.middle, 'family': name.last, 'suffix': name.suffix, 'email': '', 'ORCID': '' } contributor_list.append(contributor) return contributor_list def gettags(result): tags = result.xpath('//dc:subject/node()', namespaces=NAMESPACES) or [] return [copy_to_unicode(tag.lower()) for tag in tags] def get_ids(result, doc): serviceID = doc.get('docID') identifiers = result.xpath('//dc:identifier/node()', namespaces=NAMESPACES) url = '' doi = '' for item in identifiers: if 'digital.library.txstate.edu' in item or 'hdl.handle.net' in item: url = item if 'doi' in item or 'DOI' in item: doi = item doi = doi.replace('doi:', '') doi = doi.replace('DOI:', '') doi = doi.replace('http://dx.doi.org/', '') doi = doi.strip(' ') return {'serviceID': serviceID, 'url': copy_to_unicode(url), 'doi': copy_to_unicode(doi)} def get_properties(result): result_type = (result.xpath('//dc:type/node()', namespaces=NAMESPACES) or [''])[0] rights = result.xpath('//dc:rights/node()', namespaces=NAMESPACES) or [''] if len(rights) > 1: copyright = ' '.join(rights) else: copyright = rights publisher = (result.xpath('//dc:publisher/node()', namespaces=NAMESPACES) or [''])[0] relation = (result.xpath('//dc:relation/node()', namespaces=NAMESPACES) or [''])[0] language = 
(result.xpath('//dc:language/node()', namespaces=NAMESPACES) or [''])[0] dates = result.xpath('//dc:date/node()', namespaces=NAMESPACES) or [''] set_spec = result.xpath('ns0:header/ns0:setSpec/node()', namespaces=NAMESPACES)[0] props = { 'type': copy_to_unicode(result_type), 'dates': copy_to_unicode(dates), 'language': copy_to_unicode(language), 'relation': copy_to_unicode(relation), 'publisherInfo': { 'publisher': copy_to_unicode(publisher), }, 'permissions': { 'copyrightStatement': copy_to_unicode(copyright), } } return props def get_date_created(result): dates = (result.xpath('//dc:date/node()', namespaces=NAMESPACES) or ['']) date = copy_to_unicode(dates[0]) return date def get_date_updated(result): dateupdated = result.xpath('//ns0:header/ns0:datestamp/node()', namespaces=NAMESPACES)[0] date_updated = parse(dateupdated).isoformat() return copy_to_unicode(date_updated) def normalize(raw_doc): result = raw_doc.get('doc') try: result = etree.XML(result) except etree.XMLSyntaxError: print "Error in namespaces! Skipping this one..." return None with open(os.path.join(os.path.dirname(__file__), 'series_names.txt')) as series_names: series_name_list = [word.replace('\n', '') for word in series_names] set_spec = result.xpath('ns0:header/ns0:setSpec/node()', namespaces=NAMESPACES)[0] if set_spec.replace('publication:', '') not in series_name_list: print('{} not in approved list, not normalizing...'.format(set_spec)) return None title = result.xpath('//dc:title/node()', namespaces=NAMESPACES)[0] description = (result.xpath('//dc:description/node()', namespaces=NAMESPACES) or [''])[0] payload = { 'title': copy_to_unicode(title), 'contributors': getcontributors(result), 'properties': get_properties(result), 'description': copy_to_unicode(description), 'tags': gettags(result), 'id': get_ids(result, raw_doc), 'source': NAME, 'dateCreated': get_date_created(result), 'dateUpdated': get_date_updated(result) } if payload['id']['url'] == '': print "Warning, no URL provided, not normalizing..." return None return NormalizedDocument(payload) if __name__ == '__main__': print(lint(consume, normalize))
Python
0
@@ -374,29 +374,8 @@ te'%0A -TODAY = date.today()%0A NAME @@ -1045,54 +1045,8 @@ k))%0A - start_date = TODAY - timedelta(days_back)%0A @@ -1128,20 +1128,16 @@ rom=' + -str( start_da @@ -1138,17 +1138,16 @@ art_date -) + ' 00:
8fa0dca5cd5187126a10197883348fc6b16544b5
Test get campaigns by email
tests/test_campaigns.py
tests/test_campaigns.py
import os
import vcr
import unittest

from hatchbuck.api import HatchbuckAPI
from hatchbuck.objects import Contact


class TestCampaigns(unittest.TestCase):

    def setUp(self):
        # Fake key can be used with existing cassettes
        self.test_api_key = os.environ.get("HATCHBUCK_API_KEY", "ABC123")

    @vcr.use_cassette(
        'tests/fixtures/cassettes/test_get_contact_campaigns.yml',
        filter_query_parameters=['api_key']
    )
    def test_get_contact_campaigns(self):
        hatchbuck = HatchbuckAPI(self.test_api_key)
        contact_id = "d1F4Tm1tcUxVRmdFQmVIT3lhVjNpaUtxamprakk5S3JIUGRmVWtHUXJaRTE1"
        contact = hatchbuck.search_contacts(contactId=contact_id)[0]
        self.assertEqual(contact.contactId, contact_id)
        campaigns = contact.get_campaigns()
        self.assertEqual(campaigns[0].name, "Brochure Request Followup")
        self.assertEqual(campaigns[0].id, "b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1")


if __name__ == '__main__':
    unittest.main()
Python
0
@@ -861,16 +861,654 @@ lowup%22)%0A + self.assertEqual(campaigns%5B0%5D.step, 0)%0A self.assertEqual(campaigns%5B0%5D.id, %22b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1%22)%0A%0A @vcr.use_cassette(%0A 'tests/fixtures/cassettes/test_get_contact_campaigns_by_email.yml',%0A filter_query_parameters=%5B'api_key'%5D%0A )%0A def test_get_contact_campaigns_by_email(self):%0A hatchbuck = HatchbuckAPI(self.test_api_key)%0A contact_email = %22jill.smith@pyhatchbuck.net%22%0A campaigns = hatchbuck.get_campaigns(contact_email)%0A self.assertEqual(campaigns%5B0%5D.name, %22Brochure Request Followup%22)%0A self.assertEqual(campaigns%5B0%5D.step, 0)%0A
e9b270028d12ad7836b5b3b5775c08df06788f42
Allow more flexibility when calling module metadata and summary functions
livvkit/components/validation.py
livvkit/components/validation.py
# Copyright (c) 2015,2016, UT-BATTELLE, LLC # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Validation Test Base Module """ from __future__ import absolute_import, division, print_function, unicode_literals import six import os import importlib import livvkit from livvkit.util import functions def _load_case_module(case, config): try: m = importlib.import_module(config['module']) except ImportError as ie: config_path = os.path.abspath(config['module']) try: if six.PY2: import imp m = imp.load_source('validation.'+case, config_path) elif six.PY3: spec = importlib.util.spec_from_file_location('validation.'+case, config_path) m = importlib.util.module_from_spec(spec) spec.loader.exec_module(m) else: raise except: print(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") print(" UH OH!") print(" ----------------------------------------------------------") print(" Could not find the module for test case: ") print(" "+case) print(" The module must be specified as an import statement of a") print(" module that can be found on your python, or a valid path") print(" to a python module file (specified either relative to your") print(" current working directory or absolutely).") print(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") raise return m def _run_suite(case, config, summary): """ Run the full suite of validation tests """ m = _load_case_module(case, config) result = m.run(case, config) summary[case] = _summarize_result(m, result) _print_summary(m, case, summary) if result['Type'] == 'Book': for name, page in six.iteritems(result['Data']): functions.create_page_from_template("validation.html", os.path.join(livvkit.index_dir, "validation", name + ".html")) functions.write_json(page, os.path.join(livvkit.output_dir, "validation"), name + ".json") else: functions.create_page_from_template("validation.html", os.path.join(livvkit.index_dir, "validation", case + ".html")) functions.write_json(result, os.path.join(livvkit.output_dir, "validation"), case + ".json") def _print_summary(module, case, summary): try: module.print_summary(summary[case]) 
except (NotImplementedError, AttributeError): print(" Ran " + case + "!") print("") def _summarize_result(module, result): try: summary = module.summarize_result(result) except (NotImplementedError, AttributeError): status = "Success" if result["Type"] == "Error": status = "Failure" summary = {"": {"Outcome": status}} return summary def _populate_metadata(case, config): m = _load_case_module(case, config) try: metadata = m.populate_metadata() except (NotImplementedError, AttributeError): metadata = {"Type": "ValSummary", "Title": "Validation", "TableTitle": "Validation", "Headers": ["Outcome"]} return metadata
Python
0
@@ -4051,32 +4051,49 @@ mary):%0A try:%0A + try:%0A module.p @@ -4120,16 +4120,96 @@ %5Bcase%5D)%0A + except TypeError:%0A module.print_summary(case, summary%5Bcase%5D)%0A exce @@ -4522,20 +4522,16 @@ Error%22:%0A - @@ -4553,16 +4553,16 @@ ailure%22%0A + @@ -4697,32 +4697,49 @@ onfig)%0A try:%0A + try:%0A metadata @@ -4763,16 +4763,98 @@ adata()%0A + except TypeError:%0A metadata = m.populate_metadata(case, config) %0A exc
bdb700d896985eb62c98e3b668e15a3d29008921
Allow setting either side of symmetrical relations
share/normalize/parsers.py
share/normalize/parsers.py
import re import uuid from functools import reduce from django.apps import apps from django.core.exceptions import FieldDoesNotExist from share.normalize.links import Context from share.normalize.links import AbstractLink # NOTE: Context is a thread local singleton # It is asigned to ctx here just to keep a family interface ctx = Context() class ParserMeta(type): def __new__(cls, name, bases, attrs): # Enabled inheritance in parsers. parsers = reduce(lambda acc, val: {**acc, **getattr(val, 'parsers', {})}, bases[::-1], {}) for key, value in tuple(attrs.items()): if isinstance(value, AbstractLink) and key != 'schema': parsers[key] = attrs.pop(key).chain()[0] attrs['parsers'] = parsers attrs['_extra'] = reduce(lambda acc, val: {**acc, **getattr(val, '_extra', {})}, bases[::-1], {}) attrs['_extra'].update({ key: value.chain()[0] for key, value in attrs.pop('Extra', object).__dict__.items() if isinstance(value, AbstractLink) }) return super(ParserMeta, cls).__new__(cls, name, bases, attrs) class Parser(metaclass=ParserMeta): @classmethod def using(cls, **overrides): if not all(isinstance(x, AbstractLink) for x in overrides.values()): raise Exception('Found non-link values in {}. Maybe you need to wrap something in Delegate?'.format(overrides)) return type( cls.__name__ + 'Overridden', (cls, ), { 'schema': cls.schema if isinstance(cls.schema, (str, AbstractLink)) else cls.__name__.lower(), **overrides } ) @property def schema(self): return self.__class__.__name__.lower() def __init__(self, context, config=None): self.config = config or ctx._config self.context = context self.id = '_:' + uuid.uuid4().hex def validate(self, field, value): if field.is_relation: if field.one_to_many or field.rel.many_to_many: assert isinstance(value, (list, tuple)), 'Values for field {} must be lists. Found {}'.format(field, value) else: assert isinstance(value, dict) and '@id' in value and '@type' in value, 'Values for field {} must be a dictionary with keys @id and @type. Found {}'.format(field, value) else: assert not isinstance(value, dict), 'Value for non-relational field {} must be a primitive type. 
Found {}'.format(field, value) def parse(self): prev, Context().parser = Context().parser, self if isinstance(self.schema, AbstractLink): schema = self.schema.chain()[0].run(self.context).lower() else: schema = self.schema if (self.context, schema) in ctx.pool: Context().parser = prev return ctx.pool[self.context, schema] model = apps.get_model('share', schema) self.ref = {'@id': self.id, '@type': schema} inst = {**self.ref} # Shorthand for copying inst ctx.pool[self.context, schema] = self.ref for key, chain in self.parsers.items(): try: field = model._meta.get_field(key) except FieldDoesNotExist: raise Exception('Tried to parse value {} which does not exist on {}'.format(key, model)) value = chain.run(self.context) if value and field.is_relation and (field.one_to_many or field.rel.many_to_many): for v in value: field_name = field.field.name if field.one_to_many else field.m2m_field_name() ctx.pool[v][field_name] = self.ref if value is not None: self.validate(field, value) inst[key] = self._normalize_white_space(value) inst['extra'] = {} for key, chain in self._extra.items(): val = chain.run(self.context) if val: inst['extra'][key] = val if not inst['extra']: del inst['extra'] Context().parser = prev ctx.pool[self.ref] = inst ctx.graph.append(inst) # Return only a reference to the parsed object to avoid circular data structures return self.ref def _normalize_white_space(self, value): if not isinstance(value, str): return value return re.sub(r'\s+', ' ', value.strip())
Python
0.000004
@@ -15,16 +15,31 @@ rt uuid%0A +import logging%0A from fun @@ -354,16 +354,53 @@ ntext()%0A +logger = logging.getLogger(__name__)%0A %0A%0Aclass @@ -2925,16 +2925,135 @@ = prev%0A + logger.debug('Values (%25s, %25s) found in cache as %25s', self.context, schema, ctx.pool%5Bself.context, schema%5D)%0A @@ -3094,16 +3094,16 @@ schema%5D%0A - %0A @@ -3709,44 +3709,8 @@ y):%0A - for v in value:%0A @@ -3800,16 +3800,512 @@ _name()%0A + for v in tuple(value): # Freeze list so we can modify it will iterating%0A # Allow filling out either side of recursive relations%0A if model._meta.concrete_model == field.related_model and field_name in ctx.pool%5Bv%5D:%0A ctx.pool%5Bv%5D%5Bfield.m2m_reverse_field_name()%5D = self.ref%0A value.remove(v) # Prevent CyclicalDependency error. Only %22subjects%22 should have related_works%0A else:%0A
eb58615d0fa7f4469be01f9e8dcb1cf44b8ce85e
correct context problem
close_residual_order_unlink/unlink_mrp.py
close_residual_order_unlink/unlink_mrp.py
# -*- coding: utf-8 -*- ############################################################################### # # Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### import os import sys import logging import openerp import openerp.netsvc as netsvc import openerp.addons.decimal_precision as dp from openerp.osv import fields, osv, expression, orm from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from openerp import SUPERUSER_ID, api from openerp import tools from openerp.tools.translate import _ from openerp.tools.float_utils import float_round as round from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare) _logger = logging.getLogger(__name__) class SaleOrder(orm.Model): """ Model name: Sale Order """ _inherit = 'sale.order' # ------------------------------------------------------------------------- # Override: button force close: # ------------------------------------------------------------------------- def force_close_residual_order(self, cr, uid, ids, context=None): ''' Force order and line closed: ''' # Run normal button procedure: super(SaleOrder, self).force_close_residual_order( cr, uid, ids, context=context) _logger.warning('Unlink no more production line') # Pool used: sol_pool = self.pool.get('sale.order.line') order_proxy = self.browse(cr, uid, ids, context=context) # -------------------------------------- # Read data for log and get information: # -------------------------------------- html_log = '' for line in order_proxy.order_line: if not line.mrp_id: # not production_mrp_id continue # Manage only linked to production line if line.product_uom_qty - line.product_uom_maked_sync_qty <= 0: continue # Manage only residual production todo if 'UNLINK' in line.mrp_id.name: continue # Unlinked order no re-unlink # Unlink line: sol_pool.free_line(cr, uid, [line.id], context=context) # Log unlinked: html_log += ''' <tr> <td>%s</td> <td>%s</td> <td>%s</td> <td>%s</td> </tr>\n''' % ( line.product_id.default_code, line.product_uom_qty, line.product_uom_maked_sync_qty, line.delivered_qty, ) # -------------------------- # Log message for operation: # -------------------------- if html_log: message = _(''' <p>UNLINKED Remain line to produce:</p> <table class='oe_list_content'> <tr> <td class='oe_list_field_cell'>Prod.</td> <td class='oe_list_field_cell'>Order</td> <td class='oe_list_field_cell'>Done</td> <td class='oe_list_field_cell'>Delivered</td> </tr> %s </table> ''') % html_log # Send message self.message_post(cr, uid, ids, body=message, context=context) return True # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0.999969
@@ -2963,24 +2963,84 @@ nlink line:%0A + context%5B'production_order_id'%5D = line.mrp_id.id%0A @@ -3551,32 +3551,129 @@ )%0A +if 'production_order_id' in context:%0A del(context%5B'production_order_id'%5D)%0A %0A # -----
d8a83ea3433948447c307a894b16c2b8a12247e8
Kill defaulting to json for now.
api/base.py
api/base.py
from django.contrib.auth.models import User from django.conf.urls.defaults import url from django.core.urlresolvers import reverse from tastypie.bundle import Bundle from tastypie.resources import ModelResource from tastypie import fields from tastypie.authentication import BasicAuthentication from tastypie.authorization import DjangoAuthorization, Authorization from tastypie.constants import ALL, ALL_WITH_RELATIONS from builds.models import Build from projects.models import Project class BaseResource(ModelResource): def determine_format(self, *args, **kwargs): return "application/json" class EnhancedModelResource(BaseResource): def obj_get_list(self, request=None, **kwargs): """ A ORM-specific implementation of ``obj_get_list``. Takes an optional ``request`` object, whose ``GET`` dictionary can be used to narrow the query. """ filters = None if hasattr(request, 'GET'): filters = request.GET applicable_filters = self.build_filters(filters=filters) applicable_filters.update(kwargs) try: return self.get_object_list(request).filter(**applicable_filters) except ValueError, e: raise NotFound("Invalid resource lookup data provided (mismatched type).") class UserResource(BaseResource): class Meta: allowed_methods = ['get'] queryset = User.objects.all() fields = ['username', 'first_name', 'last_name', 'last_login', 'id'] def override_urls(self): return [ url(r"^(?P<resource_name>%s)/(?P<username>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"), ] class ProjectResource(BaseResource): user = fields.ForeignKey(UserResource, 'user') class Meta: include_absolute_url = True allowed_methods = ['get'] queryset = Project.objects.all() excludes = ['build_pdf', 'path', 'skip', 'featured'] def override_urls(self): return [ url(r"^(?P<resource_name>%s)/(?P<slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"), ] class BuildResource(EnhancedModelResource): project = fields.ForeignKey(ProjectResource, 'project') class Meta: allowed_methods = ['get'] queryset = Build.objects.all() def override_urls(self): return [ url(r"^(?P<resource_name>%s)/(?P<project__slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_list'), name="api_list_detail"), ]
Python
0
@@ -488,127 +488,8 @@ t%0A%0A%0A -class BaseResource(ModelResource):%0A def determine_format(self, *args, **kwargs):%0A return %22application/json%22%0A%0A clas @@ -504,36 +504,37 @@ edModelResource( -Base +Model Resource):%0A d @@ -1200,36 +1200,37 @@ ss UserResource( -Base +Model Resource):%0A c @@ -1632,12 +1632,13 @@ rce( -Base +Model Reso
69f46596f189786fce0e2a087e6870e5d3059331
Fix figshare harvester date range (#764)
share/harvesters/com_figshare_v2.py
share/harvesters/com_figshare_v2.py
import pendulum
from furl import furl

from share.harvest import BaseHarvester


class FigshareHarvester(BaseHarvester):
    VERSION = 1

    page_size = 50

    def do_harvest(self, start_date, end_date):
        return self.fetch_records(furl(self.config.base_url).set(query_params={
            'order_direction': 'asc',
            'order': 'modified_date',
            'page_size': self.page_size,
            'modified_date': start_date.date().isoformat(),
        }).url, end_date.date())

    def fetch_records(self, url, end_day):
        page, detail = 0, None
        while True:
            page += 1
            resp = self.requests.get(furl(url).add(query_params={
                'page': page,
            }).url)

            if resp.status_code == 422:
                # We've asked for too much. Time to readjust date range
                # Thanks for leaking variables python
                page, url = 0, furl(url).add(query_params={
                    'modified_date': pendulum.parse(detail['modified_date']).date().isoformat()
                })
                continue

            for item in resp.json():
                resp = self.requests.get(item['url'])
                detail = resp.json()
                if pendulum.parse(detail['modified_date']).date() > end_day:
                    return
                yield item['url'], detail

            if len(resp.json()) < self.page_size:
                return  # We've hit the end of our results
Python
0.000003
@@ -164,18 +164,17 @@ def +_ do_ -harvest +fetch (sel @@ -211,34 +211,14 @@ -return self.fetch_records( +url = furl @@ -394,27 +394,28 @@ 'modified_ -dat +sinc e': start_da @@ -447,17 +447,51 @@ %7D) -. +%0A return self.fetch_records( url, end @@ -564,21 +564,36 @@ page -, detail = 0, + = 1%0A last_seen_day = Non @@ -653,91 +653,31 @@ -resp = self.requests.get(furl(url).add(query_params=%7B%0A +url.args%5B 'page' -: +%5D = page -, %0A @@ -685,18 +685,44 @@ -%7D) +resp = self.requests.get(url .url)%0A%0A @@ -735,16 +735,34 @@ if +last_seen_day and resp.sta @@ -870,189 +870,50 @@ -# Thanks for leaking variables python%0A page, url = 0, furl(url).add(query_params=%7B%0A 'modified_date': pendulum.parse(detail%5B'modified_date'%5D).date() +url.args%5B'modified_since'%5D = last_seen_day .iso @@ -929,34 +929,40 @@ -%7D) +page = 0 %0A @@ -1100,17 +1100,16 @@ .json()%0A -%0A @@ -1116,18 +1116,31 @@ -if +last_seen_day = pendulu @@ -1178,16 +1178,50 @@ ).date() +%0A%0A if last_seen_day %3E end_d
83f54f57170115cda98e7d1aa68972c60b865647
Fix test_upgrades_to_html.py test
cnxupgrade/tests/test_upgrades_to_html.py
cnxupgrade/tests/test_upgrades_to_html.py
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Tests for to_html command-line interface.
"""
import sys
import unittest

from . import DB_CONNECTION_STRING


class ToHtmlTestCase(unittest.TestCase):

    def call_target(self, **kwargs):
        from ..upgrades import to_html
        return to_html.cli_command(**kwargs)

    def test(self):
        # Mock produce_html_for_modules
        if 'cnxarchive.to_html' in sys.modules:
            del sys.modules['cnxarchive.to_html']
        import cnxarchive.to_html as to_html
        original_func = to_html.produce_html_for_modules
        self.addCleanup(setattr, to_html, 'produce_html_for_modules',
                        original_func)

        self.call_count = 0
        def f(*args, **kwargs):
            self.call_count += 1
            self.args = args
            self.kwargs = kwargs
            return []
        to_html.produce_html_for_modules = f

        self.call_target(db_conn_str=DB_CONNECTION_STRING,
                         id_select_query='SELECT 2', overwrite_html=False)

        # Assert produce_html_for_modules is called
        self.assertEqual(self.call_count, 1)
        self.assertEqual(str(type(self.args[0])),
                         "<type 'psycopg2._psycopg.connection'>")
        self.assertEqual(self.args[1], 'SELECT 2')
        self.assertEqual(self.args[2], False)
        self.assertEqual(self.kwargs, {})
Python
0.000021
@@ -1534,62 +1534,39 @@ elf. +kw args -%5B2%5D, False)%0A self.assertEqu +, %7B'overwrite_html': F al -( se -lf.kwargs, %7B %7D)%0A
e20f0d3ada72cb21185ca0c3c1d22a77ee254de0
fix rogue tab
tests/test_get_paths.py
tests/test_get_paths.py
import sys
import os

from goatools.obo_parser import GODag

ROOT = os.path.dirname(os.path.abspath(__file__)) + "/data/"

def print_paths(paths, PRT=sys.stdout):
    for path in paths:
        PRT.write('\n')
        for GO in path:
            PRT.write('{}\n'.format(GO))

def chk_results(actual_paths, expected_paths):
    for actual_path in actual_paths:
        # GOTerm -> list of Strings
        actual = [GO.id for GO in actual_path]
        if actual not in expected_paths:
            raise Exception('ACTUAL {} NOT FOUND IN EXPECTED RESULTS\n'.format(actual))

def test_paths_to_top():
    dag = GODag(ROOT + "mini_obo.obo")
    expected_paths = [['GO:0000001', 'GO:0000002', 'GO:0000005', 'GO:0000010'],
                      ['GO:0000001', 'GO:0000003', 'GO:0000005', 'GO:0000010'],
                      ['GO:0000001', 'GO:0000003', 'GO:0000006', 'GO:0000008', 'GO:0000010']]
    actual_paths = dag.paths_to_top("GO:0000010")
    chk_results(actual_paths, expected_paths)
    print_paths(actual_paths)
Python
0.000001
@@ -1,12 +1,8 @@ - import s
e42690a6f225952ddb6417edc90e27892c18d2a2
Move api to root.
api/main.py
api/main.py
from bottle import route, request, response, run, view
from collections import OrderedDict
from parser import parse_response
from server import query_server
import bottle
import json
import os

@route('/api/')
@view('api/views/index')
def index():
    site = "%s://%s" % (request.urlparts.scheme, request.urlparts.netloc)
    return {"site": site}

@route('/api/tag', method=["get", "post"])
def tag():
    # Support posting data both via forms and via POST body
    data = request.POST.get("data", request.body.getvalue())
    if not data:
        return {"error": "No data posted"}

    raw_text = query_server(data)
    sentences, entities = parse_response(raw_text)

    response.content_type = "application/json"

    pretty = request.POST.get("pretty", False)
    json_kwargs = {"separators": (',', ':')}
    if pretty:
        json_kwargs = {"indent": 4, "separators": (', ', ': ')}

    return json.dumps(OrderedDict([
        ("sentences", sentences),
        ("entities", entities),
    ]), **json_kwargs)

if __name__ == "__main__":
    environment = os.environ.get("ENVIRONMENT", None)
    assert environment, "Needs $ENVIRONMENT variable set"

    if environment == "development":
        print "RUNNING IN DEVELOPMENT MODE"
        bottle.debug(True)
        bottle.TEMPLATES.clear()
        run(host='localhost', port=8000, reloader=True)
    elif environment == "production":
        print "RUNNING IN PRODUCTION MODE"
        run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
    else:
        assert False, "That's not a valid $ENVIRONMENT"
Python
0
@@ -196,20 +196,16 @@ route('/ -api/ ')%0A@view @@ -347,20 +347,16 @@ route('/ -api/ tag', me
ee3b712611ed531843134ef4ce94cb45c726c127
Fix filename creation in csv export action
nap/extras/actions.py
nap/extras/actions.py
from django.http import StreamingHttpResponse
from django.utils.encoding import force_text

from .models import modelserialiser_factory
from .simplecsv import CSV


class ExportCsv(object):
    def __init__(self, serialiser=None, label=None, **opts):
        self.serialiser = serialiser
        self.opts = opts
        if label:
            self.short_description = label

    def __call__(self, admin, request, queryset):
        if self.serialiser is None:
            ser_class = modelserialiser_factory(
                '%sSerialiser' % admin.__class__.__name__,
                admin.model,
                **self.opts
            )
        else:
            ser_class = self.serialiser

        def inner(ser):
            csv = CSV(fields=ser._fields.keys())
            yield csv.write_headers()
            for obj in queryset:
                data = {
                    key: force_text(val)
                    for key, val in ser.object_deflate(obj).items()
                }
                yield csv.write_dict(data)

        response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
        filename = admin.csv_
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        return response
Python
0.000024
@@ -1139,18 +1139,357 @@ e = -admin.csv_ +self.opts.get('filename', 'export_%7Bclassname%7D.csv')%0A if callable(filename):%0A filename = filename(admin)%0A else:%0A filename = filename.format(%0A classname=admin.__class__.__name__,%0A model=admin.model._meta.module_name,%0A app_label=admin.model._meta.app_label,%0A ) %0A
b1cc545775cb3de4899e75232d7225ac35ce5bad
More terse zuul output
ircbot/commands/zuul.py
ircbot/commands/zuul.py
#!/usr/bin/python2.4 # A simple zuul watcher bot import json import sqlite3 import subprocess import time import urllib2 import utils EVENTS = {'check': 'did some work!', 'gate': 'tricked them into approving the commit'} class ZuulWatcher(object): def __init__(self, log, conf): self.log = log self.conf = conf self.database = sqlite3.connect('commands/zuul.sqlite', detect_types=sqlite3.PARSE_DECLTYPES) self.database.row_factory = sqlite3.Row self.cursor = self.database.cursor() self.statuses = {} # Create tables try: self.cursor.execute('create table patchsets(' 'ident int, number int, author varchar(255), ' 'monitored int, seen datetime)') except sqlite3.OperationalError: pass self.database.commit() # Things you're expected to implement def Name(self): """Who am I?""" return 'zuulwatcher' def Verbs(self): """Return the verbs which this module supports Takes no arguments, and returns an array of strings. """ return [] def Help(self, verb): """Display help for a verb Takes the name of a verb, and returns a string which is the help message for that verb. """ return '' def Command(self, user, channel, verb, line): """Execute a given verb with these arguments Takes the verb which the user entered, and the remainder of the line. Returns a string which is sent to the user. """ yield def NoticeUser(self, channel, user): """We just noticed this user. Either they joined, or we did.""" yield def HeartBeat(self): """Gets called at regular intervals""" channel = '#%s' % self.conf['zuul']['channel'] try: remote = urllib2.urlopen('http://zuul.openstack.org/status.json') status = json.loads(remote.read()) remote.close() for pipeline in status['pipelines']: if pipeline['name'] in ['check', 'gate']: for queue in pipeline['change_queues']: for head in queue['heads']: for review in head: ident, number = review['id'].split(',') self.log('... zuul processing %s, %s' %(ident, number)) owner = None self.cursor.execute('select * from patchsets where ' 'ident=? and number=?', [ident, number]) rows = self.cursor.fetchall() if rows: owner = rows[0][2] else: self.log(' looking up patchset info') info = utils.get_patchset_info(ident) for patchset in info['patchSets']: if patchset['number'] == number: owner = patchset['uploader']['name'] break self.cursor.execute('insert into patchsets' '(ident, number, author, ' 'monitored, seen) ' 'values(?, ?, ?, ?, ?)', [ident, number, owner, 1, time.time()]) self.database.commit() if not owner in self.conf['zuul']['usermap']: continue yield(channel, 'msg', ('Woohoo!, %s %s %s' %(owner, EVENTS.get(pipeline['name'], 'howled into the wind'), review['url']))) yield(channel, 'msg', (' Review title is: %s' % info['subject'])) # Talk to ourselves to get a PPP report entry nick = self.conf['zuul']['usermap'].get(owner, None) if nick: yield(channel, 'msg', ('ducking-bear: ppp progress %s [%s]' %(info['subject'], nick))) nick = self.conf['zuul']['usermap'].get(owner, None) self.log(' nick for %s is %s' %(owner, nick)) if nick: for job in review['jobs']: self.log(' %s: %s' %(job['name'], job['result'])) if not job['result']: continue if job['result'] == 'SUCCESS': continue key = (ident, number, job['name']) if key in self.statuses: continue self.log('%s, %s status %s: %s' %(ident, number, job['name'], job['result'])) test = '-'.join(job['name'].split('-')[2:]) voting = '' if not job['voting']: voting = ' (non-voting)' yield(channel, 'msg', ('%s: %s %s ... 
%s%s' %(nick, review['url'], test, job['result'], voting))) self.statuses[key] = True except Exception, e: self.log('Ignoring exception %s' % e) def Cleanup(self): """We're about to be torn down.""" self.database.commit() self.cursor.close() def Init(log, conf): """Initialize all command classes.""" yield ZuulWatcher(log, conf)
Python
0.998458
@@ -156,22 +156,13 @@ ': ' -did some work! +check ',%0A @@ -183,46 +183,13 @@ ': ' -tricked them into approving the commit +merge '%7D%0A%0A @@ -4198,25 +4198,16 @@ (' -Woohoo!, %25s %25s %25s '%0A @@ -4202,16 +4202,21 @@ %25s %25s %25s + (%25s) '%0A @@ -4478,183 +4478,9 @@ rl'%5D -)))%0A yield(channel, 'msg',%0A (' Review title is: %25s'%0A %25 +, inf @@ -4493,16 +4493,17 @@ ject'%5D)) +) %0A%0A @@ -4746,273 +4746,8 @@ one) -%0A if nick:%0A yield(channel, 'msg',%0A ('ducking-bear: ppp progress %25s %5B%25s%5D'%0A %25(info%5B'subject'%5D, nick))) %0A%0A
52c375b28d2f106712236f9b15906d48030bebfc
fix deal with buttons
api/urls.py
api/urls.py
from django.conf.urls import url from django.contrib.auth.models import User from rest_framework.permissions import IsAuthenticated from api import serializers as s from api import views as v from api import drf_permissions as p from apostello import forms as f from apostello import models as m from elvanto.models import ElvantoGroup # api urlpatterns = [ # list views: url( r'^v2/sms/in/$', v.SmsCollection.as_view( model_class=m.SmsInbound, serializer_class=s.SmsInboundSerializer, permission_classes=(IsAuthenticated, p.CanSeeIncoming) ), name='in_log' ), url( r'^v2/sms/out/$', v.Collection.as_view( model_class=m.SmsOutbound, serializer_class=s.SmsOutboundSerializer, permission_classes=(IsAuthenticated, p.CanSeeOutgoing), related_field='recipient', ), name='out_log' ), url( r'^v2/recipients/$', v.Collection.as_view( model_class=m.Recipient, form_class=f.RecipientForm, serializer_class=s.RecipientSerializer, permission_classes=(IsAuthenticated, p.CanSeeContactNames) ), name='recipients' ), url( r'^v2/groups/$', v.Collection.as_view( model_class=m.RecipientGroup, form_class=f.ManageRecipientGroupForm, serializer_class=s.RecipientGroupSerializer, permission_classes=(IsAuthenticated, p.CanSeeGroups), prefetch_fields=['recipient_set'], ), name='recipient_groups' ), url( r'^v2/elvanto/groups/$', v.Collection.as_view( model_class=ElvantoGroup, serializer_class=s.ElvantoGroupSerializer, permission_classes=(IsAuthenticated, p.CanSeeGroups, p.CanImport) ), name='elvanto_groups' ), url( r'^v2/queued/sms/$', v.QueuedSmsCollection.as_view( model_class=m.QueuedSms, serializer_class=s.QueuedSmsSerializer, permission_classes=(IsAuthenticated, p.IsStaff) ), name='queued_smss' ), url( r'^v2/keywords/$', v.Collection.as_view( model_class=m.Keyword, form_class=f.KeywordForm, serializer_class=s.KeywordSerializer, permission_classes=(IsAuthenticated, p.CanSeeKeywords), prefetch_fields=['linked_groups', 'owners', 'subscribed_to_digest',], ), name='keywords' ), url( r'^v2/users/profiles/$', v.Collection.as_view( model_class=m.UserProfile, serializer_class=s.UserProfileSerializer, permission_classes=(IsAuthenticated, p.IsStaff), ), name='user_profiles' ), url( r'^v2/users/$', v.UserCollection.as_view( model_class=User, serializer_class=s.UserSerializer, permission_classes=(IsAuthenticated, p.CanSeeKeywords), ), name='users' ), url( r'^v2/config/$', v.ConfigView.as_view(), name='site_config', ), # simple toggle views: url( r'^v2/toggle/sms/in/display_on_wall/(?P<pk>[0-9]+)/$', v.ObjSimpleUpdate.as_view( model_class=m.SmsInbound, serializer_class=s.SmsInboundSerializer, permission_classes=(IsAuthenticated, p.CanSeeIncoming), field='display_on_wall', ), name='toggle_display_on_wall', ), url( r'^v2/toggle/sms/in/deal_with/(?P<pk>[0-9]+)/$', v.ObjSimpleUpdate.as_view( model_class=m.SmsInbound, serializer_class=s.SmsInboundSerializer, permission_classes=(IsAuthenticated, p.CanSeeIncoming, p.CanSeeKeywords), field='deal_with', ), name='toggle_deal_with_sms', ), url( r'^v2/toggle/elvanto/group/sync/(?P<pk>[0-9]+)/$', v.ObjSimpleUpdate.as_view( model_class=ElvantoGroup, serializer_class=s.ElvantoGroupSerializer, permission_classes=(IsAuthenticated, ), field='sync', ), name='toggle_elvanto_group_sync', ), # action views: url( r'^v2/actions/sms/send/adhoc/$', v.SendAdhoc.as_view(), name='act_send_adhoc', ), url( r'^v2/actions/sms/send/group/$', v.SendGroup.as_view(), name='act_send_group', ), url( r'^v2/actions/sms/in/archive/(?P<pk>[0-9]+)/$', v.ArchiveObj.as_view( model_class=m.SmsInbound, 
serializer_class=s.SmsInboundSerializer, permission_classes=(IsAuthenticated, p.CanSeeIncoming, ), ), name='act_archive_sms', ), url( r'^v2/actions/recipient/archive/(?P<pk>[0-9]+)/$', v.ArchiveObj.as_view( model_class=m.Recipient, serializer_class=s.RecipientSerializer, permission_classes=(IsAuthenticated, ) ), name='act_archive_recipient', ), url( r'^v2/actions/group/archive/(?P<pk>[0-9]+)/$', v.ArchiveObj.as_view( model_class=m.RecipientGroup, serializer_class=s.RecipientGroupSerializer, permission_classes=(IsAuthenticated, ) ), name='act_archive_group', ), url( r'^v2/actions/keyword/archive/(?P<keyword>[\d|\w]+)/$', v.ArchiveObj.as_view( model_class=m.Keyword, serializer_class=s.KeywordSerializer, permission_classes=(IsAuthenticated, ) ), name='act_archive_keyword', ), url( r'^v2/actions/keywords/(?P<keyword>[\d|\w]+)/archive_resps/$', v.ArchiveAllResponses.as_view(), name='act_keyword_archive_all_responses', ), url( r'^v2/actions/sms/in/reingest/(?P<pk>[0-9]+)/$', v.ReingestObj.as_view( model_class=m.SmsInbound, serializer_class=s.SmsInboundSerializer, permission_classes=(IsAuthenticated, p.CanSeeIncoming), ), name='act_reingest_sms', ), url( r'^v2/actions/group/update_members/(?P<pk>[0-9]+)/$', v.UpdateGroupMembers.as_view( model_class=m.RecipientGroup, serializer_class=s.RecipientGroupSerializer, permission_classes=(IsAuthenticated, p.CanSeeGroups) ), name='act_update_group_members' ), url(r'^v2/actions/elvanto/group_fetch/$', v.ElvantoFetchButton.as_view(), name='act_fetch_elvanto_groups'), url(r'^v2/actions/elvanto/group_pull/$', v.ElvantoPullButton.as_view(), name='act_pull_elvanto_groups'), url( r'^v2/actions/queued/sms/(?P<pk>[0-9]+)/$', v.CancelObj.as_view( model_class=m.QueuedSms, serializer_class=s.QueuedSmsSerializer, permission_classes=(IsAuthenticated, p.IsStaff) ), name='act_cancel_queued_sms' ), url( r'^v2/actions/users/profiles/update/(?P<pk>[0-9]+)/$', v.UpdateUserProfile.as_view( model_class=m.UserProfile, serializer_class=s.UserProfileSerializer, permission_classes=(IsAuthenticated, p.IsStaff), ), name='user_profile_update' ), ]
Python
0
@@ -3883,24 +3883,25 @@ field='deal +t _with',%0A
85a6030ddebaaef2644640b1d3e8e9447a730a78
send utcnow instead of just now
uiharu/bin/collector.py
uiharu/bin/collector.py
from __future__ import print_function import argparse import datetime import logging.config import socket import sys import sqlalchemy as sa from uiharu.collector import TemperatureCollector from uiharu.config import ConfigAction from uiharu.periodic_sleeper import PeriodicSleeper from uiharu.models import TemperatureMeasurement _logging_config = dict( version=1, disable_existing_loggers=False, formatters={ 'verbose': { 'format': '%(asctime)s [%(levelname)s] %(message)s' }, }, handlers={ 'console': { 'class': 'logging.StreamHandler', 'formatter': 'verbose', }, 'null': { 'class': 'logging.NullHandler', } }, loggers={ '': { 'handlers': ['console'], 'level': logging.INFO, }, 'temperusb': { 'level': logging.WARN, }, }, ) logging.config.dictConfig(_logging_config) log = logging.getLogger(__name__) def parse_cli_args(): """Parse the CLI arguments and return the populated namespace.""" hostname = socket.gethostname() parser = argparse.ArgumentParser() parser.add_argument( '--period', type=float, default=60.0, help="How often to collect temperature data (in seconds)", ) parser.add_argument( '--config', action=ConfigAction, help="The location of the JSON config file", ) parser.add_argument( '--sensor-name', default=hostname, help="The name to save collector measurements under. Defaults to this host's hostname ({0})".format(hostname), ) parser.add_argument( '--debug', action='store_true', help="Enable debug mode", ) return parser.parse_args() def main(): args = parse_cli_args() if args.debug: log.setLevel(logging.DEBUG) log.debug("Debug mode enabled") if not args.config: print("Error: A config path must be specified", file=sys.stderr) sys.exit(1) log.info("Using sensor name: %s", args.sensor_name) log.info("Connecting to database") engine = sa.create_engine(args.config['sqlalchemy_connection_url']) Session = sa.orm.sessionmaker(bind=engine) log.info("Starting temperature collector with a collection period of %f seconds", args.period) collector = TemperatureCollector() periodic_sleeper = PeriodicSleeper(args.period) log.info("Running the collector") while True: temperature = collector.get_temperature() if not temperature: log.error("Could not fetch temperature. Sleeping until next collection period.") periodic_sleeper.sleep_until_next_period() continue log.info("Collected the temperature in Celsius: %f", temperature) measurement = TemperatureMeasurement( sensor_name=args.sensor_name, timestamp=datetime.datetime.now(), value=temperature, ) session = Session() session.add(measurement) session.commit() periodic_sleeper.sleep_until_next_period() if __name__ == "__main__": main()
Python
0
@@ -2974,16 +2974,19 @@ atetime. +utc now(),%0A
1e03772e601fb6ed0eb6aa59555af61c29b2650f
remove fungible in parent class constructor call
amaascore/assets/cfd.py
amaascore/assets/cfd.py
from __future__ import absolute_import, division, print_function, unicode_literals

from datetime import datetime, date
from dateutil import parser

from amaascore.assets.derivative import Derivative


class ContractForDifference(Derivative):

    def __init__(self, asset_manager_id, asset_id, asset_issuer_id=None, asset_status='Active',
                 display_name='', description='', country_id=None, venue_id=None, currency=None,
                 issue_date=None, links=None, references=None, *args, **kwargs):
        super(ContractForDifference, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id,
                                                    fungible=False, asset_issuer_id=asset_issuer_id,
                                                    asset_status=asset_status, display_name=display_name,
                                                    description=description, country_id=country_id,
                                                    venue_id=venue_id, issue_date=issue_date,
                                                    currency=currency, links=links,
                                                    references=references, *args, **kwargs)
Python
0.000001
@@ -689,24 +689,8 @@ - fungible=False, ass
63eaf0faf56a70fadbd37f0acac6f5e61c7b19eb
Change sleep function to the end to do repeat everytime
checkdns.py
checkdns.py
# coding=utf8
# 31.220.16.242
# 216.58.222.46
import socket
import time
import webbrowser

def checkdns():
    print time.ctime()
    retorno = True
    try:
        ip = socket.gethostbyname('google.com')
        print ("O IP do host verificado é: " + ip)
        if ip == "216.58.222.46":
            retorno = False
            url = 'http://www.google.com.br/'
            webbrowser.open_new_tab(url)
        else:
            print "DNS ainda não atualizado. Aguardando 30s."
            time.sleep( 30 )
    except socket.gaierror:
        print "Nenhum host definido para o domínio. Aguardando 30s."
        time.sleep( 30 )
    return retorno

condicao = True
while condicao:
    condicao = checkdns()
Python
0
@@ -308,15 +308,14 @@ 8.22 -2 .46%22:%0A + @@ -525,41 +525,8 @@ s.%22%0A - time.sleep( 30 )%0A @@ -630,37 +630,8 @@ s.%22%0A - time.sleep( 30 )%0A @@ -682,16 +682,16 @@ ndicao:%0A - cond @@ -699,16 +699,37 @@ cao = checkdns() +%0A time.sleep( 30 )
4c819629552a31748e4bb266c1c13726276d7944
Use cross version compatible iteration
tests/test_renderers.py
tests/test_renderers.py
import unittest
from asciimatics.renderers import StaticRenderer
from asciimatics.screen import Screen


class TestRenderers(unittest.TestCase):
    def test_static_renderer(self):
        """
        Check that the base static renderer class works.
        """
        # Check basic API for a renderer...
        renderer = StaticRenderer(images=["A\nB", "C "])

        # Max height should match largest height of any entry.
        self.assertEqual(renderer.max_height, 2)

        # Max width should match largest width of any entry.
        self.assertEqual(renderer.max_width, 3)

        # Images should be the parsed versions of the original strings.
        images = renderer.images
        self.assertEqual(images.__next__(), ["A", "B"])
        self.assertEqual(images.__next__(), ["C "])

        # String presentation should be the first image as a printable string.
        self.assertEqual(str(renderer), "A\nB")

    def test_colour_maps(self):
        """
        Check that the ${} syntax is parsed correctly.
        """
        # Check the ${fg, attr} variant
        renderer = StaticRenderer(images=["${3,1}*"])
        output = renderer.rendered_text
        self.assertEqual(len(output[0]), len(output[1]))
        self.assertEqual(output[0], ["*"])
        self.assertEqual(output[1][0][0], (Screen.COLOUR_YELLOW, Screen.A_BOLD))

        # Check the ${fg} variant
        renderer = StaticRenderer(images=["${1}XY${2}Z"])
        output = renderer.rendered_text
        self.assertEqual(len(output[0]), len(output[1]))
        self.assertEqual(output[0], ["XYZ"])
        self.assertEqual(output[1][0][0], (Screen.COLOUR_RED, 0))
        self.assertEqual(output[1][0][1], (Screen.COLOUR_RED, 0))
        self.assertEqual(output[1][0][2], (Screen.COLOUR_GREEN, 0))


if __name__ == '__main__':
    unittest.main()
Python
0
@@ -711,32 +711,27 @@ rtEqual( +next( images -.__next__( ), %5B%22A%22, @@ -766,24 +766,19 @@ ual( +next( images -.__next__( ), %5B
6c5f350b84fa29553265b9ec44a4436f14825221
Add south.
nest/settings/base.py
nest/settings/base.py
# Django settings for nest project. import os from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP from django.core.exceptions import ImproperlyConfigured from unipath import Path from .gargoyle_switches import * def get_env_variable(var_name): """ Get the environment variable or return exception """ try: return os.environ[var_name] except KeyError: error_msg = "Set the %s env variable" % var_name raise ImproperlyConfigured(error_msg) DEBUG = True TEMPLATE_DEBUG = DEBUG GARGOYLE_AUTO_CREATE = True ADMINS = ( ('Philip James', 'philip@immaculateobsession.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'nest.db', # Or path to database file if using sqlite3. # The following settings are not used with sqlite3: 'USER': '', 'PASSWORD': '', 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. 'PORT': '', # Set to empty string for default. } } PROJECT_ROOT = Path(__file__).ancestor(3) MEDIA_ROOT = PROJECT_ROOT.child('media') STATIC_ROOT = '' STATICFILES_DIRS = ( PROJECT_ROOT.child('static'), ) TEMPLATE_DIRS = ( PROJECT_ROOT.child('templates'), ) STATIC_URL = '/static/' # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Los_Angeles' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '1234567890' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'comics.middleware.ReferralMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) TEMPLATE_CONTEXT_PROCESSORS = TCP + ( 'django.core.context_processors.request', 'allauth.account.context_processors.account', 'allauth.socialaccount.context_processors.socialaccount', 'petroglyphs.context_processors.setting_injector', ) AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) ROOT_URLCONF = 'nest.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'nest.wsgi.application' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'comics', 'petroglyphs', 'suit', 'django.contrib.admin', 'suit_redactor', 'nexus', 'gargoyle', 'allauth', 'allauth.account', 'allauth.socialaccount', 'allauth.socialaccount.providers.facebook', 'allauth.socialaccount.providers.twitter', 'datetimewidget', 'rest_framework', 'reversion', 'saltpeter', ) MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' #ALLAUTH SETTINGS ACCOUNT_EMAIL_REQUIRED=True ACCOUNT_UNIQUE_EMAIL=True SOCIALACCOUNT_PROVIDERS = { 'facebook': { 'SCOPE': ['email', 'publish_actions', 'publish_stream', 'manage_pages', ], 'AUTH_PARAMS': {'auth_type': 'reauthenticate'}, 'METHOD': 'oauth2', } } EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.mandrillapp.com' EMAIL_PORT = 587 EMAIL_HOST_USER = 'pjj@philipjohnjames.com' EMAIL_HOST_PASSWORD = get_env_variable('MANDRILL_KEY') DEFAULT_FROM_EMAIL = 'site@quailcomics.com' SERVER_EMAIL = 'site@quailcomics.com' MIXPANEL_KEY = get_env_variable('MIXPANEL_KEY') # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, 'loggly': { 'format':'loggly: %(message)s', }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler', 'formatter': 'verbose', 'filters': ['require_debug_false',], }, 'mail_error': { 'level': 'INFO', 'class': 'django.utils.log.AdminEmailHandler', 'formatter': 'verbose', }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'post_to_social': { 'handlers': ['mail_error'], 'level': 'INFO', 'propagate': True, }, }, }
Python
0.000015
@@ -4640,24 +4640,37 @@ saltpeter',%0A + 'south',%0A )%0A%0AMESSAGE_S
90b991c19ef5249a09410b19c33f2c8bfe9b5ca7
Install pypy for proper architechture.
braid/pypy.py
braid/pypy.py
from os import path

from fabric.api import cd, task, sudo

from braid import fails

pypyURL = 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0-linux64.tar.bz2'
setuptoolsURL = 'http://peak.telecommunity.com/dist/ez_setup.py'
pipURL = 'https://raw.github.com/pypa/pip/master/contrib/get-pip.py'

pypyDir = '/opt/pypy-2.0'

@task
def install():
    sudo('/bin/mkdir -p /opt')
    if fails('/usr/bin/id {}'.format('pypy')):
        sudo('/usr/sbin/useradd --home-dir {} --gid bin '
             '-M --system --shell /bin/false '
             'pypy'.format(pypyDir))
    else:
        sudo('/usr/sbin/usermod --home {} pypy'.format(pypyDir))

    with cd('/opt'):
        for url in pypyURL, setuptoolsURL, pipURL:
            sudo('/usr/bin/wget -nc {}'.format(url))
        sudo('/bin/tar xf {}'.format(path.basename(pypyURL)))
        for url in setuptoolsURL, pipURL:
            sudo('~pypy/bin/pypy {}'.format(path.basename(url)))

    sudo('~pypy/bin/pip install pyopenssl')
    sudo('~pypy/bin/pip install svn+svn://svn.twistedmatrix.com/svn/Twisted/trunk/')
Python
0
@@ -1,12 +1,22 @@ +import re%0A from os impo @@ -60,16 +60,23 @@ sk, sudo +, abort %0A%0Afrom b @@ -87,16 +87,45 @@ import +info%0Afrom braid.utils import fails%0A%0Ap @@ -130,18 +130,35 @@ %0ApypyURL +s = + %7B%0A 'x86_64': 'https: @@ -201,16 +201,18 @@ pypy-2.0 +.2 -linux64 @@ -220,16 +220,131 @@ tar.bz2' +,%0A 'x86': 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0.2-linux.tar.bz2',%0A %7D%0ApypyDir = '/opt/pypy-2.0'%0A %0Asetupto @@ -475,34 +475,8 @@ y'%0A%0A -pypyDir = '/opt/pypy-2.0'%0A %0A@ta @@ -810,16 +810,255 @@ /opt'):%0A + if info.arch() == 'x86_64':%0A pypyURL = pypyURLs%5B'x86_64'%5D%0A elif re.match('i.86', info.arch()):%0A pypyURL = pypyURLs%5B'x86'%5D%0A else:%0A abort(%22Can't install pypy on unknown architecture.%22)%0A%0A
25030673476f9eb99a4eff980d7bb050fdaa2568
Print size of result lists in check_files
analysis/check_files.py
analysis/check_files.py
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import os
import logging
import sys
import yaml

try:
  # Try to use libyaml which is faster
  from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
  # fall back on python implementation
  from yaml import Loader, Dumper

def main(args):
  parser = argparse.ArgumentParser()
  parser.add_argument("-l","--log-level",type=str, default="info",
                      dest="log_level",
                      choices=['debug','info','warning','error'])
  parser.add_argument('first_yml', type=argparse.FileType('r'))
  parser.add_argument('second_yml', type=argparse.FileType('r'))
  pargs = parser.parse_args(args)

  logLevel = getattr(logging, pargs.log_level.upper(),None)
  logging.basicConfig(level=logLevel)

  firstResults = yaml.load(pargs.first_yml, Loader=Loader)
  secondResults = yaml.load(pargs.second_yml, Loader=Loader)

  assert isinstance(firstResults, list)
  assert isinstance(secondResults, list)

  if len(firstResults) == 0:
    logging.error('First Result list is empty')
    return 1

  if len(secondResults) == 0:
    logging.error('Second Result list is empty')
    return 1

  # Create set of all used files
  programsInFirst = set()
  programsInSecond = set()
  for r in firstResults:
    programsInFirst.add(r['program'])
  for r in secondResults:
    programsInSecond.add(r['program'])

  resultMissingFromSecond= [ ]
  resultMissingFromFirst=[ ]

  # Check for files missing in second
  for r in firstResults:
    if not (r['program'] in programsInSecond):
      resultMissingFromSecond.append(r)
      logging.warning('Program {} is missing from second but present in first'.format(r['program']))

  # Check for files missing in first
  for r in secondResults:
    if not (r['program'] in programsInFirst):
      resultMissingFromFirst.append(r)
      logging.warning('Program {} is missing from first but present in second'.format(r['program']))

  print("# of programs missing from second but present in first: {}".format(len(resultMissingFromSecond)))
  print("# of programs missing from first but present in second: {}".format(len(resultMissingFromFirst)))
  print("")
  print("# Missing from second")
  for r in resultMissingFromSecond:
    print(r)

  print("# Missing from first")
  for r in resultMissingFromFirst:
    print(r)

  return 0

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
Python
0
@@ -1158,27 +1158,151 @@ %0A%0A -# Create set of all +print(%22# of results in first %7B%7D%22.format(len(firstResults)))%0A print(%22# of results in second %7B%7D%22.format(len(secondResults)))%0A%0A # Create sets of use
fefdea2a81bec7bdb8678671c0eb2dea8f7dea83
Disable TOTP token sync
hoover/site/settings/common.py
hoover/site/settings/common.py
from pathlib import Path base_dir = Path(__file__).absolute().parent.parent.parent.parent INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'hoover.search', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'hoover.search.middleware.NoReferral', 'hoover.search.middleware.NoCache', ) ROOT_URLCONF = 'hoover.site.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'hoover.search.context_processors.default', ], }, }, ] LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'logfile': { 'format': ('%(asctime)s %(process)d ' '%(levelname)s %(name)s %(message)s'), 'datefmt': '%Y-%m-%d %H:%M:%S', }, }, 'loggers': { 'django.request': { 'level': 'WARNING', 'propagate': False, 'handlers': ['stderr'], }, 'hoover.search': { 'level': 'INFO', 'propagate': False, 'handlers': ['stderr'], }, '': { 'level': 'WARNING', 'propagate': True, 'handlers': ['stderr'], }, }, 'handlers': { 'stderr': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'logfile', }, }, } WSGI_APPLICATION = 'hoover.site.wsgi.application' LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True STATIC_URL = '/static/' LOGIN_REDIRECT_URL = '/' LOGOUT_REDIRECT_URL = '/' HOOVER_ELASTICSEARCH_URL = 'http://localhost:9200' HOOVER_UPLOADS_URL = '/uploads/' STATIC_ROOT = str(base_dir / 'static') HOOVER_UPLOADS_ROOT = str(base_dir / 'uploads') HOOVER_LOADERS = [ 'hoover.search.loaders.upload.Loader', 'hoover.search.loaders.webdav.Loader', 'hoover.search.loaders.collectible.Loader', 'hoover.search.loaders.external.Loader', ] HOOVER_PDFJS_URL = None TIKA_URL = 'http://localhost:9998' EMBED_HYPOTHESIS = None _minute = 60 _hour = 60 * _minute HOOVER_TWOFACTOR_INVITATION_VALID = 30 # minutes HOOVER_TWOFACTOR_AUTOLOGOUT = 3 * _hour HOOVER_TWOFACTOR_RATELIMIT = None HOOVER_RATELIMIT_USER = None HOOVER_BATCH_LIMIT = 250 HOOVER_UI_ROOT = None HOOVER_EVENTS_DIR = None HOOVER_OAUTH_LIQUID_URL = None HOOVER_OAUTH_LIQUID_CLIENT_ID = None HOOVER_OAUTH_LIQUID_CLIENT_SECRET = None HOOVER_HYPOTHESIS_EMBED_URL = None
Python
0
@@ -2560,16 +2560,39 @@ = '/'%0A%0A +OTP_TOTP_SYNC = False%0A%0A HOOVER_E
d115c0ceb08a350f7b367f61627ced5ab03df833
Remove useless space
sklearn_porter/language/__init__.py
sklearn_porter/language/__init__.py
# -*- coding: utf-8 -*-
import sklearn_porter.language.c


import sklearn_porter.language.go
import sklearn_porter.language.java
import sklearn_porter.language.js
import sklearn_porter.language.php
import sklearn_porter.language.ruby

LANGUAGES = {
    c.KEY: c,
    go.KEY: go,
    java.KEY: java,
    js.KEY: js,
    php.KEY: php,
    ruby.KEY: ruby
}

__all__ = ['c', 'go', 'java', 'js', 'php', 'ruby', 'LANGUAGES']
Python
0.000001
@@ -51,18 +51,16 @@ guage.c%0A -%0A%0A import s
e35b3d947e78cda10ccb635f2696bf6d80882992
Expression transformation. Simplify commutative
analyze/wavelet/base.py
analyze/wavelet/base.py
from functools import partial from itertools import chain, tee import numpy as np PI2 = 2 * np.pi def pairwise(iterable): one, two = tee(iterable) next(two, None) return zip(one, two) def grouper(iterable, n): return zip(*([iter(iterable)] * n)) def test_split_vertical(): i, j = split_vertical([[1, 2], [3, 4]]) assert i.tolist() == [[1], [3]] assert j.tolist() == [[2], [4]] def split_vertical(mat): mat = np.asarray(mat) half = mat.shape[1] / 2 return mat[:, :half], mat[:, half:] def test_iconcatenate_pairs(): pairs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] assert [list(r) for r in iconcatenate_pairs(pairs)] == \ [ [1, 2, 3, 4, 5, 6], [4, 5, 6, 7, 8, 9], ] def iconcatenate_pairs(items): for pair in pairwise(items): yield np.concatenate(pair) def is_power_of_two(val): return val and val & (val - 1) == 0 class BaseWaveletBox(object): def __init__(self, nsamples, samplerate, scale_resolution, omega0): if not is_power_of_two(nsamples): raise Exception(u'nsamples must be power of two') self.nsamples = nsamples self.scales = autoscales(nsamples, samplerate, scale_resolution, omega0) self.angular_frequencies = angularfreq(nsamples, samplerate) def apply_cwt(self, chunks, **kwargs): half_nsamples = self.nsamples / 2 pad_num = 0 def np_pad_right(data): pad_num = half_nsamples - len(data) if pad_num < 0: raise Exception(u'Chunks size must be equal to nsamples / 2' u' except last, which may be shorter') if pad_num: return np.pad(data, (0, pad_num), 'constant') else: return data equal_sized_pieces = map(np_pad_right, chunks) zero_pad = np.zeros(half_nsamples) overlapped_pieces = iconcatenate_pairs( chain([zero_pad], equal_sized_pieces, [zero_pad]) ) hanning = np.hanning(self.nsamples) windowed_pieces = map(lambda p: p * hanning, overlapped_pieces) complex_images = [ self.cwt(windowed_piece, **kwargs) for windowed_piece in windowed_pieces ] halfs = chain.from_iterable(map(split_vertical, complex_images)) next(halfs) flattened_images = [left + right for left, right in grouper(halfs, 2)] # Cut pad_num from last flattened_images[-1] = flattened_images[-1][:, :-pad_num] return np.concatenate(flattened_images, axis=1) def angularfreq(nsamples, samplerate): """ Compute angular frequencies """ N2 = nsamples / 2.0 return np.array( [ samplerate * PI2 * (i if i <= N2 else i - nsamples) / nsamples for i in range(nsamples) ], np.float32 ) def autoscales(samples_count, samplerate, scale_resolution, omega0): """ Compute scales as fractional power of two """ samples_duration = samples_count / samplerate upper_frequency = samples_count * ( PI2 / (omega0 + np.sqrt(2 + omega0 ** 2)) ) scales_count = int(np.floor( np.log2(upper_frequency) / scale_resolution )) indexes = np.arange(scales_count + 1, endpoint=False, dtype=np.float32) upper_frequency_scale = samples_duration / upper_frequency return upper_frequency_scale * (2 ** (indexes * scale_resolution))
Python
0.999999
@@ -3121,16 +3121,22 @@ quency = + PI2 * samples @@ -3146,24 +3146,8 @@ unt -* (%0A PI2 / (o @@ -3179,22 +3179,16 @@ 0 ** 2)) -%0A ) %0A%0A sc
3bef5697a7f3c0c0972a3cbcce3a4bdac01b7dcb
Update graph_hashes.py
jkutils/graph_hashes.py
jkutils/graph_hashes.py
#!/usr/bin/python """ Yet another Control Flow Graph hash using small-primes-product. An implementation of the Koret-Karamitas (KOKA) CFGs hashing algorithm. Based on the paper Efficient Features for Function Matching between Binary Executables by Huku (Chariton Karamitas, CENSUS S.A., huku@census-labs.com). Copyright (c) 2018, Joxean Koret """ from __future__ import print_function import sys import time from idc import * from idaapi import * from idautils import * try: from others.tarjan_sort import strongly_connected_components except ImportError: from tarjan_sort import strongly_connected_components #------------------------------------------------------------------------------- def log(msg): Message("[%s] %s\n" % (time.asctime(), msg)) replace_wait_box(msg) #------------------------------------------------------------------------------- # Different type of basic blocks (graph nodes). NODE_ENTRY = 2 NODE_EXIT = 3 NODE_NORMAL = 5 # # NOTE: In the current implementation (Nov-2018) all edges are considered as if # they were conditional. Keep reading... # EDGE_IN_CONDITIONAL = 7 EDGE_OUT_CONDITIONAL = 11 # # Reserved but unused because, probably, it doesn't make sense when comparing # multiple different architectures. # #EDGE_IN_UNCONDITIONAL = 13 #EDGE_OUT_UNCONDITIONAL = 17 # # The following are feature types that aren't applied at basic block but rather # at function level. The idea is that if we do at function level we will have no # problems finding the same function that was re-ordered because of some crazy # code a different compiler decided to create (i.e., resilient to reordering). # FEATURE_LOOP = 19 FEATURE_CALL = 23 FEATURE_DATA_REFS = 29 FEATURE_CALL_REF = 31 FEATURE_STRONGLY_CONNECTED = 37 FEATURE_FUNC_NO_RET = 41 FEATURE_FUNC_LIB = 43 FEATURE_FUNC_THUNK = 47 #------------------------------------------------------------------------------- # Implementation of the KOKA (Koret-Karamitas) hashing algorithm for IDA class CKoretKaramitasHash: def __init__(self): pass def get_node_value(self, succs, preds): """ Return a set of prime numbers corresponding to the characteristics of the node. 
""" ret = 1 if succs == 0: ret *= NODE_ENTRY if preds == 0: ret *= NODE_EXIT ret *= NODE_NORMAL return ret def get_edges_value(self, bb, succs, preds): ret = 1 for edge in succs: ret *= EDGE_OUT_CONDITIONAL for edge in preds: ret *= EDGE_IN_CONDITIONAL return ret def calculate(self, f): func = get_func(f) if func is None: return "NO-FUNCTION" flow = FlowChart(func) if flow is None: return "NO-FLOW-GRAPH" hash = 1 # Variables required for calculations of previous ones bb_relations = {} # Iterate through each basic block for block in flow: block_ea = block.startEA succs = list(block.succs()) preds = list(block.preds()) hash *= self.get_node_value(len(succs), len(preds)) hash *= self.get_edges_value(block, succs, preds) # ...and each instruction on each basic block for ea in list(Heads(block.startEA, block.endEA)): if is_call_insn(ea): hash *= FEATURE_CALL l = DataRefsFrom(ea) hash *= FEATURE_DATA_REFS for xref in CodeRefsFrom(ea, 0): tmp_func = get_func(xref) if tmp_func is None or tmp_func.startEA != func.startEA: hash *= FEATURE_CALL_REF # Remember the relationships bb_relations[block_ea] = [] # Iterate the succesors of this basic block for succ_block in block.succs(): bb_relations[block_ea].append(succ_block.startEA) # Iterate the predecessors of this basic block for pred_block in block.preds(): try: bb_relations[pred_block.startEA].append(block.startEA) except KeyError: bb_relations[pred_block.startEA] = [block.startEA] # Calculate the strongly connected components try: strongly_connected = strongly_connected_components(bb_relations) # ...and get the number of loops out of it for sc in strongly_connected: if len(sc) > 1: hash *= FEATURE_LOOP else: if sc[0] in bb_relations and sc[0] in bb_relations[sc[0]]: hash *= FEATURE_LOOP # And, also, use the number of strongly connected components # to calculate another part of the hash. hash *= (FEATURE_STRONGLY_CONNECTED ** len(strongly_connected)) except: print("Exception:", str(sys.exc_info()[1])) flags = GetFunctionFlags(f) if flags & FUNC_NORET: hash *= FEATURE_FUNC_NO_RET if flags & FUNC_LIB: hash *= FEATURE_FUNC_LIB if flags & FUNC_THUNK: hash *= FEATURE_FUNC_THUNK return str(hash) #------------------------------------------------------------------------------- def main(): kgh = CKoretKaramitasHash() d = {} for f in Functions(): hash = kgh.calculate(f) func_str_ea = "0x%08x" % f try: d[hash].append(func_str_ea) except: d[hash] = [func_str_ea] print("0x%08x %s" % (f, hash)) import pprint pprint.pprint(d) uniques = 0 for key in d: if len(d[key]) > 1: print(key, d[key]) else: uniques += 1 print() print("Unique hashes", uniques) if __name__ == "__main__": main()
Python
0.000001
@@ -2865,16 +2865,60 @@ .startEA +%0A if block.endEA == 0:%0A continue %0A%0A
3fdad9fb89d70b8d81483b646e16d20f076e0ebd
Test urxvt alpha
tests/test_sequences.py
tests/test_sequences.py
"""Test sequence functions.""" import unittest import unittest.mock import io from pywal import sequences from pywal import util # Import colors. COLORS = util.read_file_json("tests/test_files/test_file.json") class Testsequences(unittest.TestCase): """Test the sequence functions.""" def test_set_special(self): """> Create special escape sequence.""" result = sequences.set_special(11, COLORS["special"]["background"]) self.assertEqual(result, "\033]11;#1F211E\007") def test_set_color(self): """> Create color escape sequence.""" result = sequences.set_color(11, COLORS["colors"]["color0"]) self.assertEqual(result, "\033]4;11;#1F211E\007") def test_send_srquences(self): """> Send sequences to all open terminals.""" with unittest.mock.patch('sys.stdout', new=io.StringIO()) as fake_out: sequences.send(COLORS, False) data = fake_out.getvalue().strip() self.assertTrue(data.endswith("colors: Set terminal colors")) if __name__ == "__main__": unittest.main()
Python
0.000019
@@ -493,32 +493,300 @@ ;#1F211E%5C007%22)%0A%0A + def test_set_special_alpha(self):%0A %22%22%22%3E Create special escape sequence with alpha.%22%22%22%0A util.Color.alpha_num = 40%0A result = sequences.set_special(11, COLORS%5B%22special%22%5D%5B%22background%22%5D)%0A self.assertEqual(result, %22%5C033%5D11;%5B40%5D#1F211E%5C007%22)%0A%0A def test_set
89d68883b636b29dcfbb3c56b2b766a54fc7b37d
remove debugger
tests/test_solr_prep.py
tests/test_solr_prep.py
import pytest from datetime import datetime from lametro.search_indexes import LAMetroBillIndex @pytest.mark.parametrize('session_identifier,prepared_session', [ ('2014', '7/1/2014 to 6/30/2015'), ('2015', '7/1/2015 to 6/30/2016'), ('2016', '7/1/2016 to 6/30/2017'), ('2017', '7/1/2017 to 6/30/2018'), ('2018', '7/1/2018 to 6/30/2019'), ]) def test_legislative_session(bill, legislative_session, session_identifier, prepared_session): ''' This test instantiates LAMetroBillIndex – a subclass of SearchIndex from Haystack, used for building the Solr index. The test, then, calls the SearchIndex `prepare` function, which returns a dict of prepped data. https://github.com/django-haystack/django-haystack/blob/4910ccb01c31d12bf22dcb000894eece6c26f74b/haystack/indexes.py#L198 ''' legislative_session.identifier = session_identifier legislative_session.save() bill = bill.build(legislative_session=legislative_session) index = LAMetroBillIndex() indexed_data = index.prepare(bill) assert indexed_data['legislative_session'] == prepared_session @pytest.mark.parametrize('session_identifier,prepared_session', [ ('2014', '7/1/2014 to 6/30/2015'), ('2015', '7/1/2015 to 6/30/2016'), ('2016', '7/1/2016 to 6/30/2017'), ('2017', '7/1/2017 to 6/30/2018'), ('2018', '7/1/2018 to 6/30/2019'), ]) def test_sponsorships(bill, legislative_session, session_identifier, prepared_session, metro_organization, event, mocker): ''' This test instantiates LAMetroBillIndex – a subclass of SearchIndex from Haystack, used for building the Solr index. The test, then, calls the SearchIndex `prepare` function, which returns a dict of prepped data. https://github.com/django-haystack/django-haystack/blob/4910ccb01c31d12bf22dcb000894eece6c26f74b/haystack/indexes.py#L198 ''' legislative_session.identifier = session_identifier legislative_session.save() bill = bill.build(legislative_session=legislative_session) org1 = metro_organization.build() org2 = metro_organization.build() event1 = event.build() # event2 = event.build() # event3 = event.build() actions_and_agendas = [ { 'date': datetime.now(), 'description': 'org1 description', 'event': event1, 'organization': org1 }, { 'date': datetime.now(), 'description': 'org2 descripton', 'event': event1, 'organization': org2 }, { 'date': datetime.now(), 'description': 'org2 descripton', 'event': event1, 'organization': org2 } ] import pdb pdb.set_trace() mock_actions_and_agendas = mocker.patch('lametro.models.LAMetroBill.actions_and_agendas',\ new_callable=mocker.PropertyMock,\ return_value=actions_and_agendas) index = LAMetroBillIndex() indexed_data = index.prepare(bill) assert indexed_data['sponsorships'] == {org1.name, org2.name}
Python
0.000133
@@ -2939,43 +2939,8 @@ %5D%0A - import pdb%0A pdb.set_trace()%0A
b86348349906c88b6946f757485cf41f909a9a91
fix subtitle test for newer versions of ffmpeg
tests/test_subtitles.py
tests/test_subtitles.py
import sys

from .common import *

from av.subtitles.subtitle import *


class TestSubtitle(TestCase):

    def test_movtext(self):
        path = fate_suite('sub/MovText_capability_tester.mp4')
        fh = av.open(path)

        subs = []
        for packet in fh.demux():
            try:
                subs.extend(packet.decode())
            except ValueError:
                raise SkipTest

        self.assertEqual(len(subs), 3)
        self.assertIsInstance(subs[0][0], AssSubtitle)
        self.assertEqual(subs[0][0].ass, 'Dialogue: 0,0:00:00.97,0:00:02.54,Default,- Test 1.\\N- Test 2.\r\n')

    def test_vobsub(self):
        path = fate_suite('sub/vobsub.sub')
        fh = av.open(path)

        subs = []
        for packet in fh.demux():
            try:
                subs.extend(packet.decode())
            except ValueError:
                raise SkipTest

        self.assertEqual(len(subs), 43)

        sub = subs[0][0]
        self.assertIsInstance(sub, BitmapSubtitle)
        self.assertEqual(sub.x, 259)
        self.assertEqual(sub.y, 379)
        self.assertEqual(sub.width, 200)
        self.assertEqual(sub.height, 24)

        bms = sub.planes
        self.assertEqual(len(bms), 1)

        if hasattr(__builtins__, 'buffer'):
            self.assertEqual(len(buffer(bms[0])), 4800)
        if hasattr(__builtins__, 'memoryview'):
            self.assertEqual(len(memoryview(bms[0])), 4800)
Python
0
@@ -503,29 +503,26 @@ self.assert -Equal +In (subs%5B0%5D%5B0%5D. @@ -526,16 +526,17 @@ 0%5D.ass, +( 'Dialogu @@ -596,16 +596,135 @@ 2.%5Cr%5Cn' +,%0A 'Dialogue: 0,0:00:00.97,0:00:02.54,Default,,0,0,0,,- Test 1.%5C%5CN- Test 2.%5Cr%5Cn') )%0A%0A d
4a7484bccc9a92353681fb155f15629fa1059cd1
Format users
slackbot/get_scoreboard.py
slackbot/get_scoreboard.py
import logging
from typing import Dict, List, Tuple

from werkzeug.datastructures import ImmutableMultiDict

from database.main import connect, channel_resp
from database.team import check_all_scores

logger = logging.getLogger(__name__)


def get_scoreboard(form: ImmutableMultiDict) -> Dict[str, str]:
    logger.debug(f"Scoreboard request: {form}")
    team_id = form.get('team_id', '')
    with connect() as conn:
        scoreboard_list = check_all_scores(conn, team_id)
    return channel_resp(_parse_scoreboard(scoreboard_list))


def _parse_scoreboard(scoreboard_list: List[Tuple[str, int]]) -> str:
    text = f'Here\'s a list of my favourite people:'
    for index, (subject, score) in enumerate(scoreboard_list):
        text += f'\n{index+1}. {subject} [{score} point{"s" if score != 1 else ""}]'
        if index == 0:
            text += ' :crown:'
        elif index + 1 == len(scoreboard_list):
            text += ' :hankey:'
    return text
Python
0.000001
@@ -748,16 +748,18 @@ dex+1%7D. +%3C@ %7Bsubject @@ -759,16 +759,17 @@ subject%7D +%3E %5B%7Bscore
29bfc1049352f59fca0b625d0ecbc7177fb565c7
Change default value for certificate location.
py509/x509.py
py509/x509.py
import socket
import uuid

from OpenSSL import crypto


def make_serial():
  """Make a random serial number."""
  return uuid.uuid4().int


def make_pkey(key_type=crypto.TYPE_RSA, key_bits=4096):
  """Make a public/private key pair."""
  key = crypto.PKey()
  key.generate_key(key_type, key_bits)
  return key


def make_certificate_signing_request(pkey, digest='sha512', **name):
  """Make a certificate signing request."""
  csr = crypto.X509Req()
  subj = csr.get_subject()
  subj.C = name.get('C', 'US')
  subj.ST = name.get('ST', 'CA')
  subj.L = name.get('L', 'San Diego')
  subj.O = name.get('O', 'Home')
  subj.OU = name.get('OU', socket.gethostbyname(socket.getfqdn()))
  subj.CN = name.get('CN', socket.getfqdn())
  csr.set_pubkey(pkey)
  csr.set_version(3)
  csr.sign(pkey, digest)
  return csr


def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after, digest='sha512', version=2, exts=()):
  """Make a certificate."""
  crt = crypto.X509()
  crt.set_serial_number(serial)
  crt.gmtime_adj_notBefore(not_before)
  crt.gmtime_adj_notAfter(not_after)
  crt.set_issuer(ca_cert.get_subject())
  crt.set_subject(csr.get_subject())
  crt.set_pubkey(csr.get_pubkey())
  crt.set_version(version)
  crt.add_extensions(exts)
  crt.sign(ca_key, digest)
  return crt


def make_certificate_authority(**name):
  """Make a certificate authority.

  A certificate authority can sign certificates. For clients to be able to
  validate certificates signed by your certificate authorithy, they must trust
  the certificate returned by this function.

  """
  key = make_pkey()
  csr = make_certificate_signing_request(key, **name)
  crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60, exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
  return key, crt
Python
0.000024
@@ -564,17 +564,12 @@ ', ' -San Diego +Home ')%0A
e05a4f17fcf0ec1bedcc8188d584d31616c4e0af
Update test_toml_file.py
tests/test_toml_file.py
tests/test_toml_file.py
import os

from tomlkit.toml_document import TOMLDocument
from tomlkit.toml_file import TOMLFile


def test_toml_file(example):
    original_content = example("example")

    toml_file = os.path.join(os.path.dirname(__file__), "examples", "example.toml")
    toml = TOMLFile(toml_file)

    content = toml.read()
    assert isinstance(content, TOMLDocument)
    assert content["owner"]["organization"] == "GitHub"

    toml.write(content)

    try:
        with open(toml_file, encoding="utf-8") as f:
            assert original_content == f.read()
    finally:
        with open(toml_file, "w", encoding="utf-8", newline="") as f:
            assert f.write(original_content)


def test_keep_old_eol(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\r\nb = 2\r\n")

    f = TOMLFile(toml_path)
    content = f.read()
    content["b"] = 3
    f.write(content)

    with open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\r\nb = 3\r\n"


def test_keep_old_eol_2(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\nb = 2\n")

    f = TOMLFile(toml_path)
    content = f.read()
    content["b"] = 3
    f.write(content)

    with open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\nb = 3\n"


def test_mixed_eol(tmpdir):
    toml_path = str(tmpdir / "pyproject.toml")
    with open(toml_path, "wb+") as f:
        f.write(b"a = 1\r\nrb = 2\n")

    f = TOMLFile(toml_path)
    f.write(f.read())

    with io.open(toml_path, "rb") as f:
        assert f.read() == b"a = 1\r\nrb = 2\n"
Python
0.000002
@@ -1559,11 +1559,8 @@ ith -io. open
210bf81fa0c7296c6e48e112dacc29ad2b89af0c
add raw_id_fields for users and topics
pybb/admin.py
pybb/admin.py
# -*- coding: utf-8
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin

from pybb.models import Category, Forum, Topic, Post, Profile, Read


class CategoryAdmin(admin.ModelAdmin):
    list_display = ['name', 'position', 'forum_count']
    list_per_page = 20
    ordering = ['position']
    search_fields = ['name']


class ForumAdmin(admin.ModelAdmin):
    list_display = ['name', 'category', 'position', 'topic_count']
    list_per_page = 20
    ordering = ['-category']
    search_fields = ['name', 'category__name']
    fieldsets = (
        (None, {
            'fields': ('category', 'name', 'updated')
            }
        ),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': ('position', 'description', 'post_count', 'moderators')
            }
        ),
    )


class TopicAdmin(admin.ModelAdmin):
    list_display = ['name', 'forum', 'created', 'head', 'post_count']
    list_per_page = 20
    ordering = ['-created']
    date_hierarchy = 'created'
    search_fields = ['name']
    fieldsets = (
        (None, {
            'fields': ('forum', 'name', 'user', ('created', 'updated'))
            }
        ),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields': (('views', 'post_count'), ('sticky', 'closed'), 'subscribers')
            }
        ),
    )


class PostAdmin(admin.ModelAdmin):
    list_display = ['topic', 'user', 'created', 'updated', 'summary']
    list_per_page = 20
    ordering = ['-created']
    date_hierarchy = 'created'
    search_fields = ['body']
    fieldsets = (
        (None, {
            'fields': ('topic', 'user', 'markup')
            }
        ),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields' : (('created', 'updated'), 'user_ip')
            }
        ),
        (_('Message'), {
            'fields': ('body', 'body_html', 'body_text')
            }
        ),
    )


class ProfileAdmin(admin.ModelAdmin):
    list_display = ['user', 'time_zone', 'location', 'language']
    list_per_page = 20
    ordering = ['-user']
    search_fields = ['user__username', 'user__first_name', 'user__last_name']
    fieldsets = (
        (None, {
            'fields': ('user', 'time_zone', 'markup', 'location', 'language')
            }
        ),
        (_('IM'), {
            'classes': ('collapse',),
            'fields' : ('jabber', 'icq', 'msn', 'aim', 'yahoo')
            }
        ),
        (_('Additional options'), {
            'classes': ('collapse',),
            'fields' : ('site', 'avatar', 'signature', 'show_signatures')
            }
        ),
        (_('Ban options'), {
            'classes': ('collapse',),
            'fields' : ('ban_status', 'ban_till')
            }
        ),
    )


class ReadAdmin(admin.ModelAdmin):
    list_display = ['user', 'topic', 'time']
    list_per_page = 20
    ordering = ['-time']
    date_hierarchy = 'time'
    search_fields = ['user__username', 'topic__name']


admin.site.register(Category, CategoryAdmin)
admin.site.register(Forum, ForumAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Read, ReadAdmin)
Python
0
@@ -464,32 +464,67 @@ t_per_page = 20%0A + raw_id_fields = %5B'moderators'%5D%0A ordering = %5B @@ -532,24 +532,24 @@ -category'%5D%0A - search_f @@ -1031,32 +1031,85 @@ t_per_page = 20%0A + raw_id_fields = %5B'user', 'forum', 'subscribers'%5D%0A ordering = %5B @@ -1649,32 +1649,70 @@ t_per_page = 20%0A + raw_id_fields = %5B'user', 'topic'%5D%0A ordering = %5B @@ -2318,32 +2318,61 @@ t_per_page = 20%0A + raw_id_fields = %5B'user'%5D%0A ordering = %5B @@ -3193,24 +3193,24 @@ c', 'time'%5D%0A - list_per @@ -3212,32 +3212,70 @@ t_per_page = 20%0A + raw_id_fields = %5B'user', 'topic'%5D%0A ordering = %5B
b108cb874288ab6d2ee17b2fd807a95509b3e2c5
properly reverse emails as usernames in urls
api/urls.py
api/urls.py
from django.conf.urls import url, include

from .views import LocationApi, IssueView, IssueCommentView, UserSearch, IssueStatusView, CommentDetailView, \
    MentionView, UserInformationApi, UserDetailView

app_name = 'issue_tracker_api'

urlpatterns = [
    url(r'^$', LocationApi.as_view()),
    url(
        r'^issue/(?P<issue_id>\d+)/',
        include([
            url(r'^$', IssueView.as_view(), name='issue_detail'),
            url(r'^comment/$', IssueCommentView.as_view(), name='issue_comments'),
            url(r'^status/$', IssueStatusView.as_view(), name='issue_status'),
        ])
    ),
    url(r'aboutme/$', UserInformationApi.as_view(), name='aboutme'),
    url(r'^users/$', UserSearch.as_view(), name='user_search'),
    url(r'users/(?P<username>[\w@]+)/$', UserDetailView.as_view(), name='user_detail'),
    url(r'^mentions/$', MentionView.as_view(), name='mention_search'),
    url(r'^comment/(?P<pk>\d+)/$', CommentDetailView.as_view(), name='comment_detail')
]
Python
0.999932
@@ -763,16 +763,17 @@ ame%3E%5B%5Cw@ +. %5D+)/$',