Dataset schema (one record per commit; string lengths are min–max over the split):

| column        | type               | length / range |
|---------------|--------------------|----------------|
| commit        | string             | 40–40          |
| subject       | string             | 1–1.49k        |
| old_file      | string             | 4–311          |
| new_file      | string             | 4–311          |
| new_contents  | string             | 1–29.8k        |
| old_contents  | string             | 0–9.9k         |
| lang          | string (3 classes) | n/a            |
| proba         | float64            | 0–1            |
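
For readers who want to iterate over rows shaped like the records below, here is a minimal sketch using the Hugging Face `datasets` library. The dataset identifier is a hypothetical placeholder, since this dump does not name the repository it was exported from.

```python
# Minimal sketch: iterate over rows with the schema above.
# "user/commit-diffs" is a placeholder id -- substitute the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/commit-diffs", split="train")
for row in ds.select(range(3)):
    print(row["commit"][:8], row["lang"], row["proba"], "-", row["subject"])
```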

---

commit: 1b4776ddb6ca0f30e4b61393ac37a8f44cfb2af4
subject: fix auto-discovering db config
old_file: feedservice/settings.py
new_file: feedservice/settings.py
new_contents:

```python
# -*- coding: utf-8 -*-

import os, os.path


def bool_env(val, default):
    """Replaces string based environment values with Python booleans"""
    if not val in os.environ:
        return default
    return True if os.environ.get(val) == 'True' else False


DEBUG = bool_env('MYGPOFS_DEBUG', True)
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Stefan Kögl', 'stefan@skoegl.net'),
)

MANAGERS = ADMINS

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Static asset configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.join(BASE_DIR, '../htdocs')
STATIC_ROOT = 'static'
STATIC_URL = '/media/'

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'media'),
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'm6jkg5lzard@k^p(wui4gtx_zu4s=26c+c0bk+k1xsik6+derf'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'feedservice.urls'

TEMPLATE_DIRS = (
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'feedservice.parse',
    'feedservice.urlstore',
    'feedservice.webservice',
)

SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

BASE_URL='http://localhost:8080/'

import dj_database_url
DATABASES = {'default': dj_database_url.config()}

SOUNDCLOUD_CONSUMER_KEY = os.getenv('MYGPOFS_SOUNDCLOUD_CONSUMER_KEY', '')

FLATTR_THING = ''

ALLOWED_HOSTS = filter(None, os.getenv('MYGPOFS_ALLOWED_HOSTS', '').split(';'))

try:
    from settings_prod import *
except ImportError, e:
    import sys
    print >> sys.stderr, 'create settings_prod.py with your customized settings'
```

old_contents: identical to new_contents except for the database line, which read:

```python
import dj_database_url
DATABASES = dj_database_url.config()
```

lang: Python
proba: 0.000002
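
The fix nests the parsed connection under the `'default'` alias. A short sketch of why that matters, assuming `dj_database_url`'s usual behavior of reading the `DATABASE_URL` environment variable:

```python
# Django expects DATABASES to map alias names to connection dicts, and the
# 'default' alias must exist. dj_database_url.config() returns one connection
# dict parsed from DATABASE_URL, so it must be wrapped, not assigned directly.
import dj_database_url

DATABASES = {'default': dj_database_url.config(default='sqlite:///db.sqlite3')}
```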

---

commit: d86bdec5d7d57fe74cb463e391798bd1e5be87ff
subject: Update Ghana code to match current Pombola
old_file: pombola/ghana/urls.py
new_file: pombola/ghana/urls.py
new_contents:

```python
from django.conf.urls import patterns, url, include
from django.views.generic import TemplateView

from .views import data_upload, info_page_upload

urlpatterns = patterns('',
    url(r'^intro$', TemplateView.as_view(template_name='intro.html')),
    url(r'^data/upload/mps/$', data_upload, name='data_upload'),
    url(r'^data/upload/info-page/$', info_page_upload, name='info_page_upload'),
    url('', include('django.contrib.auth.urls')),
)
```

old_contents:

```python
from django.conf.urls import patterns, include, url, handler404
from django.views.generic import TemplateView

import django.contrib.auth.views

from .views import data_upload, info_page_upload

urlpatterns = patterns('',
    url(r'^intro$', TemplateView.as_view(template_name='intro.html')),
    url(r'^data/upload/mps/$', data_upload, name='data_upload'),
    url(r'^data/upload/info-page/$', info_page_upload, name='info_page_upload'),

    #auth views
    url(r'^accounts/login$', django.contrib.auth.views.login, name='login'),
    url(r'^accounts/logut$', django.contrib.auth.views.logout, name='logout'),
    #url(r'^accounts/register$', registration.backends.simple.urls, name='register'),
)
```

lang: Python
proba: 0
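
The replacement leans on Django's bundled auth URLconf instead of hand-written login/logout entries. A minimal sketch of what that one `include` provides:

```python
# include('django.contrib.auth.urls') wires up the stock auth routes in one
# line -- named URLs such as 'login', 'logout', 'password_change' and
# 'password_reset' -- replacing the explicit view entries in the old file.
from django.conf.urls import include, url

urlpatterns = [
    url('', include('django.contrib.auth.urls')),
]
```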
9b75fd09220e61fd511c99e63f8d2b30e6a0f868
stop using deprecated assertEquals()
test_csv2es.py
test_csv2es.py
## Copyright 2015 Ray Holder ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. import csv2es import unittest class TestDelimiter(unittest.TestCase): def test_sanitize(self): self.assertEqual(None, csv2es.sanitize_delimiter(None, False)) self.assertEqual(str('\t'), csv2es.sanitize_delimiter(None, True)) self.assertEqual(str('|'), csv2es.sanitize_delimiter('|', False)) self.assertEqual(str('|'), csv2es.sanitize_delimiter(u'|', False)) self.assertEqual(str('\t'), csv2es.sanitize_delimiter('|', True)) self.assertEqual(str('\t'), csv2es.sanitize_delimiter('||', True)) self.assertRaises(Exception, csv2es.sanitize_delimiter, '||', False) class TestLoading(unittest.TestCase): def test_csv(self): # TODO fill this in self.assertTrue(True) def test_tsv(self): # TODO fill this in self.assertTrue(True) if __name__ == '__main__': unittest.main()
## Copyright 2015 Ray Holder ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. import csv2es import unittest class TestDelimiter(unittest.TestCase): def test_sanitize(self): self.assertEquals(None, csv2es.sanitize_delimiter(None, False)) self.assertEquals(str('\t'), csv2es.sanitize_delimiter(None, True)) self.assertEquals(str('|'), csv2es.sanitize_delimiter('|', False)) self.assertEquals(str('|'), csv2es.sanitize_delimiter(u'|', False)) self.assertEquals(str('\t'), csv2es.sanitize_delimiter('|', True)) self.assertEquals(str('\t'), csv2es.sanitize_delimiter('||', True)) self.assertRaises(Exception, csv2es.sanitize_delimiter, '||', False) class TestLoading(unittest.TestCase): def test_csv(self): # TODO fill this in self.assertTrue(True) def test_tsv(self): # TODO fill this in self.assertTrue(True) if __name__ == '__main__': unittest.main()
Python
0
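
Context for the change: `assertEquals` is a deprecated alias of `assertEqual`; it has emitted a `DeprecationWarning` since Python 3.2 and was removed in Python 3.12. A tiny runnable illustration:

```python
import unittest


class AliasDemo(unittest.TestCase):
    def test_equal(self):
        # assertEqual is the canonical name; the assertEquals alias was
        # deprecated in Python 3.2 and removed outright in Python 3.12.
        self.assertEqual(str('|'), '|')


if __name__ == '__main__':
    unittest.main()
```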

---

commit: bce19fd89fc82f2d18bd1cc210d94255800a2d5c
subject: Use relative import for Python 3 support
old_file: molo/commenting/admin_views.py
new_file: molo/commenting/admin_views.py
new_contents:

```python
from .tasks import send_export_email

from django.contrib import messages
from django.shortcuts import redirect
from django.views.generic import FormView

from django_comments.views.comments import post_comment
from molo.commenting.forms import AdminMoloCommentReplyForm
from wagtail.contrib.modeladmin.views import IndexView


class MoloCommentsAdminView(IndexView):
    def send_export_email_to_celery(self, email, arguments):
        send_export_email.delay(email, arguments)

    def post(self, request, *args, **kwargs):
        if not request.user.email:
            messages.error(
                request, (
                    "Your email address is not configured. "
                    "Please update it before exporting."))
            return redirect(request.path)

        drf__submit_date__gte = request.GET.get('drf__submit_date__gte')
        drf__submit_date__lte = request.GET.get('drf__submit_date__lte')
        is_staff = request.GET.get('user__is_staff__exact')
        is_removed__exact = request.GET.get('is_removed__exact')

        filter_list = {
            'submit_date__range': (drf__submit_date__gte,
                                   drf__submit_date__lte)
            if drf__submit_date__gte and drf__submit_date__lte else None,
            'is_removed': is_removed__exact,
            'user__is_staff': is_staff
        }

        arguments = {'wagtail_site': request.site.pk}
        for key, value in filter_list.items():
            if value:
                arguments[key] = value

        self.send_export_email_to_celery(request.user.email, arguments)
        messages.success(request, (
            "CSV emailed to '{0}'").format(request.user.email))
        return redirect(request.path)

    def get_template_names(self):
        return 'admin/molo_comments_admin.html'


class MoloCommentsAdminReplyView(FormView):
    form_class = AdminMoloCommentReplyForm
    template_name = 'admin/molo_comments_admin_reply.html'

    def get_form_kwargs(self):
        kwargs = super(MoloCommentsAdminReplyView, self).get_form_kwargs()
        kwargs['parent'] = self.kwargs['parent']
        return kwargs

    def form_valid(self, form):
        self.request.POST = self.request.POST.copy()
        self.request.POST['name'] = ''
        self.request.POST['url'] = ''
        self.request.POST['email'] = ''
        self.request.POST['parent'] = self.kwargs['parent']
        post_comment(self.request)
        messages.success(self.request, ('Reply successfully created.'))
        return redirect('/admin/commenting/molocomment/')
```

old_contents: identical to new_contents except for the import block, which read:

```python
from django.contrib import messages
from django.shortcuts import redirect
from django.views.generic import FormView

from django_comments.views.comments import post_comment
from molo.commenting.forms import AdminMoloCommentReplyForm
from tasks import send_export_email
from wagtail.contrib.modeladmin.views import IndexView
```

lang: Python
proba: 0
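
Why the one-line change matters: Python 3 dropped implicit relative imports (PEP 328), so `from tasks import ...` only resolves if a top-level `tasks` module happens to be on `sys.path`, while `from .tasks import ...` always finds the sibling module inside the package. A small demonstration, assuming no top-level `tasks` module is installed in the environment where it runs:

```python
# Python 3 resolves bare imports against sys.path only, so the old spelling
# fails unless 'tasks' exists as a top-level module. The fixed spelling,
# `from .tasks import send_export_email`, resolves within the package itself.
try:
    from tasks import send_export_email  # old, implicit-relative style
except ImportError as exc:
    print("implicit relative import failed as expected:", exc)
```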

---

commit: b23e93a996f1e769dd64050c35b093275d6b9386
subject: Update Kitsu service
old_file: src/services/info/kitsu.py
new_file: src/services/info/kitsu.py
new_contents:

```python
# API docs: https://kitsu.docs.apiary.io
from logging import debug, info, warning, error
import re

from .. import AbstractInfoHandler
from data.models import UnprocessedShow, ShowType


class InfoHandler(AbstractInfoHandler):
    _show_link_base = "https://kitsu.io/anime/{slug}"
    _show_link_matcher = "https?://kitsu\.io/anime/([a-zA-Z0-9-]+)"
    _season_url = "https://kitsu.io/api/edge/anime?filter[year]={year}&filter[season]={season}&filter[subtype]=tv&page[limit]=20"

    _api_base = "https:///kitsu.io/api/edge/anime"

    def __init__(self):
        super().__init__("kitsu", "Kitsu")

    def get_link(self, link):
        if link is None:
            return None
        return self._show_link_base.format(slug=link.site_key)

    def extract_show_id(self, url):
        if url is not None:
            match = re.match(self._show_link_matcher, url, re.I)
            if match:
                return match.group(1)
        return None

    def get_episode_count(self, link, **kwargs):
        #debug("Getting episode count")
        # Request show data from Kitsu
        #url = self._api_base + "?filter[slug]=" + link.site_key + "&fields[anime]=episodeCount"
        #response = self._site_request(url, **kwargs)
        #if response is None:
        #    error("Cannot get show data")
        #    return None
        # Parse show data
        #count = response["data"][0]["attributes"]["episodeCount"]
        #if count is None:
        #    warning(" Count not found")
        #    return None
        #return count
        return None

    def get_show_score(self, show, link, **kwargs):
        #debug("Getting show score")
        # Request show data
        #url = self._api_base + "?filter[slug]=" + link.site_key + "&fields[anime]=averageRating"
        #response = self._site_request(url, **kwargs)
        #if response is None:
        #    error("Cannot get show data")
        #    return None
        # Find score
        #score = response["data"][0]["attributes"]["averageRating"]
        #if score is None:
        #    warning(" Score not found")
        #    return None
        #return score
        return None

    def get_seasonal_shows(self, year=None, season=None, **kwargs):
        #debug("Getting season shows: year={}, season={}".format(year, season))
        # Request season data from Kitsu
        #url = self._season_url.format(year=year, season=season)
        #response = self._site_request(url, **kwargs)
        #if response is None:
        #    error("Cannot get show list")
        #    return list()
        # Parse data
        #TODO
        return list()

    def find_show(self, show_name, **kwargs):
        #url = self._api_base + "?filter[text]=" + show_name
        #result = self._site_request(url, **kwargs)
        #if result is None:
        #    error("Failed to find show")
        #    return list()
        #shows = list()
        #TODO
        #return shows
        return list()

    def find_show_info(self, show_id, **kwargs):
        #debug("Getting show info for {}".format(show_id))
        # Request show data from Kitsu
        #url = self._api_base + "?filter[slug]=" + show_id + "&fields[anime]=titles,abbreviatedTitles"
        #response = self._site_request(url, **kwargs)
        #if response is None:
        #    error("Cannot get show data")
        #    return None
        # Parse show data
        #name_english = response["data"][0]["attributes"]["titles"]["en"]
        #if name_english is None:
        #    warning(" English name was not found")
        #    return None
        #names = [name_english]
        #return UnprocessedShow(self.key, id, None, names, ShowType.UNKNOWN, 0, False)
        return None

    def _site_request(self, url, **kwargs):
        return self.request(url, json=True, **kwargs)
```

old_contents: the same module, but with the API-docs comment reading `# API docs: http://docs.kitsu.apiary.io/`; `get_episode_count`, `get_show_score` and `find_show` returning their defaults with no commented-out request code; `get_seasonal_shows` carrying the comments `# Request season page from AniDB` and `# Parse page`; and `find_show_info`'s commented URL lacking the `&fields[anime]=titles,abbreviatedTitles` suffix.

lang: Python
proba: 0
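
A standalone illustration of the `_show_link_matcher` pattern that `extract_show_id` uses in both versions:

```python
# The slug pattern from InfoHandler._show_link_matcher, exercised on its own.
import re

matcher = r"https?://kitsu\.io/anime/([a-zA-Z0-9-]+)"
match = re.match(matcher, "https://kitsu.io/anime/cowboy-bebop", re.I)
print(match.group(1))  # -> cowboy-bebop
```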

---

commit: 97a8a349d26b364e57aaac6f8d920770810aa8d8
subject: Correct localized strings
old_file: src/sentry/constants.py
new_file: src/sentry/constants.py
new_contents:

```python
"""
sentry.constants
~~~~~~~~~~~~~~~~

These settings act as the default (base) settings for the Sentry-provided web-server

:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _

SORT_OPTIONS = SortedDict((
    ('priority', _('Priority')),
    ('date', _('Last Seen')),
    ('new', _('First Seen')),
    ('freq', _('Frequency')),
    ('tottime', _('Total Time Spent')),
    ('avgtime', _('Average Time Spent')),
    ('accel_15', _('Trending: %(minutes)d minutes' % {'minutes': 15})),
    ('accel_60', _('Trending: %(minutes)d minutes' % {'minutes': 60})),
))

SORT_CLAUSES = {
    'priority': 'sentry_groupedmessage.score',
    'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)',
    'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)',
    'freq': 'sentry_groupedmessage.times_seen',
    'tottime': 'sentry_groupedmessage.time_spent_total',
    'avgtime': '(sentry_groupedmessage.time_spent_total / sentry_groupedmessage.time_spent_count)',
}
SCORE_CLAUSES = SORT_CLAUSES.copy()

SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES.update({
    'date': 'sentry_groupedmessage.last_seen',
    'new': 'sentry_groupedmessage.first_seen',
})
SQLITE_SCORE_CLAUSES = SQLITE_SORT_CLAUSES.copy()

MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES.update({
    'date': 'sentry_groupedmessage.last_seen',
    'new': 'sentry_groupedmessage.first_seen',
})
MYSQL_SCORE_CLAUSES = SCORE_CLAUSES.copy()
MYSQL_SCORE_CLAUSES.update({
    'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)',
    'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)',
})

SEARCH_SORT_OPTIONS = SortedDict((
    ('score', _('Score')),
    ('date', _('Last Seen')),
    ('new', _('First Seen')),
))

STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_MUTED = 2

STATUS_LEVELS = (
    (STATUS_UNRESOLVED, _('Unresolved')),
    (STATUS_RESOLVED, _('Resolved')),
    (STATUS_MUTED, _('Muted')),
)

MEMBER_OWNER = 0
MEMBER_USER = 50
MEMBER_SYSTEM = 100
MEMBER_TYPES = (
    (MEMBER_OWNER, _('Admin')),
    (MEMBER_USER, _('User')),
    (MEMBER_SYSTEM, _('System Agent')),
)
```

old_contents: identical to new_contents except that the translated display strings in `STATUS_LEVELS` and `MEMBER_TYPES` were lowercase: `_('unresolved')`, `_('resolved')`, `_('muted')`, `_('admin')`, `_('user')`, `_('system agent')`.

lang: Python
proba: 0.999887

---

commit: ca2a6d06f09f5f2d511d6cf676fdd9a8f6c411cf
subject: remove cruft, bump heroku
old_file: src/settings/production.py
new_file: src/settings/production.py
new_contents:

```python
from base import *

DEBUG = False

ALLOWED_HOSTS = ["*"]

SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "afaefawe23af")
assert SECRET_KEY, "Set your DJANGO_SECRET_KEY env var"

# Celery
BROKER_URL = os.environ.get('CLOUDAMQP_URL', None)
#assert BROKER_URL, "Celery BROKER_URL env var missing!"

# Memcached
CACHES = {
    'default': {
        'BACKEND': 'django_bmemcached.memcached.BMemcached',
        'LOCATION': os.environ.get('MEMCACHEDCLOUD_SERVERS', '').split(','),
        'OPTIONS': {
            'username': os.environ.get('MEMCACHEDCLOUD_USERNAME'),
            'password': os.environ.get('MEMCACHEDCLOUD_PASSWORD')
        }
    }
}
```

old_contents: identical to new_contents except for one extra commented line after the `BROKER_URL` assignment:

```python
# BROKER_URL = os.environ.get("RABBITMQ_BIGWIG_URL", None)
```

lang: Python
proba: 0

---

commit: 5526f8e3dca2f84fce34df5a134bada8479a2f69
subject: Fix dumpdata ordering for VRFs
old_file: netbox/ipam/models/__init__.py
new_file: netbox/ipam/models/__init__.py
new_contents:

```python
# Ensure that VRFs are imported before IPs/prefixes so dumpdata & loaddata work correctly
from .fhrp import *
from .vrfs import *
from .ip import *
from .services import *
from .vlans import *

__all__ = (
    'ASN',
    'Aggregate',
    'IPAddress',
    'IPRange',
    'FHRPGroup',
    'FHRPGroupAssignment',
    'Prefix',
    'RIR',
    'Role',
    'RouteTarget',
    'Service',
    'ServiceTemplate',
    'VLAN',
    'VLANGroup',
    'VRF',
)
```

old_contents: identical to new_contents except that the explanatory comment was absent and the imports were in alphabetical order:

```python
from .fhrp import *
from .ip import *
from .services import *
from .vlans import *
from .vrfs import *
```

lang: Python
proba: 0
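
The reordering works because a package's `__init__` controls model import (and therefore registration) order, which is the order `dumpdata` serializes models in; importing `.vrfs` before `.ip` puts VRF rows ahead of the prefixes and IP addresses that reference them. A sketch of the round trip this enables, assuming it runs inside a configured NetBox environment:

```python
# Hypothetical round trip the import order fixes: with VRFs serialized first,
# loaddata can resolve the VRF references on Prefix/IPAddress rows.
from django.core.management import call_command

call_command("dumpdata", "ipam", output="ipam.json")
call_command("loaddata", "ipam.json")
```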

---

commit: e0c046abe14d7666d9fea54dc0339579f2b0ba98
subject: Fix indentation
old_file: neuralmonkey/runners/runner.py
new_file: neuralmonkey/runners/runner.py
new_contents:

```python
from typing import Callable, Dict, List

import numpy as np
import tensorflow as tf

from neuralmonkey.runners.base_runner import (BaseRunner,
                                              Executable,
                                              ExecutionResult,
                                              NextExecute)

# tests: mypy,pylint

# pylint: disable=too-few-public-methods


class GreedyRunner(BaseRunner):

    def __init__(self,
                 output_series: str,
                 decoder,
                 postprocess: Callable[[List[str]], List[str]]=None) -> None:
        super(GreedyRunner, self).__init__(output_series, decoder)
        self._postprocess = postprocess
        self.image_summaries = tf.merge_summary(
            tf.get_collection("summary_val_plots"))

    def get_executable(self, train=False, summaries=True):
        if train:
            fecthes = {"train_xent": self._decoder.train_loss,
                       "runtime_xent": self._decoder.runtime_loss}
        else:
            fecthes = {"train_xent": tf.zeros([]),
                       "runtime_xent": tf.zeros([])}
        fecthes["decoded_logprobs"] = self._decoder.runtime_logprobs

        if summaries:
            fecthes['image_summaries'] = self.image_summaries

        return GreedyRunExecutable(self.all_coders, fecthes,
                                   self._decoder.vocabulary,
                                   self._postprocess)

    @property
    def loss_names(self) -> List[str]:
        return ["train_xent", "runtime_xent"]


class GreedyRunExecutable(Executable):

    def __init__(self, all_coders, fecthes, vocabulary, postprocess):
        self.all_coders = all_coders
        self._fetches = fecthes
        self._vocabulary = vocabulary
        self._postprocess = postprocess

        self.decoded_sentences = []
        self.result = None  # type: Option[ExecutionResult]

    def next_to_execute(self) -> NextExecute:
        """Get the feedables and tensors to run."""
        return self.all_coders, self._fetches, {}

    def collect_results(self, results: List[Dict]) -> None:
        train_loss = 0.
        runtime_loss = 0.
        summed_logprobs = [-np.inf for _ in self._fetches["decoded_logprobs"]]

        for sess_result in results:
            train_loss += sess_result["train_xent"]
            runtime_loss += sess_result["runtime_xent"]

            for i, logprob in enumerate(sess_result["decoded_logprobs"]):
                summed_logprobs[i] = np.logaddexp(summed_logprobs[i], logprob)

        argmaxes = [np.argmax(l, axis=1) for l in summed_logprobs]
        decoded_tokens = self._vocabulary.vectors_to_sentences(argmaxes)

        if self._postprocess is not None:
            decoded_tokens = [self._postprocess(seq) for seq in decoded_tokens]

        image_summaries = results[0].get('image_summaries')

        self.result = ExecutionResult(
            outputs=decoded_tokens,
            losses=[train_loss, runtime_loss],
            scalar_summaries=None,
            histogram_summaries=None,
            image_summaries=image_summaries
        )
```

old_contents: token-for-token identical to new_contents; the commit changed only indentation, which this flattened export does not preserve.

lang: Python
proba: 0.017244
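
The numerically interesting part of this file is the ensembling in `collect_results`, which combines per-session log-probabilities with `np.logaddexp`. A tiny check of the identity it relies on:

```python
import numpy as np

# logaddexp(a, b) == log(exp(a) + exp(b)) computed without leaving log space,
# so summing probabilities across sessions cannot underflow for tiny values.
a, b = np.log(1e-300), np.log(2e-300)
print(np.isclose(np.logaddexp(a, b), np.log(3e-300)))  # True
```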

---

commit: fe0691595eea7197db07f3505446e1553df3d188
subject: Bump version number after merging pull request.
old_file: src/openvr/version.py
new_file: src/openvr/version.py
new_contents:

```python
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
# http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
__version__ = '1.0.0602a'
```

old_contents: identical except `__version__ = '1.0.0601'`.

lang: Python
proba: 0

---

commit: 4e74ba40f442dd27ddd29464b518c2a06ad1019a
subject: Bump version
old_file: src/oscar/__init__.py
new_file: src/oscar/__init__.py
new_contents:

```python
import os

# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 22)


def get_short_version():
    return '%s.%s' % (VERSION[0], VERSION[1])


def get_version():
    return '{}.{}.{}-{}-{}'.format(*VERSION)


# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')

OSCAR_CORE_APPS = [
    'oscar',
    'oscar.apps.analytics',
    'oscar.apps.checkout',
    'oscar.apps.address',
    'oscar.apps.shipping',
    'oscar.apps.catalogue',
    'oscar.apps.catalogue.reviews',
    'oscar.apps.partner',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.order',
    'oscar.apps.customer',
    'oscar.apps.promotions',
    'oscar.apps.voucher',
    'oscar.apps.wishlists',
    'oscar.apps.dashboard',
    'oscar.apps.dashboard.reports',
    'oscar.apps.dashboard.users',
    'oscar.apps.dashboard.orders',
    'oscar.apps.dashboard.promotions',
    'oscar.apps.dashboard.catalogue',
    'oscar.apps.dashboard.offers',
    'oscar.apps.dashboard.partners',
    'oscar.apps.dashboard.pages',
    'oscar.apps.dashboard.ranges',
    'oscar.apps.dashboard.reviews',
    'oscar.apps.dashboard.vouchers',
    'oscar.apps.dashboard.communications',
    # 3rd-party apps that oscar depends on
    'treebeard',
    'sorl.thumbnail',
    'django_tables2',
]


def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides
    """
    if not overrides:
        return OSCAR_CORE_APPS

    def get_app_label(app_label, overrides):
        pattern = app_label.replace('oscar.apps.', '')
        for override in overrides:
            if override.endswith(pattern):
                if 'dashboard' in override and 'dashboard' not in pattern:
                    continue
                return override
        return app_label

    apps = []
    for app_label in OSCAR_CORE_APPS:
        apps.append(get_app_label(app_label, overrides))
    return apps
```

old_contents: identical except `VERSION = (1, 0, 1, 'machtfit', 21)`.

lang: Python
proba: 0
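
The five-element `VERSION` tuple feeds the format strings in `get_short_version()` and `get_version()`. A quick check of the resulting strings, copied from the module's own expressions:

```python
VERSION = (1, 0, 1, 'machtfit', 22)

print('%s.%s' % (VERSION[0], VERSION[1]))  # short version: 1.0
print('{}.{}.{}-{}-{}'.format(*VERSION))   # full version:  1.0.1-machtfit-22
```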

---

commit: 6011cf6d892d4ca941c47b578fdaebc80672f532
subject: Raise an error if the run was cancelled.
old_file: api/kiveapi/runstatus.py
new_file: api/kiveapi/runstatus.py
new_contents:

```python
"""
This module defines a class that keeps track of a run in Kive.
"""
from . import KiveRunFailedException
from .dataset import Dataset


class RunStatus(object):
    """
    This keeps track of a run in Kive.

    There isn't a direct analogue in Kive for this, but it
    represents a part of Run's functionality.
    """

    def __init__(self, obj, api):
        self.run_id = obj['id']
        self.pipeline_id = obj['pipeline']
        self.url = obj['run_status']
        self.results_url = obj['run_outputs']
        self.api = api
        self.raw = obj

    def _grab_stats(self):
        data = self.api.get(self.url).json()
        if "!" in data["status"]:
            raise KiveRunFailedException("Run %s failed" % self.run_id)
        if "x" in data["status"]:
            raise KiveRunFailedException("Run %s cancelled" % self.run_id)
        return data

    def get_status(self):
        """
        Queries the server for the status of a run

        :return: A description string of the status
        """
        # TODO: Make change kive to return sane overall statuses
        status = self._grab_stats()['status']
        if status == '?':
            return "Waiting to start..."
        if '*' in status and '.' not in status:
            return 'Complete.'
        return 'Running...'

    def is_waiting(self):
        """
        Returns whether or not the run is queued on the server for processing.
        """
        status = self._grab_stats()['status']
        return status == '?'

    def is_running(self):
        """
        Returns whether or not the run is running on the server
        """
        status = self._grab_stats()
        return status.get('start', False) and not status.get('end', False)

    def is_complete(self):
        """
        Returns whether or not the run has completed.
        """
        status = self._grab_stats()
        return status.get('end', None) is not None

    def is_successful(self):
        """
        Returns whether the run was successful, provided that it's also complete
        """
        return self.is_complete()

    def get_progress(self):
        """
        Gets the current run's progress bar
        """
        return self._grab_stats()['status']

    def get_progress_percent(self):
        """
        Gets the current progress as a percentage.
        """
        status = self._grab_stats()['status']
        return 100*float(status.count('*'))/float(len(status) - status.count('-'))

    def get_inputs(self):
        """
        Gets all the datasets that fed this pipeline.

        :return: A list of Dataset objects.
        """
        datasets = self.api.get(self.results_url).json()['input_summary']
        return [Dataset(d, self.api) for d in datasets]

    def get_results(self):
        """
        Gets all the datasets that resulted from this pipeline.

        Includes pipeline outputs and intermediate results.
        If the run is still active, return any outputs that are ready.

        :return: A dictionary of Dataset objects, keyed by name.
        """
        datasets = self.api.get(self.results_url).json()['output_summary']
        return {d['name']: Dataset(d, self.api) for d in datasets}
```

old_contents: identical to new_contents except that `_grab_stats` had no `"x"` (cancelled) check, and `get_status` raised the failure itself between the waiting and complete checks:

```python
if '!' in status:
    raise KiveRunFailedException("Run %s failed" % self.run_id)
```

lang: Python
proba: 0
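
The progress arithmetic in `get_progress_percent` treats `'*'` as a finished step and excludes `'-'` entries from the denominator; `'!'` and, after this commit, `'x'` anywhere in the bar raise `KiveRunFailedException`. A standalone rerun of the arithmetic:

```python
# Status-bar arithmetic from RunStatus.get_progress_percent, on a sample bar:
# '*' finished, '.' pending, '-' excluded from the denominator.
status = "**--."
percent = 100 * float(status.count('*')) / float(len(status) - status.count('-'))
print(percent)  # 66.66... (2 of 3 counted steps done)
```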

---

commit: 580c133c09758050aae30ae3aa453ce3c5b22e56
subject: refactor python
old_file: Python/stack.py
new_file: Python/stack.py
new_contents:

```python
__author__ = 'Daniel'


class Stack():
    def __init__(self):
        self.items = []

    def push(self, item):
        self.items.append(item)

    def is_empty(self):
        return self.items == []

    def size(self):
        return len(self.items)

    def pop(self):
        return self.items.pop()

    def peek(self):
        return self.items[len(self.items) - 1]


def check_parentheses(inp):
    stack = Stack()
    for c in inp:
        if c == ')' or c == ']' or c == '}' or c == '>':
            if stack.is_empty() or stack.pop() != c:
                return False
        if c == '(':
            stack.push(')')
        if c == '[':
            stack.push(']')
        if c == '{':
            stack.push('}')
        if c == '<':
            stack.push('>')
    return stack.is_empty()


def to_base(num, base):
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    stack = Stack()
    while num > 0:
        stack.push(digits[num % base])
        num //= base
    res = ""
    while not stack.is_empty():
        res += stack.pop()
    return res


def to_binary(num):
    return to_base(num, 2)


def to_postfix(string):
    tokens = string.split()
    prec = {"+": 1, "-": 1, "*": 2, "/": 2, "(": 0, ")": 0}
    operators = Stack()
    res = []
    for token in tokens:
        if token == "(":
            operators.push("(")
        elif token == ")":
            op = operators.pop()
            while op != "(":
                res.append(op)
                op = operators.pop()
        elif token in "+-*/":
            while not operators.is_empty() and prec[token] <= prec[operators.peek()]:
                res.append(operators.pop())
            operators.push(token)
        else:
            res.append(token)
    while not operators.is_empty():
        res.append(operators.pop())
    return " ".join(res)


def eval_postfix(string):
    tokens = string.split()

    def eval_op(f):
        right = eval_stack.pop()
        left = eval_stack.pop()
        eval_stack.push(f(left, right))

    eval_stack = Stack()
    from operator import add, floordiv, mul, sub
    for token in tokens:
        if token == "+":
            eval_op(add)
        elif token == "-":
            eval_op(sub)
        elif token == "*":
            eval_op(mul)
        elif token == "/":
            eval_op(floordiv)
        else:
            eval_stack.push(int(token))
    return eval_stack.pop()
```

old_contents: identical up to `eval_postfix`, which instead spelled out the pop-pop-push sequence for each operator on an `evalStack` variable:

```python
def eval_postfix(string):
    tokens = string.split()
    evalStack = Stack()
    for token in tokens:
        if token == "+":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left + right)
        elif token == "-":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left - right)
        elif token == "*":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left * right)
        elif token == "/":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left / right)
        else:
            evalStack.push(int(token))
    return evalStack.pop()
```

lang: Python
proba: 0.999983
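
The refactor collapses four copies of the pop-pop-push pattern into `eval_op(f)`, parameterized by functions from the `operator` module; swapping `/` for `floordiv` also keeps the evaluator integer-only, matching Python 2's integer `/`. The building blocks in isolation:

```python
# The operator module exposes the arithmetic operators as plain functions,
# which is what lets eval_postfix dispatch on a token with eval_op(f).
from operator import add, floordiv, mul, sub

print(add(2, 3), sub(2, 3), mul(2, 3), floordiv(7, 2))  # 5 -1 6 3
```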

---

commit: 343fa1849457202a393ccfdc5b86075cc1b0b88c
subject: add observables
old_file: plugins/feeds/public/hybdrid_analysis.py
new_file: plugins/feeds/public/hybdrid_analysis.py
new_contents:

```python
import logging
from datetime import timedelta

from core.errors import ObservableValidationError
from core.feed import Feed
from core.observables import Hash, Hostname


class Hybrid_Analysis(Feed):
    default_values = {
        "frequency": timedelta(minutes=5),
        "name": "Hybdrid-Analysis",
        "source": "https://www.hybrid-analysis.com/feed?json",
        "description": "Hybrid Analysis Public Feeds",
    }

    def update(self):
        for item in self.update_json(headers={'User-agent': 'VxApi Connector'})['data']:
            self.analyze(item)
        pass

    def analyze(self, item):
        sha256 = Hash.get_or_create(value=item['sha256'])
        tags = []
        context = {'source': self.name}

        if 'vxfamily' in item:
            tags.append(' '.join(item['vxfamily'].split('.')))
        if 'tags' in item:
            tags.extend(item['tags'])
        if 'threatlevel_human' in item:
            context['threatlevel_human'] = item['threatlevel_human']
        if 'threatlevel' in item:
            context['threatlevel'] = item['threatlevel']
        if 'type' in item:
            context['type'] = item['type']
        if 'size' in item:
            context['size'] = item['size']
        if 'vt_detect' in item:
            context['virustotal_score'] = item['vt_detect']
        if 'et_alerts_total' in item:
            context['et_alerts_total'] = item['et_alerts_total']
        if 'process_list' in item:
            context['count process spawn'] = len(item['process_list'])

        context['url'] = 'https://www.hybrid-analysis.com' + item['reporturl']

        sha256.add_context(context)
        sha256.tag(tags)

        md5 = Hash.get_or_create(value=item['md5'])
        md5.tag(tags)
        md5.add_context(context)

        sha1 = Hash.get_or_create(value=item['sha1'])
        sha1.tag(tags)
        sha1.add_context(context)

        sha256.active_link_to(md5, 'md5', self.name)
        sha256.active_link_to(sha1, 'sha1', self.name)

        if 'domains' in item:
            for domain in item['domains']:
                try:
                    new_host = Hostname.get_or_create(value=domain)
                    sha256.active_link_to(new_host, 'C2', self.name)
                    sha1.active_link_to(new_host, 'C2', self.name)
                    md5.active_link_to(new_host, 'C2', self.name)
                    new_host.add_context({'source': self.name, 'contacted by': sha256})
                except ObservableValidationError as e:
                    logging.error(e)

        if 'extracted_files' in item:
            for extracted_file in item['extracted_files']:
                context_file_dropped = {'source': self.name}

                if not 'sha256' in extracted_file:
                    logging.error(extracted_file)
                    continue

                new_file = Hash.get_or_create(value=extracted_file['sha256'])
                context_file_dropped['virustotal_score'] = 0
                context_file_dropped['size'] = extracted_file['file_size']

                if 'av_matched' in extracted_file:
                    context_file_dropped['virustotal_score'] = extracted_file['av_matched']
                if 'threatlevel_readable' in extracted_file:
                    context_file_dropped['threatlevel'] = extracted_file['threatlevel_readable']
                if 'av_label' in extracted_file:
                    new_file.tag(extracted_file['av_label'])
                if 'type_tags' in extracted_file:
                    new_file.tag(extracted_file['type_tags'])

                new_file.add_context(context_file_dropped)
                new_file.active_link_to(sha256, 'drop', self.name)
                new_file.active_link_to(md5, 'drop', self.name)
                new_file.active_link_to(sha1, 'drop', self.name)
```

old_contents: identical to new_contents up to and including the `domains` block (with `{'source':self.name, ...}` unspaced), and ending there; the entire `extracted_files` block is what this commit adds.

lang: Python
proba: 0.00209

---

commit: aa7bbd84fa16105417ceb7f9e06d392a4e54fdc6
subject: Remove unused import
old_file: salt/beacons/twilio_txt_msg.py
new_file: salt/beacons/twilio_txt_msg.py
new_contents:

```python
# -*- coding: utf-8 -*-
'''
Beacon to emit Twilio text messages
'''

# Import Python libs
from __future__ import absolute_import
import logging

# Import 3rd Party libs
try:
    from twilio.rest import TwilioRestClient
    HAS_TWILIO = True
except ImportError:
    HAS_TWILIO = False

log = logging.getLogger(__name__)

__virtualname__ = 'twilio_txt_msg'


def __virtual__():
    if HAS_TWILIO:
        return __virtualname__
    else:
        return False


def beacon(config):
    '''
    Emit a dict name "texts" whose value is a list of texts.

    .. code-block:: yaml

        beacons:
          twilio_txt_msg:
            account_sid: "<account sid>"
            auth_token: "<auth token>"
            twilio_number: "+15555555555"
            interval: 10
    '''
    log.trace('twilio_txt_msg beacon starting')
    ret = []
    if not all([config['account_sid'],
                config['auth_token'],
                config['twilio_number']]):
        return ret
    output = {}
    output['texts'] = []
    client = TwilioRestClient(config['account_sid'], config['auth_token'])
    messages = client.messages.list(to=config['twilio_number'])
    log.trace('Num messages: {0}'.format(len(messages)))
    if len(messages) < 1:
        log.trace('Twilio beacon has no texts')
        return ret

    for message in messages:
        item = {}
        item['id'] = str(message.sid)
        item['body'] = str(message.body)
        item['from'] = str(message.from_)
        item['sent'] = str(message.date_sent)
        item['images'] = []

        if int(message.num_media):
            media = client.media(message.sid).list()
            if len(media):
                for pic in media:
                    item['images'].append(str(pic.uri))
        output['texts'].append(item)
        message.delete()
    ret.append(output)
    return ret
```

old_contents: identical to new_contents except for one extra import in the Python-libs block, the unused `from datetime import datetime`.

lang: Python
proba: 0.000001

---

commit: c340c1b92a3d82a25ce2e43b19603ee58de0b146
subject: Improve celery logging
old_file: home/core/async.py
new_file: home/core/async.py
new_contents:

```python
"""
async.py
~~~~~~~~

Handles running of tasks in an asynchronous fashion. Not explicitly tied to
Celery. The `run` method simply must exist here and handle the execution of
whatever task is passed to it, whether or not it is handled asynchronously.
"""

from apscheduler.schedulers.background import BackgroundScheduler
from celery import Celery
from celery.security import setup_security
from celery.utils.log import get_task_logger

setup_security(allowed_serializers=['pickle', 'json'],
               serializer='pickle')
queue = Celery('home', broker='redis://', backend='redis://', serializer='pickle')
queue.conf.update(
    CELERY_TASK_SERIALIZER='pickle',
    CELERY_ACCEPT_CONTENT=['pickle', 'json'],
)
scheduler = BackgroundScheduler()
scheduler.start()
logger = get_task_logger(__name__)


@queue.task
def _run(method, **kwargs) -> None:
    """
    Run the configured actions in multiple processes.
    """
    logger.info('Running {} with config: {}'.format(method.__name__, kwargs))
    method(**kwargs)


def run(method, delay=0, **kwargs):
    return _run.apply_async(args=[method], kwargs=kwargs, countdown=float(delay))
```

old_contents: identical to new_contents except that the `get_task_logger` import, the `logger = get_task_logger(__name__)` line, and the `logger.info(...)` call in `_run` were absent.

lang: Python
proba: 0.000005
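
The change swaps a silent task body for one that logs through Celery's task-aware logger. `get_task_logger` is part of Celery's public API; a minimal sketch of the pattern on its own, with the Redis broker assumed as in the file above:

```python
# get_task_logger returns a logger namespaced under the task, so records
# carry task context when emitted from inside a running task.
from celery import Celery
from celery.utils.log import get_task_logger

app = Celery('demo', broker='redis://')  # broker URL assumed for the sketch
logger = get_task_logger(__name__)


@app.task
def work(**kwargs):
    logger.info('Running work with config: %s', kwargs)
```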
8414668c97c359a39cf96a37819cb7e37b54c670
Fix new Pylint
ixdjango/utils.py
ixdjango/utils.py
""" Utility classes/functions """ import os from random import choice import re from subprocess import PIPE, Popen ALPHANUMERIC = 'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789' def random_string(length=10, chars=ALPHANUMERIC): """ Generates a random string of length specified and using supplied chars. Useful for salting hashing functions """ return ''.join([choice(chars) for _ in range(length)]) def querydict_to_dict(querydict): """ Converts a QueryDict instance (i.e.request params) into a plain dictionary """ pure_dict = {} for item_key in querydict.keys(): item_val_list = querydict.getlist(item_key) if item_val_list: if len(item_val_list) == 0: pure_dict[item_key] = None if len(item_val_list) == 1: pure_dict[item_key] = item_val_list[0] else: pure_dict[item_key] = item_val_list else: pure_dict[item_key] = None return pure_dict def remote_addr_from_request(request): """ Returns the correct remote address from the request object. If the request was proxied, this correct information is in HTTP_X_FORWARDED_FOR """ if not request: raise TypeError("No request passed to function") if 'HTTP_X_FORWARDED_FOR' in request.META: return request.META['HTTP_X_FORWARDED_FOR'] else: return request.META['REMOTE_ADDR'] def flatten_request_header(header): """ Transform a dict representing header parameters into a flat string of comma separated parameters suitable for inserting into the actual headers """ flattened_header = '' if isinstance(header, dict): contents = [] for content_key, content_val in header.items(): contents.append('%s="%s"' % (content_key, content_val)) flattened_header = ','.join(contents) else: flattened_header = str(header) return flattened_header def flatten_auth_header(headers_dict, auth_type): """ Auth headers have auth type at the start of the string """ return "%s %s" % (auth_type, flatten_request_header(headers_dict)) def flat_header_val_to_dict(header_val): """ Transform a header string of comma separated parameters into a dict """ val_dict = {} val_comps = header_val.rsplit(',') if len(val_comps): for val_comp in val_comps: key, sep, val = val_comp.partition("=") if sep != "=": return {} key = key.strip() val = val.strip() val = val.strip('"') if key in val_dict: if isinstance(val_dict[key], list): val_dict[key].append(val) else: val_dict[key] = [val_dict[key], val] else: val_dict[key] = val return val_dict def flat_auth_header_val_to_data(header_val): """ Capture auth type from the string and then remove it before passing on to flat_header_val_to_dict """ match = re.match(r'^([\S]+[\s]+)?(.*)$', header_val) if match and match.group(1): return (flat_header_val_to_dict(match.group(2).strip()), match.group(1).strip()) return (flat_header_val_to_dict(header_val), None) def get_npm_module(module): """ Return the path of an npm module binary Example: get_npm_module('lessc') """ proc = Popen(['npm', 'bin'], stdout=PIPE) proc.wait() path = proc.stdout.read().strip() return os.path.join(path, module)
""" Utility classes/functions """ import os from random import choice import re from subprocess import PIPE, Popen def random_string( length=10, chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789' ): """ Generates a random string of length specified and using supplied chars. Useful for salting hashing functions """ return ''.join([choice(chars) for _ in range(length)]) def querydict_to_dict(querydict): """ Converts a QueryDict instance (i.e.request params) into a plain dictionary """ pure_dict = {} for item_key in querydict.keys(): item_val_list = querydict.getlist(item_key) if item_val_list: if len(item_val_list) == 0: pure_dict[item_key] = None if len(item_val_list) == 1: pure_dict[item_key] = item_val_list[0] else: pure_dict[item_key] = item_val_list else: pure_dict[item_key] = None return pure_dict def remote_addr_from_request(request): """ Returns the correct remote address from the request object. If the request was proxied, this correct information is in HTTP_X_FORWARDED_FOR """ if not request: raise TypeError("No request passed to function") if 'HTTP_X_FORWARDED_FOR' in request.META: return request.META['HTTP_X_FORWARDED_FOR'] else: return request.META['REMOTE_ADDR'] def flatten_request_header(header): """ Transform a dict representing header parameters into a flat string of comma separated parameters suitable for inserting into the actual headers """ flattened_header = '' if isinstance(header, dict): contents = [] for content_key, content_val in header.items(): contents.append('%s="%s"' % (content_key, content_val)) flattened_header = ','.join(contents) else: flattened_header = str(header) return flattened_header def flatten_auth_header(headers_dict, auth_type): """ Auth headers have auth type at the start of the string """ return "%s %s" % (auth_type, flatten_request_header(headers_dict)) def flat_header_val_to_dict(header_val): """ Transform a header string of comma separated parameters into a dict """ val_dict = {} val_comps = header_val.rsplit(',') if len(val_comps): for val_comp in val_comps: key, sep, val = val_comp.partition("=") if sep != "=": return {} key = key.strip() val = val.strip() val = val.strip('"') if key in val_dict: if isinstance(val_dict[key], list): val_dict[key].append(val) else: val_dict[key] = [val_dict[key], val] else: val_dict[key] = val return val_dict def flat_auth_header_val_to_data(header_val): """ Capture auth type from the string and then remove it before passing on to flat_header_val_to_dict """ match = re.match(r'^([\S]+[\s]+)?(.*)$', header_val) if match and match.group(1): return (flat_header_val_to_dict(match.group(2).strip()), match.group(1).strip()) return (flat_header_val_to_dict(header_val), None) def get_npm_module(module): """ Return the path of an npm module binary Example: get_npm_module('lessc') """ proc = Popen(['npm', 'bin'], stdout=PIPE) proc.wait() path = proc.stdout.read().strip() return os.path.join(path, module)
Python
0.000025
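The header helpers in this record are inverses of each other; a quick round-trip check, assuming the module is importable under the `ixdjango.utils` path shown above:

from ixdjango.utils import flat_auth_header_val_to_data, flatten_auth_header

params = {'realm': 'api', 'nonce': 'abc123'}
flat = flatten_auth_header(params, 'Digest')
# e.g. 'Digest realm="api",nonce="abc123"' (parameter order may vary)
parsed, auth_type = flat_auth_header_val_to_data(flat)
assert auth_type == 'Digest'
assert parsed == params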
771860a6a9176dc6627f25f5faac960ab3edcc50
add expand user
src/speaker-recognition.py
src/speaker-recognition.py
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: speaker-recognition.py
# Date: Sat Nov 29 14:06:43 2014 +0800
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>

import argparse
import sys
import glob
import os
import itertools
import scipy.io.wavfile as wavfile

sys.path.append(os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'gui'))

from gui.interface import ModelInterface
from filters.silence import remove_silence

def get_args():
    desc = "Speaker Recognition Command Line Tool"
    epilog = """
Wav files in each input directory will be labeled as the basename of the directory.
Note that wildcard inputs should be *quoted*, and they will be sent to glob module.

Examples:
    Train: ./speaker-recognition.py -t enroll -i "/tmp/person* ./mary" -m model.out
    Predict: ./speaker-recognition.py -t predict -i "./*.wav" -m model.out
"""
    parser = argparse.ArgumentParser(description=desc, epilog=epilog,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('-t', '--task',
                        help='Task to do. Either "enroll" or "predict"',
                        required=True)

    parser.add_argument('-i', '--input',
                        help='Input Files(to predict) or Directories(to enroll)',
                        required=True)

    parser.add_argument('-m', '--model',
                        help='Model file to save(in enroll) or use(in predict)',
                        required=True)

    ret = parser.parse_args()
    return ret

def task_enroll(input_dirs, output_model):
    m = ModelInterface()
    input_dirs = [os.path.expanduser(k) for k in input_dirs.strip().split()]
    dirs = itertools.chain(*(glob.glob(d) for d in input_dirs))
    dirs = [d for d in dirs if os.path.isdir(d)]
    if len(dirs) == 0:
        print "No valid directory found!"
        sys.exit(1)
    for d in dirs:
        label = os.path.basename(d)
        wavs = glob.glob(d + '/*.wav')
        if len(wavs) == 0:
            print "No wav file found in {0}".format(d)
            continue
        print "Label {0} has files {1}".format(label, ','.join(wavs))
        for wav in wavs:
            fs, signal = wavfile.read(wav)
            m.enroll(label, fs, signal)

    m.train()
    m.dump(output_model)

def task_predict(input_files, input_model):
    m = ModelInterface.load(input_model)
    # glob.glob takes a single pattern string, so expand "~" on the whole
    # pattern rather than mapping over it (iterating a string would expand
    # it character by character).
    for f in glob.glob(os.path.expanduser(input_files)):
        fs, signal = wavfile.read(f)
        label = m.predict(fs, signal)
        print f, '->', label

if __name__ == '__main__':
    global args
    args = get_args()

    task = args.task
    if task == 'enroll':
        task_enroll(args.input, args.model)
    elif task == 'predict':
        task_predict(args.input, args.model)
#!/usr/bin/env python2 # -*- coding: UTF-8 -*- # File: speaker-recognition.py # Date: Wed Oct 29 22:42:26 2014 +0800 # Author: Yuxin Wu <ppwwyyxxc@gmail.com> import argparse import sys import glob import os import itertools import scipy.io.wavfile as wavfile sys.path.append(os.path.join( os.path.dirname(os.path.realpath(__file__)), 'gui')) from gui.interface import ModelInterface from filters.silence import remove_silence def get_args(): desc = "Speaker Recognition Command Line Tool" epilog = """ Wav files in each input directory will be labeled as the basename of the directory. Note that wildcard inputs should be *quoted*, and they will be sent to glob module. Examples: Train: ./speaker-recognition.py -t enroll -i "/tmp/person* ./mary" -m model.out Predict: ./speaker-recognition.py -t predict -i "./*.wav" -m model.out """ parser = argparse.ArgumentParser(description=desc,epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-t', '--task', help='Task to do. Either "enroll" or "predict"', required=True) parser.add_argument('-i', '--input', help='Input Files(to predict) or Directories(to enroll)', required=True) parser.add_argument('-m', '--model', help='Model file to save(in enroll) or use(in predict)', required=True) ret = parser.parse_args() return ret def task_enroll(input_dirs, output_model): m = ModelInterface() input_dirs = input_dirs.strip().split() dirs = itertools.chain(*(glob.glob(d) for d in input_dirs)) dirs = [d for d in dirs if os.path.isdir(d)] files = [] if len(dirs) == 0: print "No valid directory found!" sys.exit(1) for d in dirs: label = os.path.basename(d) wavs = glob.glob(d + '/*.wav') if len(wavs) == 0: print "No wav file found in {0}".format(d) continue print "Label {0} has files {1}".format(label, ','.join(wavs)) for wav in wavs: fs, signal = wavfile.read(wav) m.enroll(label, fs, signal) m.train() m.dump(output_model) def task_predict(input_files, input_model): m = ModelInterface.load(input_model) for f in glob.glob(input_files): fs, signal = wavfile.read(f) label = m.predict(fs, signal) print f, '->', label if __name__ == '__main__': global args args = get_args() task = args.task if task == 'enroll': task_enroll(args.input, args.model) elif task == 'predict': task_predict(args.input, args.model)
Python
0.000002
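A tiny demonstration of why this commit wraps the inputs in `os.path.expanduser` before globbing: glob does not expand "~" itself. The pattern below is a hypothetical user input.

import glob
import os.path

pattern = '~/recordings/*.wav'
print(glob.glob(pattern))                      # [] -- "~" is taken literally
print(glob.glob(os.path.expanduser(pattern)))  # matches under $HOME, if any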
06a648614d51e2c9f456a33dc164c11021c724a8
Handle adding to WHERE where WHERE already exists.
gemini/gemini_region.py
gemini/gemini_region.py
#!/usr/bin/env python import sqlite3 import re import os import sys import GeminiQuery def _report_results(args, query, gq): # report the results of the region query gq.run(query) if args.use_header and gq.header: print gq.header for row in gq: print row def get_region(args, gq): region_regex = re.compile("(\S+):(\d+)-(\d+)") try: region = region_regex.findall(args.region)[0] except IndexError: sys.exit("Malformed region (--reg) string") if len(region) != 3: sys.exit("Malformed region (--reg) string") chrom = region[0] start = region[1] end = region[2] if args.columns is not None: query = "SELECT " + str(args.columns) + \ " FROM variants " else: query = "SELECT * FROM variants " query += "WHERE chrom = " + "'" + chrom + "'" + \ " AND ((start BETWEEN " + start + " AND " + end + ")" +\ " OR (end BETWEEN " + start + " AND " + end + "))" if args.filter: query += " AND " + args.filter query += " ORDER BY chrom, start" _report_results(args, query, gq) def get_gene(args, gq): """ Report all variants in a specific gene. """ if args.columns is not None: query = "SELECT " + str(args.columns) + \ " FROM variants " else: query = "SELECT * FROM variants " query += "WHERE gene = " + "'" + args.gene + "' " if args.filter: query += " AND " + args.filter query += " ORDER BY chrom, start" _report_results(args, query, gq) def add_region_to_query(args): region_regex = re.compile("(\S+):(\d+)-(\d+)") try: region = region_regex.findall(args.region)[0] except IndexError: sys.exit("Malformed region (--reg) string") if len(region) != 3: sys.exit("Malformed region (--reg) string") chrom = region[0] start = region[1] end = region[2] where_clause = " chrom = " + "'" + chrom + "'" + \ " AND ((start BETWEEN " + start + " AND " + end + ")" +\ " OR (end BETWEEN " + start + " AND " + end + "))" args.query = _add_to_where_clause(args.query, where_clause) def _add_to_where_clause(query, where_clause): where_index = query.lower().find("where") prefix = query[0:where_index] suffix = query[where_index + len("where"):] if where_index == -1: query += " WHERE " + where_clause else: query = "{0} WHERE ({1}) AND ({2})".format(prefix, suffix, where_clause) return query def region(parser, args): if os.path.exists(args.db): gq = GeminiQuery.GeminiQuery(args.db, out_format=args.format) if args.region is not None and args.gene is not None: sys.exit('EXITING: Choose either --reg or --gene, not both.\n') elif args.region is not None: get_region(args, gq) elif args.gene is not None: get_gene(args, gq)
#!/usr/bin/env python import sqlite3 import re import os import sys import GeminiQuery def _report_results(args, query, gq): # report the results of the region query gq.run(query) if args.use_header and gq.header: print gq.header for row in gq: print row def get_region(args, gq): region_regex = re.compile("(\S+):(\d+)-(\d+)") try: region = region_regex.findall(args.region)[0] except IndexError: sys.exit("Malformed region (--reg) string") if len(region) != 3: sys.exit("Malformed region (--reg) string") chrom = region[0] start = region[1] end = region[2] if args.columns is not None: query = "SELECT " + str(args.columns) + \ " FROM variants " else: query = "SELECT * FROM variants " query += "WHERE chrom = " + "'" + chrom + "'" + \ " AND ((start BETWEEN " + start + " AND " + end + ")" +\ " OR (end BETWEEN " + start + " AND " + end + "))" if args.filter: query += " AND " + args.filter query += " ORDER BY chrom, start" _report_results(args, query, gq) def get_gene(args, gq): """ Report all variants in a specific gene. """ if args.columns is not None: query = "SELECT " + str(args.columns) + \ " FROM variants " else: query = "SELECT * FROM variants " query += "WHERE gene = " + "'" + args.gene + "' " if args.filter: query += " AND " + args.filter query += " ORDER BY chrom, start" _report_results(args, query, gq) def add_region_to_query(args): region_regex = re.compile("(\S+):(\d+)-(\d+)") try: region = region_regex.findall(args.region)[0] except IndexError: sys.exit("Malformed region (--reg) string") if len(region) != 3: sys.exit("Malformed region (--reg) string") chrom = region[0] start = region[1] end = region[2] where_clause = " WHERE chrom = " + "'" + chrom + "'" + \ " AND ((start BETWEEN " + start + " AND " + end + ")" +\ " OR (end BETWEEN " + start + " AND " + end + "))" args.query += where_clause def region(parser, args): if os.path.exists(args.db): gq = GeminiQuery.GeminiQuery(args.db, out_format=args.format) if args.region is not None and args.gene is not None: sys.exit('EXITING: Choose either --reg or --gene, not both.\n') elif args.region is not None: get_region(args, gq) elif args.gene is not None: get_gene(args, gq)
Python
0
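The two code paths of the new `_add_to_where_clause` are easiest to see on concrete queries. Below is a standalone copy of the helper (same logic, reordered slightly so it reads top-down) with example inputs:

def _add_to_where_clause(query, where_clause):
    where_index = query.lower().find("where")
    if where_index == -1:
        return query + " WHERE " + where_clause
    prefix = query[0:where_index]
    suffix = query[where_index + len("where"):]
    return "{0} WHERE ({1}) AND ({2})".format(prefix, suffix, where_clause)

print(_add_to_where_clause("SELECT * FROM variants", "chrom = 'chr1'"))
# SELECT * FROM variants WHERE chrom = 'chr1'
print(_add_to_where_clause("SELECT * FROM variants WHERE start > 5",
                           "chrom = 'chr1'"))
# SELECT * FROM variants  WHERE ( start > 5) AND (chrom = 'chr1')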
9b18db54d64e168231079255334649fb9b503f3e
Add murrine back into monodevelop-mac-dev packages list
profiles/monodevelop-mac-dev/packages.py
profiles/monodevelop-mac-dev/packages.py
import os from bockbuild.darwinprofile import DarwinProfile class MonoDevelopMacDevPackages: def __init__ (self): # Toolchain self.packages.extend ([ 'autoconf.py', 'automake.py', 'libtool.py', 'gettext.py', 'pkg-config.py' ]) # Base Libraries self.packages.extend ([ 'libpng.py', 'libjpeg.py', 'libtiff.py', 'libxml2.py', 'freetype.py', 'fontconfig.py', 'pixman.py', 'cairo.py', 'glib.py', 'pango.py', 'atk.py', 'intltool.py', 'gdk-pixbuf.py', 'gtk+.py', 'libglade.py', ]) # Theme self.packages.extend ([ 'librsvg.py', 'hicolor-icon-theme.py', 'gtk-engines.py', 'murrine.py', 'gtk-quartz-engine.py' ]) # Mono self.packages.extend ([ 'mono.py', 'gtk-sharp.py', 'mono-addins.py', ]) self.packages = [os.path.join ('..', '..', 'packages', p) for p in self.packages]
import os from bockbuild.darwinprofile import DarwinProfile class MonoDevelopMacDevPackages: def __init__ (self): # Toolchain self.packages.extend ([ 'autoconf.py', 'automake.py', 'libtool.py', 'gettext.py', 'pkg-config.py' ]) # Base Libraries self.packages.extend ([ 'libpng.py', 'libjpeg.py', 'libtiff.py', 'libxml2.py', 'freetype.py', 'fontconfig.py', 'pixman.py', 'cairo.py', 'glib.py', 'pango.py', 'atk.py', 'intltool.py', 'gdk-pixbuf.py', 'gtk+.py', 'libglade.py', ]) # Theme self.packages.extend ([ 'librsvg.py', 'hicolor-icon-theme.py', 'gtk-engines.py', 'gtk-quartz-engine.py' ]) # Mono self.packages.extend ([ 'mono.py', 'gtk-sharp.py', 'mono-addins.py', ]) self.packages = [os.path.join ('..', '..', 'packages', p) for p in self.packages]
Python
0
5ca4e1df8fc67f9b56d5ea55cb4e17e78c5c6ed5
Fix test factory
project/apps/smanager/tests/factories.py
project/apps/smanager/tests/factories.py
# Standard Library import datetime import rest_framework_jwt # Third-Party from factory import Faker # post_generation, from factory import Iterator from factory import LazyAttribute from factory import PostGenerationMethodCall from factory import RelatedFactory from factory import Sequence from factory import SubFactory from factory.django import DjangoModelFactory from factory.django import mute_signals from factory.fuzzy import FuzzyInteger # Django from django.db.models.signals import pre_delete from django.db.models.signals import pre_save from django.db.models.signals import m2m_changed # First-Party from apps.smanager.models import Repertory from apps.smanager.models import Assignment from apps.smanager.models import Contest from apps.smanager.models import Entry from apps.smanager.models import Session from rest_framework_jwt.models import User class AssignmentFactory(DjangoModelFactory): # status = Assignment.STATUS.active kind = Assignment.KIND.official # convention = SubFactory('factories.ConventionFactory') # person = SubFactory('factories.PersonFactory') class Meta: model = Assignment class ContestFactory(DjangoModelFactory): # status = Contest.STATUS.included session = SubFactory('apps.smanager.tests.factories.SessionFactory') # award = SubFactory('factories.AwardFactory') class Meta: model = Contest class EntryFactory(DjangoModelFactory): status = Entry.STATUS.new is_evaluation = True is_private = False session = SubFactory('apps.smanager.tests.factories.SessionFactory') # group = SubFactory('factories.GroupFactory') class Meta: model = Entry class RepertoryFactory(DjangoModelFactory): # status = Repertory.STATUS.active # group = SubFactory('factories.GroupFactory') entry = SubFactory('apps.smanager.tests.factories.EntryFactory') class Meta: model = Repertory class SessionFactory(DjangoModelFactory): status = Session.STATUS.new kind = Session.KIND.quartet name = "International Championship" district = Session.DISTRICT.bhs is_invitational = False num_rounds = 2 # convention = SubFactory('factories.ConventionFactory') class Meta: model = Session # @post_generation # def create_rounds(self, create, extracted, **kwargs): # if create: # for i in range(self.num_rounds): # num = i + 1 # kind = self.num_rounds - i # RoundFactory( # session=self, # num=num, # kind=kind, # ) @mute_signals(pre_delete, pre_save, m2m_changed) class UserFactory(DjangoModelFactory): username = Faker('uuid4') password = PostGenerationMethodCall('set_password', 'password') is_staff = False class Meta: model = User
# Standard Library import datetime import rest_framework_jwt # Third-Party from factory import Faker # post_generation, from factory import Iterator from factory import LazyAttribute from factory import PostGenerationMethodCall from factory import RelatedFactory from factory import Sequence from factory import SubFactory from factory.django import DjangoModelFactory from factory.django import mute_signals from factory.fuzzy import FuzzyInteger # Django from django.db.models.signals import pre_delete from django.db.models.signals import pre_save from django.db.models.signals import m2m_changed # First-Party from apps.smanager.models import Repertory from apps.smanager.models import Assignment from apps.smanager.models import Contest from apps.smanager.models import Entry from apps.smanager.models import Session from rest_framework_jwt.models import User class AssignmentFactory(DjangoModelFactory): # status = Assignment.STATUS.active kind = Assignment.KIND.official # convention = SubFactory('factories.ConventionFactory') # person = SubFactory('factories.PersonFactory') class Meta: model = Assignment class ContestFactory(DjangoModelFactory): # status = Contest.STATUS.included session = SubFactory('apps.smanager.tests.factories.SessionFactory') # award = SubFactory('factories.AwardFactory') class Meta: model = Contest class EntryFactory(DjangoModelFactory): status = Entry.STATUS.new is_evaluation = True is_private = False session = SubFactory('apps.smanager.tests.factories.SessionFactory') # group = SubFactory('factories.GroupFactory') class Meta: model = Entry class RepertoryFactory(DjangoModelFactory): # status = Repertory.STATUS.active # group = SubFactory('factories.GroupFactory') entry = SubFactory('apps.smanager.tests.factories.EntryFactory') class Meta: model = Repertory class SessionFactory(DjangoModelFactory): status = Session.STATUS.new kind = Session.KIND.quartet is_invitational = False num_rounds = 2 # convention = SubFactory('factories.ConventionFactory') class Meta: model = Session # @post_generation # def create_rounds(self, create, extracted, **kwargs): # if create: # for i in range(self.num_rounds): # num = i + 1 # kind = self.num_rounds - i # RoundFactory( # session=self, # num=num, # kind=kind, # ) @mute_signals(pre_delete, pre_save, m2m_changed) class UserFactory(DjangoModelFactory): username = Faker('uuid4') password = PostGenerationMethodCall('set_password', 'password') is_staff = False class Meta: model = User
Python
0.000001
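The fix above adds `name` and `district` defaults to SessionFactory; factory attributes become constructor arguments for the model. A plain-Python sketch of the same declarative mechanics with factory_boy (the `Session` class below is a hypothetical stand-in, not the Django model):

import factory

class Session(object):
    def __init__(self, kind, name, num_rounds):
        self.kind = kind
        self.name = name
        self.num_rounds = num_rounds

class SessionFactory(factory.Factory):
    class Meta:
        model = Session

    kind = 'quartet'
    name = 'International Championship'
    num_rounds = 2

session = SessionFactory(num_rounds=3)    # defaults apply, overrides allowed
print(session.name, session.num_rounds)   # International Championship 3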
1e4c1c7213763ba70780707e690e37a1c01e6b59
use cpp to preprocess the input files and handle multiple DGETs per line
generate_task_header.py
generate_task_header.py
#!/usr/bin/python import os import re from toposort import toposort_flatten import copy import subprocess dtask_re = re.compile(r'DTASK\(\s*(\w+)\s*,(.+)\)') dget_re = re.compile(r'DGET\(\s*(\w+)\s*\)') def find_tasks_in_file(filename): tasks = [] cpp = subprocess.Popen(['cpp', '-w', filename], stdout=subprocess.PIPE) lines = iter(cpp.stdout.readline, '') for line in lines: if line[0] != '#': match = dtask_re.search(line) if match: #print(match.group(0)) tasks.append({'name': match.group(1), 'type': match.group(2).strip(), 'deps': set()}) for match in dget_re.finditer(line): if match: #print(match.group(0)) tasks[-1]['deps'].add(match.group(1)) return tasks def find_tasks(dir): tasks = [] for root, dirs, files in os.walk(dir): for filename in files: ext = os.path.splitext(filename)[1][1:] if ext == 'c' or ext == 'cpp': new_tasks = find_tasks_in_file(os.path.join(root, filename)) tasks.extend(new_tasks) return tasks def order_tasks(tasks): types = {} deps = {} for task in tasks: types[task['name']] = task['type'] deps[task['name']] = task['deps'] deps_copy = copy.deepcopy(deps) return map(lambda name: (name, types[name], deps_copy[name]), toposort_flatten(deps)) def generate_header(dir, header): #touch the header file with open(header, 'w') as f: os.utime(header, None) f.write('#undef DTASK\n') f.write('#undef DGET\n') tasks = order_tasks(find_tasks(dir)) ids = {} id = 0 with open(header, 'w') as f: f.write('''#ifndef __ALL_TASKS__ #define __ALL_TASKS__ #include "dtask.h" ''') for (task, type, deps) in tasks: f.write('#define {} 0x{:x}\n'.format(task.upper(), 1 << id)) ids[task] = id id = id + 1 f.write('\n') f.write('#define ALL_TASKS { \\\n') for (task, type, deps) in tasks: f.write(' {{ __dtask_{}, "{}", {}, {:d} }}, \\\n' .format(task, task, ' | '.join(map(lambda x: x.upper(), deps)), ids[task])) f.write(' }\n\n') for (task, type, deps) in tasks: f.write('DECLARE_DTASK({}, {});\n'.format(task, type)) f.write(''' #endif ''') generate_header('.', 'all_tasks.h')
#!/usr/bin/python import os import re from toposort import toposort_flatten import copy dtask_re = re.compile('DTASK\(\s*(\w+)\s*,(.+)\)') dget_re = re.compile('DGET\(\s*(\w+)\s*\)') def find_tasks_in_file(filename): tasks = [] with open(filename) as f: for line in f: match = dtask_re.search(line) if match: #print(match.group(0)) tasks.append({'name': match.group(1), 'type': match.group(2).strip(), 'deps': set()}) match = dget_re.search(line) if match: #print(match.group(0)) tasks[-1]['deps'].add(match.group(1)) return tasks def find_tasks(dir): tasks = [] for root, dirs, files in os.walk(dir): for filename in files: ext = os.path.splitext(filename)[1][1:] if ext == 'c' or ext == 'cpp': new_tasks = find_tasks_in_file(os.path.join(root, filename)) tasks.extend(new_tasks) return tasks def order_tasks(tasks): types = {} deps = {} for task in tasks: types[task['name']] = task['type'] deps[task['name']] = task['deps'] deps_copy = copy.deepcopy(deps) return map(lambda name: (name, types[name], deps_copy[name]), toposort_flatten(deps)) def generate_header(dir, header): tasks = order_tasks(find_tasks(dir)) ids = {} id = 0 with open(header, 'w') as f: f.write('''#ifndef __ALL_TASKS__ #define __ALL_TASKS__ #include "dtask.h" ''') for (task, type, deps) in tasks: f.write('#define {} 0x{:x}\n'.format(task.upper(), 1 << id)) ids[task] = id id = id + 1 f.write('\n') f.write('#define ALL_TASKS { \\\n') for (task, type, deps) in tasks: f.write(' {{ __dtask_{}, "{}", {}, {:d} }}, \\\n' .format(task, task, ' | '.join(map(lambda x: x.upper(), deps)), ids[task])) f.write(' }\n\n') for (task, type, deps) in tasks: f.write('DECLARE_DTASK({}, {});\n'.format(task, type)) f.write(''' #endif ''') generate_header('.', 'all_tasks.h')
Python
0
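The key behavioral change in this record is `dget_re.finditer` replacing a single `re.search`: search finds only the first DGET on a line, while finditer yields them all. A standalone demonstration:

import re

dget_re = re.compile(r'DGET\(\s*(\w+)\s*\)')
line = 'int x = DGET(alpha) + DGET(beta);'
print(dget_re.search(line).group(1))                 # alpha
print([m.group(1) for m in dget_re.finditer(line)])  # ['alpha', 'beta']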
f76daf38fb8998bbf5d0b663ff64572fb240fd24
bump Python API version am: 454844e69f am: 07e940c2de am: 19c98a2e99 am: 9bc9e3d185 am: 4289bd8427
src/trace_processor/python/setup.py
src/trace_processor/python/setup.py
from distutils.core import setup setup( name='perfetto', packages=['perfetto', 'perfetto.trace_processor'], package_data={'perfetto.trace_processor': ['*.descriptor']}, include_package_data=True, version='0.3.0', license='apache-2.0', description='Python API for Perfetto\'s Trace Processor', author='Perfetto', author_email='perfetto-pypi@google.com', url='https://perfetto.dev/', download_url='https://github.com/google/perfetto/archive/refs/tags/v20.1.tar.gz', keywords=['trace processor', 'tracing', 'perfetto'], install_requires=[ 'protobuf', ], classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: Apache Software License', "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", ], )
from distutils.core import setup setup( name='perfetto', packages=['perfetto', 'perfetto.trace_processor'], package_data={'perfetto.trace_processor': ['*.descriptor']}, include_package_data=True, version='0.2.9', license='apache-2.0', description='Python API for Perfetto\'s Trace Processor', author='Perfetto', author_email='perfetto-pypi@google.com', url='https://perfetto.dev/', download_url='https://github.com/google/perfetto/archive/v6.0.tar.gz', keywords=['trace processor', 'tracing', 'perfetto'], install_requires=[ 'protobuf', ], classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: Apache Software License', "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", ], )
Python
0
2025dae49acd6827d0e961e9be345ad9cb3f1086
Add pytables to save cosine similarity
pygraphc/similarity/LogTextSimilarity.py
pygraphc/similarity/LogTextSimilarity.py
from pygraphc.preprocess.PreprocessLog import PreprocessLog
from pygraphc.similarity.StringSimilarity import StringSimilarity
from itertools import combinations
from tables import *


class LogTextSimilarity(object):
    """A class for calculating cosine similarity between a log pair.
       This class is intended for non-graph based clustering method.
    """

    class Cosine(IsDescription):
        source = Int32Col()
        dest = Int32Col()
        similarity = Float32Col()

    def __init__(self, logtype, logs):
        """The constructor of class LogTextSimilarity.

        Parameters
        ----------
        logtype : str
            Type for event log, e.g., auth, syslog, etc.
        logs    : list
            List of every line of original logs.
        """
        self.logtype = logtype
        self.logs = logs

    def get_cosine_similarity(self):
        """Get cosine similarity from a pair of log lines in a file.

        Returns
        -------
        cosine_similarity : dict
            Dictionary of cosine similarity in non-graph clustering.
            Key: (log_id1, log_id2), value: cosine similarity distance.
        """
        preprocess = PreprocessLog(self.logtype)
        preprocess.preprocess_text(self.logs)
        events = preprocess.events_text

        # h5 configuration for cosine similarity
        h5cosine_file = open_file('cosine.h5', mode='w',
                                  title='Cosine similarity')
        h5group = h5cosine_file.create_group("/", 'cosine_group',
                                             'Cosine similarity group')
        # the row description is the nested Cosine class defined above
        h5table = h5cosine_file.create_table(h5group, 'cosine_table',
                                             self.Cosine,
                                             "Cosine similarity table")
        h5cosine = h5table.row

        # calculate cosine similarity
        cosines_similarity = {}
        for log_pair in combinations(range(preprocess.loglength), 2):
            cosines_similarity[log_pair] = StringSimilarity.get_cosine_similarity(events[log_pair[0]]['tf-idf'],
                                                                                  events[log_pair[1]]['tf-idf'],
                                                                                  events[log_pair[0]]['length'],
                                                                                  events[log_pair[1]]['length'])
            h5cosine['source'] = log_pair[0]
            h5cosine['dest'] = log_pair[1]
            h5cosine['similarity'] = cosines_similarity[log_pair]
            h5cosine.append()

        # write to file and then close
        h5table.flush()
        h5cosine_file.close()

        return cosines_similarity
from pygraphc.preprocess.PreprocessLog import PreprocessLog from pygraphc.similarity.StringSimilarity import StringSimilarity from itertools import combinations class LogTextSimilarity(object): """A class for calculating cosine similarity between a log pair. This class is intended for non-graph based clustering method. """ def __init__(self, logtype, logs): """The constructor of class LogTextSimilarity. Parameters ---------- logtype : str Type for event log, e.g., auth, syslog, etc. logs : list List of every line of original logs. """ self.logtype = logtype self.logs = logs def get_cosine_similarity(self): """Get cosine similarity from a pair of log lines in a file. Returns ------- cosine_similarity : dict Dictionary of cosine similarity in non-graph clustering. Key: (log_id1, log_id2), value: cosine similarity distance. """ preprocess = PreprocessLog(self.logtype) preprocess.preprocess_text(self.logs) events = preprocess.events_text # calculate cosine similarity cosines_similarity = {} for log_pair in combinations(range(preprocess.loglength), 2): cosines_similarity[log_pair] = StringSimilarity.get_cosine_similarity(events[log_pair[0]]['tf-idf'], events[log_pair[1]]['tf-idf'], events[log_pair[0]]['length'], events[log_pair[1]]['length']) return cosines_similarity
Python
0
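For reference, the PyTables pattern used in this record (description class, row buffer, append, flush) in a self-contained sketch that writes a throwaway `cosine_demo.h5` in the working directory:

from tables import Float32Col, Int32Col, IsDescription, open_file

class Cosine(IsDescription):
    source = Int32Col()
    dest = Int32Col()
    similarity = Float32Col()

h5file = open_file('cosine_demo.h5', mode='w', title='demo')
table = h5file.create_table('/', 'cosine_table', Cosine, 'pairwise cosine')
row = table.row
for src, dst, sim in [(0, 1, 0.8), (0, 2, 0.1)]:
    row['source'], row['dest'], row['similarity'] = src, dst, sim
    row.append()               # buffered; not on disk yet
table.flush()                  # flush the row buffer to the file
print(table.nrows)             # 2
h5file.close()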
f9fd73d383f4c62fa7300fecdd9f8e25688ff1e0
Fix spacing.
pymatgen/command_line/aconvasp_caller.py
pymatgen/command_line/aconvasp_caller.py
#!/usr/bin/env python ''' Interface with command line aconvasp http://aflowlib.org/ Only tested on Linux inspired by Shyue's qhull_caller WARNING: you need to have a convasp in your path for this to work ''' __author__="Geoffroy Hautier" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "1.0" __maintainer__ = "Geoffroy Hautier" __email__ = "geoffroy.hautier@uclouvain.be" __status__ = "Production" __date__ ="$Nov 22, 2011M$" import subprocess import numpy as np from pymatgen.io.vaspio import Poscar def run_aconvasp_command(command, structure): """ Helper function for calling aconvasp with different arguments """ poscar = Poscar(structure) p = subprocess.Popen(command,stdout=subprocess.PIPE,stdin=subprocess.PIPE) output = p.communicate(input=poscar.get_string()) return output def get_num_division_kpoints(structure, kppa): """ get kpoint divisions for a given k-point density (per reciprocal-atom): kppa and a given structure """ output = run_aconvasp_command(['aconvasp', '--kpoints', str(kppa)], structure) tmp = output[0].rsplit("\n")[6].rsplit(" ") return [int(tmp[5]),int(tmp[6]),int(tmp[7])] def get_minkowski_red(structure): """ get a minkowski reduced structure """ output = run_aconvasp_command(['aconvasp', '--kpath'], structure) started = False poscar_string = "" for line in output[0].split("\n"): if started or line.find("KPOINTS TO RUN") != -1: poscar_string=poscar_string+line+"\n" if line.find("STRUCTURE TO RUN") != -1: started = True if line.find("KPOINTS TO RUN") != -1: started = False return Poscar.from_string(poscar_string).struct def get_vasp_kpoint_file_sym(structure): """ get a kpoint file ready to be ran in VASP along symmetries of a structure """ output = run_aconvasp_command(['aconvasp', '--kpath'], structure) started = False kpoints_string = "" for line in output[0].split("\n"): #print line if started or line.find("END")!=-1 : kpoints_string = kpoints_string + line + "\n" if line.find("KPOINTS TO RUN")!=-1: started=True if line.find("END") != -1: started = False return kpoints_string def get_point_group_rec(structure): """ gets the point group for the reciprocal lattice of the given structure """ run_aconvasp_command(['aconvasp', '--pgroupk'], structure) listUc=[] f = open("aflow.pgroupk.out", 'r') linetmp=[] axis=[] type_transf=None count=-1000 started = False for line in f: #print line if line.find("type") != -1: type_transf=line.split()[1] if line.find("Schoenflies") != -1: count=-1 linetmp=[] started=True continue count += 1 if not started: continue if count <= 2: linetmp.append([float(x) for x in line.rstrip("\nUc ").split()]) if line.find("axis") != -1: axis=np.array([float(line.split()[0]),float(line.split()[1]),float(line.split()[2])]) if count == 11: listUc.append({'matrix':np.array(linetmp),'type':type_transf,'axis':axis}) f.close() return listUc
#!/usr/bin/env python ''' Interface with command line aconvasp http://aflowlib.org/ Only tested on Linux inspired by Shyue's qhull_caller WARNING: you need to have a convasp in your path for this to work ''' __author__="Geoffroy Hautier" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "1.0" __maintainer__ = "Geoffroy Hautier" __email__ = "geoffroy.hautier@uclouvain.be" __status__ = "Production" __date__ ="$Nov 22, 2011M$" import subprocess import numpy as np def run_aconvasp_command(command, structure): """ Helper function for calling aconvasp with different arguments """ from pymatgen.io.vaspio import Poscar poscar=Poscar(structure) p = subprocess.Popen(command,stdout=subprocess.PIPE,stdin=subprocess.PIPE) output = p.communicate(input=poscar.get_string()) return output def get_num_division_kpoints(structure, kppa): """ get kpoint divisions for a given k-point density (per reciprocal-atom): kppa and a given structure """ output=run_aconvasp_command(['aconvasp', '--kpoints', str(kppa)], structure) tmp=output[0].rsplit("\n")[6].rsplit(" ") return [int(tmp[5]),int(tmp[6]),int(tmp[7])] def get_minkowski_red(structure): """ get a minkowski reduced structure """ from pymatgen.io.vaspio import Poscar output=run_aconvasp_command(['aconvasp', '--kpath'], structure) started=False poscar_string="" for line in output[0].split("\n"): if(started==True or line.find("KPOINTS TO RUN")!=-1): poscar_string=poscar_string+line+"\n" if(line.find("STRUCTURE TO RUN")!=-1): started=True if(line.find("KPOINTS TO RUN")!=-1): started=False return Poscar.from_string(poscar_string).struct def get_vasp_kpoint_file_sym(structure): """ get a kpoint file ready to be ran in VASP along symmetries of a structure """ output=run_aconvasp_command(['aconvasp', '--kpath'], structure) started=False kpoints_string="" for line in output[0].split("\n"): #print line if(started==True or line.find("END")!=-1): kpoints_string=kpoints_string+line+"\n" if(line.find("KPOINTS TO RUN")!=-1): started=True if(line.find("END")!=-1): started=False return kpoints_string def get_point_group_rec(structure): """ gets the point group for the reciprocal lattice of the given structure """ run_aconvasp_command(['aconvasp', '--pgroupk'], structure) listUc=[] f = open("aflow.pgroupk.out", 'r') linetmp=[] axis=[] type_transf=None count=-1000 started = False for line in f: #print line if(line.find("type")!=-1): type_transf=line.split()[1] if(line.find("Schoenflies")!=-1): count=-1 linetmp=[] started=True continue count=count+1 if(started==False): continue if(count<=2): linetmp.append([float(x) for x in line.rstrip("\nUc ").split()]) if(line.find("axis")!=-1): axis=np.array([float(line.split()[0]),float(line.split()[1]),float(line.split()[2])]) if(count==11): listUc.append({'matrix':np.array(linetmp),'type':type_transf,'axis':axis}) f.close() return listUc
Python
0.999999
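The `run_aconvasp_command` idiom above, feeding a POSCAR string over stdin and reading stdout, in miniature ('cat' stands in for aconvasp here so the sketch runs anywhere with coreutils):

import subprocess

p = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = p.communicate(input=b'POSCAR contents\n')
print(out)  # b'POSCAR contents\n'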
e384aee40e41d36c2ff32b76a1d4d162e0c9cecc
Stopping for the night. corrected JsLibraryAnalyser.js
pythonium/BowerLoad/JsLibraryAnalyser.py
pythonium/BowerLoad/JsLibraryAnalyser.py
__author__ = 'Jason'
import os
import fnmatch


class Mapper:
    # map out files{} -> classes{} -> functions{{inputs:[],returns:[]}
    # moduleMap = files{classes{functions{{inputs:[],returns:[]}}
    moduleMap = {"files": {}}
    RootJsfileList = []
    RootDir = os.curdir  # os.curdir is a string constant, not a callable

    def __init__(self):
        pass

    def find_all_js_files(self, RootDir=RootDir):
        for root, dirnames, filenames in os.walk(RootDir):
            for filename in fnmatch.filter(filenames, '*.js'):
                # record each discovered file; details are filled in later
                self.moduleMap["files"][str(filename)] = ""


class Skelmaker:
    # Create Skeleton Python Modules For Easy Ide Integration
    def __init__(self):
        pass
__author__ = 'Jason' import glob import os class Mapper: #map out files -> classes -> functions+returns moduleMap = {} RootJsfileList = [] def __init__(self): pass def _find_entry_Points(self): os.chdir("/mydir") for file in glob.glob("*.js"): print(file) pass class Skelmaker: #Create Skeleton Python Modules For Easy Ide Intergration def __init__(self): pass
Python
0.999996
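The scan in `find_all_js_files` is the standard os.walk + fnmatch idiom; isolated into a minimal sketch, it looks like this:

import fnmatch
import os

def find_js(root='.'):
    hits = []
    for dirpath, dirnames, filenames in os.walk(root):
        for filename in fnmatch.filter(filenames, '*.js'):
            hits.append(os.path.join(dirpath, filename))
    return hits

print(find_js('.'))  # all *.js paths below the current directory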
a5840150f7089b1e00296f12254ef161f8ce93b6
fix foo("bar").baz()
pythonscript/pythonscript_transformer.py
pythonscript/pythonscript_transformer.py
from ast import Str from ast import Expr from ast import Call from ast import Name from ast import Assign from ast import Attribute from ast import FunctionDef from ast import NodeTransformer class PythonScriptTransformer(NodeTransformer): def visit_ClassDef(self, node): name = Name(node.name, None) yield Assign([name], Call(Name('JSObject', None), None, None, None, None)) yield Assign([Name('parents', None)], Call(Name('JSArray', Name), None, None, None, None)) if node.bases: yield Expr( Call( Attribute( Name('parents', None), 'push', None ), node.bases, None, None, None ) ) for item in node.body: yield self.generic_visit(item) if isinstance(item, FunctionDef): item_name = item.name item.name = closure_name = '%s__%s' % (node.name, item_name) yield Assign([Attribute(name, item_name, None)], Name(closure_name, None)) elif isinstance(item, Assign): item_name = item.targets[0].id item.targets[0].id = closure_name = '%s__%s' % (name.id, item_name) yield Assign([Attribute(name, item_name, None)], Name(closure_name, None)) yield Assign([name], Call(Name('create_class', None), [Str(node.name), Name('parents', None), Name(name.id, None)], None, None, None)) def visit_Attribute(self, node): return Call(Name('get_attribute', None), [self.visit(node.value), Str(node.attr)], None, None, None) def visit_Expr(self, node): return self.visit(node.value) def visit_Assign(self, node): attr = node.targets[0] if isinstance(attr, Attribute): return Expr(Call(Name('set_attribute', None), [attr.value, Str(attr.attr), node.value], None, None, None)) else: return self.generic_visit(node) def visit_Call(self, node): if hasattr(node.func, 'id') and node.func.id in ('JS', 'toString'): return self.generic_visit(node) return Call( Call( Name('get_attribute', None), [self.visit(node.func), Str('__call__')], None, None, None, ), map(self.visit, node.args), None, None, None, )
from ast import Str from ast import Expr from ast import Call from ast import Name from ast import Assign from ast import Attribute from ast import FunctionDef from ast import NodeTransformer class PythonScriptTransformer(NodeTransformer): def visit_ClassDef(self, node): name = Name(node.name, None) yield Assign([name], Call(Name('JSObject', None), None, None, None, None)) yield Assign([Name('parents', None)], Call(Name('JSArray', Name), None, None, None, None)) if node.bases: yield Expr( Call( Attribute( Name('parents', None), 'push', None ), node.bases, None, None, None ) ) for item in node.body: yield self.generic_visit(item) if isinstance(item, FunctionDef): item_name = item.name item.name = closure_name = '%s__%s' % (node.name, item_name) yield Assign([Attribute(name, item_name, None)], Name(closure_name, None)) elif isinstance(item, Assign): item_name = item.targets[0].id item.targets[0].id = closure_name = '%s__%s' % (name.id, item_name) yield Assign([Attribute(name, item_name, None)], Name(closure_name, None)) yield Assign([name], Call(Name('create_class', None), [Str(node.name), Name('parents', None), Name(name.id, None)], None, None, None)) def visit_Attribute(self, node): return Call(Name('get_attribute', None), [self.generic_visit(node.value), Str(node.attr)], None, None, None) def visit_Assign(self, node): attr = node.targets[0] if isinstance(attr, Attribute): return Expr(Call(Name('set_attribute', None), [attr.value, Str(attr.attr), node.value], None, None, None)) else: return self.generic_visit(node) def visit_Call(self, node): if hasattr(node.func, 'id') and node.func.id in ('JS', 'toString'): return self.generic_visit(node) return Call( Call( Name('get_attribute', None), [self.visit(node.func), Str('__call__')], None, None, None, ), map(self.visit, node.args), None, None, None, )
Python
0.999868
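The commit subject's `foo("bar").baz()` case works once the attribute's value is routed back through the transformer, so the nested call is rewritten too. A runnable miniature of that rewrite using the modern stdlib ast API (Python 3.9+ for ast.unparse), not the older Node constructors used in the record:

import ast

class AttrRewriter(ast.NodeTransformer):
    def visit_Attribute(self, node):
        self.generic_visit(node)  # rewrite nested attributes/calls first
        return ast.Call(
            func=ast.Name(id='get_attribute', ctx=ast.Load()),
            args=[node.value, ast.Constant(node.attr)],
            keywords=[])

tree = ast.parse('foo("bar").baz()', mode='eval')
tree = ast.fix_missing_locations(AttrRewriter().visit(tree))
print(ast.unparse(tree))  # get_attribute(foo('bar'), 'baz')()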
a7a7ea3b224252c22422a2f8b11e452f74ea3a77
Reformat test, remove debug
pyxform/tests_v1/test_upload_question.py
pyxform/tests_v1/test_upload_question.py
""" Test upload (image, audio, file) question types in XLSForm """ from pyxform.tests_v1.pyxform_test_case import PyxformTestCase class UploadTest(PyxformTestCase): def test_image_question(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | type | name | label | | | image | photo | Take a photo: | """, errored=False, xml__contains=[ '<bind nodeset="/data/photo" type="binary"/>', '<upload mediatype="image/*" ref="/data/photo">', "<label>Take a photo:</label>", "</upload>", ], ) def test_audio_question(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | type | name | label | | | audio | recording1 | Record a sound: | """, errored=False, xml__contains=[ '<bind nodeset="/data/recording1" type="binary"/>', '<upload mediatype="audio/*" ref="/data/recording1">', "<label>Record a sound:</label>", "</upload>", ], ) def test_file_question(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | type | name | label | | | file | file1 | Upload a file: | """, errored=False, xml__contains=[ '<bind nodeset="/data/file1" type="binary"/>', '<upload mediatype="application/*" ref="/data/file1">', "<label>Upload a file:</label>", "</upload>", ], ) def test_file_question_restrict_filetype(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | | type | name | label | body::accept | | | file | upload_a_pdf | Upload a PDF: | application/pdf | """, errored=False, xml__contains=['<upload accept="application/pdf"'], ) def test_image_question_custom_col_calc(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | | type | name | label | body:esri:style | | | text | watermark_phrase | Watermark Text: | | | | text | text1 | Text | | | | image | image1 | Take a Photo: | watermark=${watermark_phrase} | """, # noqa errored=False, xml__contains=["watermark= /data/watermark_phrase "], )
""" Test upload (image, audio, file) question types in XLSForm """ from pyxform.tests_v1.pyxform_test_case import PyxformTestCase class UploadTest(PyxformTestCase): def test_image_question(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | type | name | label | | | image | photo | Take a photo: | """, errored=False, xml__contains=[ "<bind nodeset=\"/data/photo\" type=\"binary\"/>", "<upload mediatype=\"image/*\" ref=\"/data/photo\">", "<label>Take a photo:</label>", "</upload>"] ) def test_audio_question(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | type | name | label | | | audio | recording1 | Record a sound: | """, errored=False, xml__contains=[ "<bind nodeset=\"/data/recording1\" type=\"binary\"/>", "<upload mediatype=\"audio/*\" ref=\"/data/recording1\">", "<label>Record a sound:</label>", "</upload>"] ) def test_file_question(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | type | name | label | | | file | file1 | Upload a file: | """, errored=False, xml__contains=[ "<bind nodeset=\"/data/file1\" type=\"binary\"/>", "<upload mediatype=\"application/*\" ref=\"/data/file1\">", "<label>Upload a file:</label>", "</upload>"] ) def test_file_question_restrict_filetype(self): self.assertPyxformXform( name="data", md=""" | survey | | | | | | | type | name | label | body::accept | | | file | upload_a_pdf | Upload a PDF: | application/pdf | """, errored=False, xml__contains=[ "<upload accept=\"application/pdf\"", ] ) def test_image_question_custom_col_calc(self): self.assertPyxformXform( debug=True, name="data", md=""" | survey | | | | | | | type | name | label | body:esri:style | | | text | watermark_phrase | Watermark Text: | | | | text | text1 | Text | | | | image | image1 | Take a Photo: | watermark=${watermark_phrase} | """, # noqa errored=False, xml__contains=[ "watermark= /data/watermark_phrase " ] )
Python
0
3b2730edbbef3f32aef6682d9d446d8416fc7562
add setWindowMinimizeButtonHint() for dialog
quite/controller/dialog_ui_controller.py
quite/controller/dialog_ui_controller.py
from . import WidgetUiController from ..gui import Shortcut from PySide.QtCore import Qt class DialogUiController(WidgetUiController): def __init__(self, parent=None, ui_file=None): super().__init__(parent, ui_file) Shortcut('ctrl+w', self.w).excited.connect(self.w.close) def exec(self): return self.w.exec() def setWindowMinimizeButtonHint(self): self.w.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint) @classmethod def class_exec(cls, *args, **kwargs): return cls(*args, **kwargs).exec()
from . import WidgetUiController from ..gui import Shortcut class DialogUiController(WidgetUiController): def __init__(self, parent=None, ui_file=None): super().__init__(parent, ui_file) Shortcut('ctrl+w', self.w).excited.connect(self.w.close) def exec(self): return self.w.exec() @classmethod def class_exec(cls, *args, **kwargs): return cls(*args, **kwargs).exec()
Python
0
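Qt window-button hints are bit flags combined with `|`; note that `setWindowFlags` replaces the whole flag set rather than adding to it, so hints not listed are dropped. A tiny check of the combination (assumes PySide is installed; no QApplication is needed just to inspect flags):

from PySide.QtCore import Qt

combined = Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint
# Each hint is a distinct bit, so the OR keeps both:
print(int(combined & Qt.WindowMinimizeButtonHint) != 0)  # True
print(int(combined & Qt.WindowMaximizeButtonHint) != 0)  # True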
a08919c24e1af460ccba8820eb6646492848621e
Bump Version 0.5.4
libmc/__init__.py
libmc/__init__.py
from ._client import ( PyClient, ThreadUnsafe, encode_value, MC_DEFAULT_EXPTIME, MC_POLL_TIMEOUT, MC_CONNECT_TIMEOUT, MC_RETRY_TIMEOUT, MC_HASH_MD5, MC_HASH_FNV1_32, MC_HASH_FNV1A_32, MC_HASH_CRC_32, MC_RETURN_SEND_ERR, MC_RETURN_RECV_ERR, MC_RETURN_CONN_POLL_ERR, MC_RETURN_POLL_TIMEOUT_ERR, MC_RETURN_POLL_ERR, MC_RETURN_MC_SERVER_ERR, MC_RETURN_PROGRAMMING_ERR, MC_RETURN_INVALID_KEY_ERR, MC_RETURN_INCOMPLETE_BUFFER_ERR, MC_RETURN_OK, ) __VERSION__ = "0.5.4" __version__ = "v0.5.4" __author__ = "mckelvin" __email__ = "mckelvin@users.noreply.github.com" __date__ = "Thu Jul 16 18:20:00 2015 +0800" class Client(PyClient): pass __all__ = [ 'Client', 'ThreadUnsafe', '__VERSION__', 'encode_value', 'MC_DEFAULT_EXPTIME', 'MC_POLL_TIMEOUT', 'MC_CONNECT_TIMEOUT', 'MC_RETRY_TIMEOUT', 'MC_HASH_MD5', 'MC_HASH_FNV1_32', 'MC_HASH_FNV1A_32', 'MC_HASH_CRC_32', 'MC_RETURN_SEND_ERR', 'MC_RETURN_RECV_ERR', 'MC_RETURN_CONN_POLL_ERR', 'MC_RETURN_POLL_TIMEOUT_ERR', 'MC_RETURN_POLL_ERR', 'MC_RETURN_MC_SERVER_ERR', 'MC_RETURN_PROGRAMMING_ERR', 'MC_RETURN_INVALID_KEY_ERR', 'MC_RETURN_INCOMPLETE_BUFFER_ERR', 'MC_RETURN_OK', ]
from ._client import ( PyClient, ThreadUnsafe, encode_value, MC_DEFAULT_EXPTIME, MC_POLL_TIMEOUT, MC_CONNECT_TIMEOUT, MC_RETRY_TIMEOUT, MC_HASH_MD5, MC_HASH_FNV1_32, MC_HASH_FNV1A_32, MC_HASH_CRC_32, MC_RETURN_SEND_ERR, MC_RETURN_RECV_ERR, MC_RETURN_CONN_POLL_ERR, MC_RETURN_POLL_TIMEOUT_ERR, MC_RETURN_POLL_ERR, MC_RETURN_MC_SERVER_ERR, MC_RETURN_PROGRAMMING_ERR, MC_RETURN_INVALID_KEY_ERR, MC_RETURN_INCOMPLETE_BUFFER_ERR, MC_RETURN_OK, ) __VERSION__ = '0.5.3' __version__ = "v0.5.3-3-g3eb1a97" __author__ = "mckelvin" __email__ = "mckelvin@users.noreply.github.com" __date__ = "Sat Jul 11 14:24:54 2015 +0800" class Client(PyClient): pass __all__ = [ 'Client', 'ThreadUnsafe', '__VERSION__', 'encode_value', 'MC_DEFAULT_EXPTIME', 'MC_POLL_TIMEOUT', 'MC_CONNECT_TIMEOUT', 'MC_RETRY_TIMEOUT', 'MC_HASH_MD5', 'MC_HASH_FNV1_32', 'MC_HASH_FNV1A_32', 'MC_HASH_CRC_32', 'MC_RETURN_SEND_ERR', 'MC_RETURN_RECV_ERR', 'MC_RETURN_CONN_POLL_ERR', 'MC_RETURN_POLL_TIMEOUT_ERR', 'MC_RETURN_POLL_ERR', 'MC_RETURN_MC_SERVER_ERR', 'MC_RETURN_PROGRAMMING_ERR', 'MC_RETURN_INVALID_KEY_ERR', 'MC_RETURN_INCOMPLETE_BUFFER_ERR', 'MC_RETURN_OK', ]
Python
0
778f284c2208438b7bc26226cc295f80de6343e0
Use loop.add_signal_handler for handling SIGWINCH.
libpymux/utils.py
libpymux/utils.py
import array import asyncio import fcntl import signal import termios def get_size(stdout): # Thanks to fabric (fabfile.org), and # http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/ """ Get the size of this pseudo terminal. :returns: A (rows, cols) tuple. """ #assert stdout.isatty() # Buffer for the C call buf = array.array('h', [0, 0, 0, 0 ]) # Do TIOCGWINSZ (Get) #fcntl.ioctl(stdout.fileno(), termios.TIOCGWINSZ, buf, True) fcntl.ioctl(0, termios.TIOCGWINSZ, buf, True) # Return rows, cols return buf[0], buf[1] def set_size(stdout_fileno, rows, cols): """ Set terminal size. (This is also mainly for internal use. Setting the terminal size automatically happens when the window resizes. However, sometimes the process that created a pseudo terminal, and the process that's attached to the output window are not the same, e.g. in case of a telnet connection, or unix domain socket, and then we have to sync the sizes by hand.) """ # Buffer for the C call buf = array.array('h', [rows, cols, 0, 0 ]) # Do: TIOCSWINSZ (Set) fcntl.ioctl(stdout_fileno, termios.TIOCSWINSZ, buf) def alternate_screen(write): class Context: def __enter__(self): # Enter alternate screen buffer write(b'\033[?1049h') def __exit__(self, *a): # Exit alternate screen buffer and make cursor visible again. write(b'\033[?1049l') write(b'\033[?25h') return Context() def call_on_sigwinch(callback, loop=None): """ Set a function to be called when the SIGWINCH signal is received. (Normally, on terminal resize.) """ if loop is None: loop = asyncio.get_event_loop() def sigwinch_handler(): loop.call_soon(callback) loop.add_signal_handler(signal.SIGWINCH, sigwinch_handler)
import array import asyncio import fcntl import signal import termios def get_size(stdout): # Thanks to fabric (fabfile.org), and # http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/ """ Get the size of this pseudo terminal. :returns: A (rows, cols) tuple. """ #assert stdout.isatty() # Buffer for the C call buf = array.array('h', [0, 0, 0, 0 ]) # Do TIOCGWINSZ (Get) #fcntl.ioctl(stdout.fileno(), termios.TIOCGWINSZ, buf, True) fcntl.ioctl(0, termios.TIOCGWINSZ, buf, True) # Return rows, cols return buf[0], buf[1] def set_size(stdout_fileno, rows, cols): """ Set terminal size. (This is also mainly for internal use. Setting the terminal size automatically happens when the window resizes. However, sometimes the process that created a pseudo terminal, and the process that's attached to the output window are not the same, e.g. in case of a telnet connection, or unix domain socket, and then we have to sync the sizes by hand.) """ # Buffer for the C call buf = array.array('h', [rows, cols, 0, 0 ]) # Do: TIOCSWINSZ (Set) fcntl.ioctl(stdout_fileno, termios.TIOCSWINSZ, buf) def alternate_screen(write): class Context: def __enter__(self): # Enter alternate screen buffer write(b'\033[?1049h') def __exit__(self, *a): # Exit alternate screen buffer and make cursor visible again. write(b'\033[?1049l') write(b'\033[?25h') return Context() def call_on_sigwinch(callback): """ Set a function to be called when the SIGWINCH signal is received. (Normally, on terminal resize.) """ def sigwinch_handler(n, frame): loop = asyncio.get_event_loop() loop.call_soon(callback) signal.signal(signal.SIGWINCH, sigwinch_handler)
Python
0
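The commit moves SIGWINCH handling onto the event loop itself: `loop.add_signal_handler` runs the callback inside the loop, avoiding the reentrancy hazards of a raw `signal.signal` handler. The pattern in isolation (Unix-only; resize the terminal to trigger it):

import asyncio
import signal

def on_resize():
    print('terminal resized')

loop = asyncio.new_event_loop()
loop.add_signal_handler(signal.SIGWINCH, on_resize)
try:
    loop.run_forever()   # Ctrl+C to exit
except KeyboardInterrupt:
    pass
finally:
    loop.close()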
fa57fa679b575ce871af3c4769828f400e6ab28b
bump version 2.1.3 for issue #70
premailer/__init__.py
premailer/__init__.py
from premailer import Premailer, transform __version__ = '2.1.3'
from premailer import Premailer, transform __version__ = '2.1.2'
Python
0
3cdbcc16450faa958e27f60d5f2adc7a943562d8
Fix MacOS build
platforms/osx/build_framework.py
platforms/osx/build_framework.py
#!/usr/bin/env python """ The script builds OpenCV.framework for OSX. """ from __future__ import print_function import os, os.path, sys, argparse, traceback, multiprocessing # import common code sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios')) from build_framework import Builder MACOSX_DEPLOYMENT_TARGET='10.12' # default, can be changed via command line options or environment variable class OSXBuilder(Builder): def getToolchain(self, arch, target): return None def getBuildCommand(self, archs, target): buildcmd = [ "xcodebuild", "MACOSX_DEPLOYMENT_TARGET=" + os.environ['MACOSX_DEPLOYMENT_TARGET'], "ARCHS=%s" % archs[0], "-sdk", target.lower(), "-configuration", "Debug" if self.debug else "Release", "-parallelizeTargets", "-jobs", str(multiprocessing.cpu_count()) ] return buildcmd def getInfoPlist(self, builddirs): return os.path.join(builddirs[0], "osx", "Info.plist") if __name__ == "__main__": folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../..")) parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for OSX.') parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework') parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)') parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)') parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework') parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)') parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)') parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET') parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)') parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)') args = parser.parse_args() os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET']) b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.disable, args.enablenonfree, [ (["x86_64"], "MacOSX") ], args.debug, args.debug_info) b.build(args.out)
#!/usr/bin/env python """ The script builds OpenCV.framework for OSX. """ from __future__ import print_function import os, os.path, sys, argparse, traceback, multiprocessing # import common code sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios')) from build_framework import Builder MACOSX_DEPLOYMENT_TARGET='10.12' # default, can be changed via command line options or environment variable class OSXBuilder(Builder): def getToolchain(self, arch, target): return None def getBuildCommand(self, archs, target): buildcmd = [ "xcodebuild", "MACOSX_DEPLOYMENT_TARGET=" + os.environ['MACOSX_DEPLOYMENT_TARGET'], "ARCHS=%s" % archs[0], "-sdk", target.lower(), "-configuration", "Debug" if self.debug else "Release", "-parallelizeTargets", "-jobs", str(multiprocessing.cpu_count()) ] return buildcmd def getInfoPlist(self, builddirs): return os.path.join(builddirs[0], "osx", "Info.plist") if __name__ == "__main__": folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../..")) parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for OSX.') parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework') parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)') parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)') parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework') parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)') parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET') parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)') parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)') args = parser.parse_args() os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET']) b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.enablenonfree, [ (["x86_64"], "MacOSX") ], args.debug, args.debug_info) b.build(args.out)
Python
0.000047
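The new `--disable` option relies on argparse's `action='append'`: each occurrence extends the list. A standalone parse shows the effect (the feature names here are just examples):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--disable', metavar='FEATURE', default=[],
                    action='append')
args = parser.parse_args(['--disable', 'OPENCL', '--disable', 'IPP'])
print(args.disable)  # ['OPENCL', 'IPP']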
0782ba56218e825dea5b76cbf030a522932bcfd6
Remove unnecessary (and debatable) comment.
networkx/classes/ordered.py
networkx/classes/ordered.py
""" OrderedDict variants of the default base classes. """ try: # Python 2.7+ from collections import OrderedDict except ImportError: # Oython 2.6 try: from ordereddict import OrderedDict except ImportError: OrderedDict = None from .graph import Graph from .multigraph import MultiGraph from .digraph import DiGraph from .multidigraph import MultiDiGraph __all__ = [] if OrderedDict is not None: __all__.extend([ 'OrderedGraph', 'OrderedDiGraph', 'OrderedMultiGraph', 'OrderedMultiDiGraph' ]) class OrderedGraph(Graph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedDiGraph(DiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiGraph(MultiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiDiGraph(MultiDiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict
""" OrderedDict variants of the default base classes. These classes are especially useful for doctests and unit tests. """ try: # Python 2.7+ from collections import OrderedDict except ImportError: # Oython 2.6 try: from ordereddict import OrderedDict except ImportError: OrderedDict = None from .graph import Graph from .multigraph import MultiGraph from .digraph import DiGraph from .multidigraph import MultiDiGraph __all__ = [] if OrderedDict is not None: __all__.extend([ 'OrderedGraph', 'OrderedDiGraph', 'OrderedMultiGraph', 'OrderedMultiDiGraph' ]) class OrderedGraph(Graph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedDiGraph(DiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiGraph(MultiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiDiGraph(MultiDiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict
Python
0
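A minimal usage sketch of the technique in the record above: swapping the graph's dict factories for OrderedDict makes node and edge iteration follow insertion order. It assumes a networkx release that still ships the Ordered* classes (they were later deprecated and removed), and is illustrative rather than part of the dataset record.

import networkx as nx

# The OrderedDict factories make iteration deterministic: nodes and
# adjacency come back in insertion order, which keeps doctests stable.
G = nx.OrderedGraph()
G.add_nodes_from(['c', 'a', 'b'])
G.add_edge('c', 'b')

print(list(G.nodes()))  # ['c', 'a', 'b'] -- insertion order, not hash order
print(list(G.edges()))  # [('c', 'b')]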
05e162a7fcc9870e37a6deab176cf3c6491e8481
add docstrings for methods of PrimaryDecider and refactor them a bit
plenum/server/primary_decider.py
plenum/server/primary_decider.py
from typing import Iterable
from collections import deque

from plenum.common.message_processor import MessageProcessor
from plenum.server.has_action_queue import HasActionQueue
from plenum.server.router import Router, Route
from stp_core.common.log import getlogger
from typing import List

logger = getlogger()


class PrimaryDecider(HasActionQueue, MessageProcessor):
    def __init__(self, node):
        HasActionQueue.__init__(self)

        self.node = node
        self.name = node.name
        self.f = node.f
        self.replicas = node.replicas
        self.viewNo = node.viewNo
        self.rank = node.rank
        self.nodeNames = node.allNodeNames
        self.nodeCount = 0
        self.inBox = deque()
        self.outBox = deque()
        self.inBoxRouter = Router(*self.routes)

        # Need to keep track of who was primary for the master protocol
        # instance for previous view, this variable only matters between
        # elections, the elector will set it before doing triggering new
        # election and will reset it after primary is decided for master
        # instance
        self.previous_master_primary = None

    def __repr__(self):
        return "{}".format(self.name)

    @property
    def was_master_primary_in_prev_view(self):
        return self.previous_master_primary == self.name

    @property
    def routes(self) -> Iterable[Route]:
        raise NotImplementedError

    @property
    def supported_msg_types(self) -> Iterable[type]:
        return [k for k, v in self.routes]

    def decidePrimaries(self) -> None:
        """
        Start election of the primary replica for each protocol instance
        """
        raise NotImplementedError

    async def serviceQueues(self, limit):
        # TODO: this should be abstract
        raise NotImplementedError

    def viewChanged(self, viewNo: int):
        """
        Notifies primary decider about the fact that view changed to let it
        prepare for election, which then will be started from outside by
        calling decidePrimaries()
        """
        if viewNo <= self.viewNo:
            logger.warning("Provided view no {} is not greater than the "
                           "current view no {}".format(viewNo, self.viewNo))
            return False
        self.viewNo = viewNo
        self.previous_master_primary = self.node.master_primary_name
        for replica in self.replicas:
            replica.primaryName = None
        return True

    def get_msgs_for_lagged_nodes(self) -> List[object]:
        """
        Returns election messages from the last view change
        """
        raise NotImplementedError

    def send(self, msg):
        """
        Send a message to the node on which this replica resides.

        :param msg: the message to send
        """
        logger.debug("{}'s elector sending {}".format(self.name, msg))
        self.outBox.append(msg)

    def start_election_for_instance(self, instance_id):
        """
        Called when starting election for a particular protocol instance
        """
        raise NotImplementedError

from typing import Iterable
from collections import deque

from plenum.common.message_processor import MessageProcessor
from plenum.server.has_action_queue import HasActionQueue
from plenum.server.router import Router, Route
from stp_core.common.log import getlogger
from typing import List

logger = getlogger()


class PrimaryDecider(HasActionQueue, MessageProcessor):
    def __init__(self, node):
        HasActionQueue.__init__(self)

        # TODO: How does primary decider ensure that a node does not have a
        # primary while its catching up
        self.node = node
        self.name = node.name
        self.f = node.f
        self.replicas = node.replicas
        self.viewNo = node.viewNo
        self.rank = node.rank
        self.nodeNames = node.allNodeNames
        self.nodeCount = 0
        self.inBox = deque()
        self.outBox = deque()
        self.inBoxRouter = Router(*self.routes)

        # Need to keep track of who was primary for the master protocol
        # instance for previous view, this variable only matters between
        # elections, the elector will set it before doing triggering new
        # election and will reset it after primary is decided for master
        # instance
        self.previous_master_primary = None

    def __repr__(self):
        return "{}".format(self.name)

    @property
    def was_master_primary_in_prev_view(self):
        return self.previous_master_primary == self.name

    @property
    def routes(self) -> Iterable[Route]:
        raise NotImplementedError

    @property
    def supported_msg_types(self) -> Iterable[type]:
        return [k for k, v in self.routes]

    def decidePrimaries(self):
        """
        Choose the primary replica for each protocol instance in the system
        using a PrimaryDecider.
        """
        raise NotImplementedError

    async def serviceQueues(self, limit):
        # TODO: this should be abstract
        return 0

    def viewChanged(self, viewNo: int):
        if viewNo > self.viewNo:
            self.viewNo = viewNo
            self.previous_master_primary = self.node.master_primary_name
            for replica in self.replicas:
                replica.primaryName = None
            return True
        else:
            logger.warning("Provided view no {} is not greater than the "
                           "current view no {}".format(viewNo, self.viewNo))
            return False

    def get_msgs_for_lagged_nodes(self) -> List[int]:
        raise NotImplementedError

    def send(self, msg):
        """
        Send a message to the node on which this replica resides.

        :param msg: the message to send
        """
        logger.debug("{}'s elector sending {}".format(self.name, msg))
        self.outBox.append(msg)

    def start_election_for_instance(self, instance_id):
        """
        Called when starting election for a particular protocol instance
        """
        raise NotImplementedError
Python
0
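The viewChanged rewrite in the record above is a guard-clause refactor: reject the invalid input first and return early, so the happy path reads straight down without nesting. A generic sketch of the pattern (the names are illustrative, not taken from plenum):

import logging

log = logging.getLogger(__name__)

class ViewTracker:
    def __init__(self):
        self.view_no = 0

    def advance_view(self, view_no):
        # Guard clause: handle the failure case first and bail out early...
        if view_no <= self.view_no:
            log.warning("stale view no %s (current is %s)", view_no, self.view_no)
            return False
        # ...so the success path needs no else branch or extra indentation.
        self.view_no = view_no
        return True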
8f78c04f6e2f21deb02a285fc78c5da907f0287b
Delete extra print()
nn/file/cnn_dailymail_rc.py
nn/file/cnn_dailymail_rc.py
import functools

import numpy as np  # required by _map_document_to_indices below
import tensorflow as tf

from .. import flags
from ..flags import FLAGS


class _RcFileReader:
    def __init__(self):
        # 0 -> null, 1 -> unknown
        self._word_indices = flags.word_indices

    def read(self, filename_queue):
        key, value = tf.WholeFileReader().read(filename_queue)
        return (key, *self._read_record(value))

    def _read_record(self, string):
        def read_record(string):
            context, question, answer = string.split("\n\n")[1:4]
            context = self._map_document_to_indices(context)
            question = self._map_document_to_indices(question)
            return (context,
                    question,
                    self._map_word_to_index(answer),
                    len(context),
                    len(question))

        context, question, answer, context_length, question_length = tf.py_func(
            read_record,
            [string],
            [tf.int32, tf.int32, tf.int32, tf.int32, tf.int32],
            name="read_rc_data")

        context_length.set_shape([])
        question_length.set_shape([])

        return (tf.reshape(context, [context_length]),
                tf.reshape(question, [question_length]),
                tf.reshape(answer, []))

    def _map_word_to_index(self, word):
        return self._word_indices[word] if word in self._word_indices else 1  # unknown

    def _map_document_to_indices(self, document):
        return np.array([self._map_word_to_index(word)
                         for word in document.split()],
                        dtype=np.int32)


def read_files(filename_queue):
    tensors = _RcFileReader().read(filename_queue)
    return tf.contrib.training.bucket_by_sequence_length(
        tf.shape(tensors[1])[0],
        list(tensors),
        FLAGS.batch_size,
        [int(num) for num in FLAGS.length_boundaries.split(",")],
        num_threads=FLAGS.num_threads_per_queue,
        capacity=FLAGS.queue_capacity,
        dynamic_pad=True,
        allow_smaller_final_batch=True)[1]

import functools

import numpy as np  # required by _map_document_to_indices below
import tensorflow as tf

from .. import flags
from ..flags import FLAGS


class _RcFileReader:
    def __init__(self):
        # 0 -> null, 1 -> unknown
        self._word_indices = flags.word_indices

    def read(self, filename_queue):
        key, value = tf.WholeFileReader().read(filename_queue)
        return (key, *self._read_record(value))

    def _read_record(self, string):
        def read_record(string):
            context, question, answer = string.split("\n\n")[1:4]
            context = self._map_document_to_indices(context)
            question = self._map_document_to_indices(question)
            return (context,
                    question,
                    self._map_word_to_index(answer),
                    len(context),
                    len(question))

        context, question, answer, context_length, question_length = tf.py_func(
            read_record,
            [string],
            [tf.int32, tf.int32, tf.int32, tf.int32, tf.int32],
            name="read_rc_data")

        context_length.set_shape([])
        question_length.set_shape([])

        print(tf.reshape(context, [context_length]).get_shape())

        return (tf.reshape(context, [context_length]),
                tf.reshape(question, [question_length]),
                tf.reshape(answer, []))

    def _map_word_to_index(self, word):
        return self._word_indices[word] if word in self._word_indices else 1  # unknown

    def _map_document_to_indices(self, document):
        return np.array([self._map_word_to_index(word)
                         for word in document.split()],
                        dtype=np.int32)


def read_files(filename_queue):
    tensors = _RcFileReader().read(filename_queue)
    return tf.contrib.training.bucket_by_sequence_length(
        tf.shape(tensors[1])[0],
        list(tensors),
        FLAGS.batch_size,
        [int(num) for num in FLAGS.length_boundaries.split(",")],
        num_threads=FLAGS.num_threads_per_queue,
        capacity=FLAGS.queue_capacity,
        dynamic_pad=True,
        allow_smaller_final_batch=True)[1]
Python
0.000003
55eac8bed7e08c245642c1292ebc644fcbd8e12a
Add jobs serializers' tests
polyaxon/api/jobs/serializers.py
polyaxon/api/jobs/serializers.py
from rest_framework import fields, serializers
from rest_framework.exceptions import ValidationError

from db.models.jobs import Job, JobStatus
from libs.spec_validation import validate_job_spec_config


class JobStatusSerializer(serializers.ModelSerializer):
    uuid = fields.UUIDField(format='hex', read_only=True)
    job = fields.SerializerMethodField()

    class Meta:
        model = JobStatus
        exclude = ('id',)

    def get_job(self, obj):
        return obj.job.uuid.hex


class JobSerializer(serializers.ModelSerializer):
    uuid = fields.UUIDField(format='hex', read_only=True)
    user = fields.SerializerMethodField()
    project = fields.SerializerMethodField()
    project_name = fields.SerializerMethodField()
    started_at = fields.DateTimeField(read_only=True)
    finished_at = fields.DateTimeField(read_only=True)

    class Meta:
        model = Job
        fields = (
            'uuid', 'unique_name', 'user', 'sequence', 'description',
            'created_at', 'updated_at', 'last_status', 'started_at',
            'finished_at', 'is_running', 'is_done', 'is_clone',
            'project', 'project_name',)

    def get_user(self, obj):
        return obj.user.username

    def get_project(self, obj):
        return obj.project.uuid.hex

    def get_project_name(self, obj):
        return obj.project.unique_name


class JobDetailSerializer(JobSerializer):
    original = fields.SerializerMethodField()
    resources = fields.SerializerMethodField()

    class Meta(JobSerializer.Meta):
        fields = JobSerializer.Meta.fields + (
            'original', 'original_job', 'description', 'config', 'resources',)
        extra_kwargs = {'original_job': {'write_only': True}}

    def get_original(self, obj):
        return obj.original_job.unique_name if obj.original_job else None

    def get_resources(self, obj):
        return obj.resources.to_dict() if obj.resources else None


class JobCreateSerializer(serializers.ModelSerializer):
    user = fields.SerializerMethodField()

    class Meta:
        model = Job
        fields = ('user', 'description', 'config',)

    def get_user(self, obj):
        return obj.user.username

    def validate_config(self, config):
        """We only validate the config if passed.

        Also we use the JobSpecification to check if this config
        was intended as job.
        """
        spec = validate_job_spec_config(config)
        if spec.is_job:
            # Resume normal creation
            return config

        # Raise an error to tell the user to use job creation instead
        raise ValidationError('Current job creation could not be performed.\n'
                              'The reason is that the specification sent correspond '
                              'to a `{}`.\n'.format(spec.kind))

from rest_framework import fields, serializers
from rest_framework.exceptions import ValidationError

from db.models.jobs import Job, JobStatus
from libs.spec_validation import validate_job_spec_config


class JobStatusSerializer(serializers.ModelSerializer):
    uuid = fields.UUIDField(format='hex', read_only=True)
    job = fields.SerializerMethodField()

    class Meta:
        model = JobStatus
        exclude = ('id',)

    def get_job(self, obj):
        return obj.job.uuid.hex


class JobSerializer(serializers.ModelSerializer):
    uuid = fields.UUIDField(format='hex', read_only=True)
    user = fields.SerializerMethodField()
    project = fields.SerializerMethodField()
    project_name = fields.SerializerMethodField()
    started_at = fields.DateTimeField(read_only=True)
    finished_at = fields.DateTimeField(read_only=True)

    class Meta:
        model = Job
        fields = (
            'uuid', 'unique_name', 'user', 'sequence', 'description',
            'created_at', 'updated_at', 'last_status', 'started_at',
            'finished_at', 'is_running', 'is_done', 'is_clone',
            'project', 'project_name',)

    def get_user(self, obj):
        return obj.user.username

    def get_project(self, obj):
        return obj.project.uuid.hex

    def get_project_name(self, obj):
        return obj.project.unique_name


class JobDetailSerializer(JobSerializer):
    original = fields.SerializerMethodField()
    resources = fields.SerializerMethodField()

    class Meta(JobSerializer.Meta):
        fields = JobSerializer.Meta.fields + (
            'original', 'original_job', 'description', 'config', 'resources',)
        extra_kwargs = {'original_job': {'write_only': True}}

    def get_original(self, obj):
        return obj.original_job.unique_name if obj.original_job else None

    def get_resources(self, obj):
        return obj.resources.to_dict() if obj.resources else None


class JobCreateSerializer(serializers.ModelSerializer):
    user = fields.SerializerMethodField()

    class Meta:
        model = Job
        fields = ('user', 'description', 'config',)

    def get_user(self, obj):
        return obj.user.username

    def validate_config(self, config):
        """We only validate the config if passed.

        Also we use the JobSpecification to check if this config
        was intended as job.
        """
        # config is optional
        if not config:
            return config

        spec = validate_job_spec_config(config)
        if spec.is_job:
            # Resume normal creation
            return config

        # Raise an error to tell the user to use job creation instead
        raise ValidationError('Current job creation could not be performed.\n'
                              'The reason is that the specification sent correspond '
                              'to a `{}`.\n'.format(spec.kind))

    def validate(self, attrs):
        if self.initial_data.get('check_specification') and not attrs.get('config'):
            raise ValidationError('Experiment expects a `config`.')
        return attrs

    def create(self, validated_data):
        """Check the params or set the value from the specification."""
        if not validated_data.get('declarations') and validated_data.get('config'):
            config = validate_job_spec_config(validated_data['config'])
            validated_data['declarations'] = config.declarations
        return super(JobCreateSerializer, self).create(validated_data=validated_data)
Python
0
16bf079d1b139db08988fdb3cc1ff818cecfc12e
Add ModelTranslationAdminMixin.
linguist/admin.py
linguist/admin.py
# -*- coding: utf-8 -*-
from django.contrib import admin

from .models import Translation


class ModelTranslationAdminMixin(object):
    """
    Mixin for model admin classes.
    """
    pass


class TranslationAdmin(admin.ModelAdmin):
    """
    Translation model admin options.
    """
    pass

admin.site.register(Translation, TranslationAdmin)

# -*- coding: utf-8 -*-
from django.contrib import admin

from .models import Translation


class TranslationAdmin(admin.ModelAdmin):
    pass

admin.site.register(Translation, TranslationAdmin)
Python
0
cfeb26b8c591b6d61f3184de74b2a37a2c2c21cc
Fix `lintreview register`
lintreview/cli.py
lintreview/cli.py
import argparse
import lintreview.github as github

from flask import url_for
from lintreview.web import app


def main():
    parser = create_parser()
    args = parser.parse_args()
    args.func(args)


def register_hook(args):
    credentials = None
    if args.login_user and args.login_pass:
        credentials = {
            'GITHUB_USER': args.login_user,
            'GITHUB_PASSWORD': args.login_pass
        }

    with app.app_context():
        if credentials:
            credentials['GITHUB_URL'] = app.config['GITHUB_URL']
            gh = github.get_client(
                credentials,
                args.user,
                args.repo)
        else:
            gh = github.get_client(
                app.config,
                args.user,
                args.repo)
        endpoint = url_for('start_review', _external=True)
        github.register_hook(gh, endpoint, args.user, args.repo)


def remove_hook(args):
    print 'unregister'
    print args


def create_parser():
    desc = """
    Command line utilities for lintreview.
    """
    parser = argparse.ArgumentParser(description=desc)

    commands = parser.add_subparsers(
        title="Subcommands",
        description="Valid subcommands")

    desc = """
    Register webhooks for a given user & repo
    The installed webhook will be used to trigger lint
    reviews as pull requests are opened/updated.
    """
    register = commands.add_parser('register', help=desc)
    register.add_argument(
        '-u', '--user',
        dest='login_user',
        help="The user that has admin rights to the repo "
             "you are adding hooks to. Useful when the user "
             "in settings is not the administrator of "
             "your repositories.")
    register.add_argument(
        '-p', '--password',
        dest='login_pass',
        help="The password of the admin user.")
    register.add_argument('user',
                          help="The user or organization the repo is under.")
    register.add_argument('repo',
                          help="The repository to install a hook into.")
    register.set_defaults(func=register_hook)

    desc = """
    Unregister webhooks for a given user & repo.
    """
    remove = commands.add_parser('unregister', help=desc)
    remove.add_argument(
        '-u', '--user',
        dest='login_user',
        help="The user that has admin rights to the repo you "
             "are removing hooks from. Useful when the "
             "user in settings is not the administrator of "
             "your repositories.")
    remove.add_argument(
        '-p', '--password',
        dest='login_pass',
        help="The password of the admin user.")
    remove.add_argument('user',
                        help="The user or organization the repo is under.")
    remove.add_argument('repo',
                        help="The repository to remove a hook from.")
    remove.set_defaults(func=remove_hook)

    return parser


if __name__ == '__main__':
    main()

import argparse
import lintreview.github as github

from lintreview.web import app


def main():
    parser = create_parser()
    args = parser.parse_args()
    args.func(args)


def register_hook(args):
    credentials = None
    if args.login_user and args.login_pass:
        credentials = {
            'GITHUB_USER': args.login_user,
            'GITHUB_PASSWORD': args.login_pass
        }
    github.register_hook(app, args.user, args.repo, credentials)


def remove_hook(args):
    print 'unregister'
    print args


def create_parser():
    desc = """
    Command line utilities for lintreview.
    """
    parser = argparse.ArgumentParser(description=desc)

    commands = parser.add_subparsers(
        title="Subcommands",
        description="Valid subcommands")

    desc = """
    Register webhooks for a given user & repo
    The installed webhook will be used to trigger lint
    reviews as pull requests are opened/updated.
    """
    register = commands.add_parser('register', help=desc)
    register.add_argument(
        '-u', '--user',
        dest='login_user',
        help="The user that has admin rights to the repo "
             "you are adding hooks to. Useful when the user "
             "in settings is not the administrator of "
             "your repositories.")
    register.add_argument(
        '-p', '--password',
        dest='login_pass',
        help="The password of the admin user.")
    register.add_argument('user',
                          help="The user or organization the repo is under.")
    register.add_argument('repo',
                          help="The repository to install a hook into.")
    register.set_defaults(func=register_hook)

    desc = """
    Unregister webhooks for a given user & repo.
    """
    remove = commands.add_parser('unregister', help=desc)
    remove.add_argument(
        '-u', '--user',
        dest='login_user',
        help="The user that has admin rights to the repo you "
             "are removing hooks from. Useful when the "
             "user in settings is not the administrator of "
             "your repositories.")
    remove.add_argument(
        '-p', '--password',
        dest='login_pass',
        help="The password of the admin user.")
    remove.add_argument('user',
                        help="The user or organization the repo is under.")
    remove.add_argument('repo',
                        help="The repository to remove a hook from.")
    remove.set_defaults(func=remove_hook)

    return parser


if __name__ == '__main__':
    main()
Python
0
01d65552b406ef21a5ab4f53fd20cdd9ed6c55f8
support github ping events
lintreview/web.py
lintreview/web.py
import logging
import pkg_resources

from flask import Flask, request, Response

from lintreview.config import load_config
from lintreview.github import get_client
from lintreview.github import get_lintrc
from lintreview.tasks import process_pull_request
from lintreview.tasks import cleanup_pull_request

config = load_config()
app = Flask("lintreview")
app.config.update(config)

log = logging.getLogger(__name__)
version = pkg_resources.get_distribution('lintreview').version


@app.route("/ping")
def ping():
    return "lint-review: %s pong\n" % (version,)


@app.route("/review/start", methods=["POST"])
def start_review():
    event = request.headers.get('X-Github-Event')
    if event == 'ping':
        return Response(status=200)

    try:
        action = request.json["action"]
        pull_request = request.json["pull_request"]
        number = pull_request["number"]
        base_repo_url = pull_request["base"]["repo"]["git_url"]
        head_repo_url = pull_request["head"]["repo"]["git_url"]
        user = pull_request["base"]["repo"]["owner"]["login"]
        repo = pull_request["base"]["repo"]["name"]
    except Exception as e:
        log.error("Got an invalid JSON body. '%s'", e)
        return Response(status=403,
                        response="You must provide a valid JSON body\n")

    log.info("Received GitHub pull request notification for "
             "%s %s, (%s) from: %s",
             base_repo_url, number, action, head_repo_url)

    if action not in ("opened", "synchronize", "reopened", "closed"):
        log.info("Ignored '%s' action." % action)
        return Response(status=204)

    if action == "closed":
        return close_review(user, repo, pull_request)

    gh = get_client(app.config, user, repo)
    try:
        lintrc = get_lintrc(gh)
        log.debug("lintrc file contents '%s'", lintrc)
    except Exception as e:
        log.warn("Cannot download .lintrc file for '%s', "
                 "skipping lint checks.", base_repo_url)
        log.warn(e)
        return Response(status=204)
    try:
        log.info("Scheduling pull request for %s/%s %s", user, repo, number)
        process_pull_request.delay(user, repo, number, lintrc)
    except:
        log.error('Could not publish job to celery. Make sure its running.')
        return Response(status=500)
    return Response(status=204)


def close_review(user, repo, pull_request):
    try:
        log.info("Scheduling cleanup for %s/%s", user, repo)
        cleanup_pull_request.delay(user, repo, pull_request['number'])
    except:
        log.error('Could not publish job to celery. '
                  'Make sure its running.')
    return Response(status=204)

import logging
import pkg_resources

from flask import Flask, request, Response

from lintreview.config import load_config
from lintreview.github import get_client
from lintreview.github import get_lintrc
from lintreview.tasks import process_pull_request
from lintreview.tasks import cleanup_pull_request

config = load_config()
app = Flask("lintreview")
app.config.update(config)

log = logging.getLogger(__name__)
version = pkg_resources.get_distribution('lintreview').version


@app.route("/ping")
def ping():
    return "lint-review: %s pong\n" % (version,)


@app.route("/review/start", methods=["POST"])
def start_review():
    try:
        action = request.json["action"]
        pull_request = request.json["pull_request"]
        number = pull_request["number"]
        base_repo_url = pull_request["base"]["repo"]["git_url"]
        head_repo_url = pull_request["head"]["repo"]["git_url"]
        user = pull_request["base"]["repo"]["owner"]["login"]
        repo = pull_request["base"]["repo"]["name"]
    except Exception as e:
        log.error("Got an invalid JSON body. '%s'", e)
        return Response(status=403,
                        response="You must provide a valid JSON body\n")

    log.info("Received GitHub pull request notification for "
             "%s %s, (%s) from: %s",
             base_repo_url, number, action, head_repo_url)

    if action not in ("opened", "synchronize", "reopened", "closed"):
        log.info("Ignored '%s' action." % action)
        return Response(status=204)

    if action == "closed":
        return close_review(user, repo, pull_request)

    gh = get_client(app.config, user, repo)
    try:
        lintrc = get_lintrc(gh)
        log.debug("lintrc file contents '%s'", lintrc)
    except Exception as e:
        log.warn("Cannot download .lintrc file for '%s', "
                 "skipping lint checks.", base_repo_url)
        log.warn(e)
        return Response(status=204)
    try:
        log.info("Scheduling pull request for %s/%s %s", user, repo, number)
        process_pull_request.delay(user, repo, number, lintrc)
    except:
        log.error('Could not publish job to celery. Make sure its running.')
        return Response(status=500)
    return Response(status=204)


def close_review(user, repo, pull_request):
    try:
        log.info("Scheduling cleanup for %s/%s", user, repo)
        cleanup_pull_request.delay(user, repo, pull_request['number'])
    except:
        log.error('Could not publish job to celery. '
                  'Make sure its running.')
    return Response(status=204)
Python
0
7594763e5e6167c15fa7898b13283e875c13c099
Update BotPMError.py
resources/Dependencies/DecoraterBotCore/BotPMError.py
resources/Dependencies/DecoraterBotCore/BotPMError.py
# coding=utf-8
"""
DecoraterBotCore
~~~~~~~~~~~~~~~~~~~

Core to DecoraterBot

:copyright: (c) 2015-2017 Decorater
:license: MIT, see LICENSE for more details.

"""
import discord

__all__ = ['BotPMError']


class BotPMError:
    """
    Class for PMing bot errors.
    """
    def __init__(self, bot):
        self.bot = bot

    def construct_reply(self, message):
        """Constructs a bot reply."""
        svr_name = message.channel.server.name
        cnl_name = message.channel.name
        msginfo = 'Missing the Send Message Permissions in the ' \
                  '{0} server on the {1} channel.'
        unabletosendmessageerror = msginfo.format(svr_name, cnl_name)
        return unabletosendmessageerror

    async def resolve_send_message_error(self, ctx):
        """
        Resolves Errors when Sending messages.
        :param ctx: Message Context.
        :return: Nothing.
        """
        await self.resolve_send_message_error_old(
            ctx.message)

    async def resolve_send_message_error_old(self, message):
        """
        Resolves Errors when Sending messages.
        :param message: Message.
        :return: Nothing.
        """
        unabletosendmessageerror = self.construct_reply(
            message)
        try:
            await self.bot.send_message(
                message.author,
                content=unabletosendmessageerror)
        except discord.errors.Forbidden:
            return

# coding=utf-8
"""
DecoraterBotCore
~~~~~~~~~~~~~~~~~~~

Core to DecoraterBot

:copyright: (c) 2015-2017 Decorater
:license: MIT, see LICENSE for more details.

"""
import discord

__all__ = ['BotPMError']


class BotPMError:
    """
    Class for PMing bot errors.
    """
    def __init__(self, bot):
        self.bot = bot

    def construct_reply(self, message):
        """Constructs a bot reply."""
        svr_name = message.channel.server.name
        cnl_name = message.channel.name
        msginfo = 'Missing the Send Message Permissions in the ' \
                  '{0} server on the {1} channel.'
        unabletosendmessageerror = msginfo.format(svr_name, cnl_name)
        return unabletosendmessageerror

    async def resolve_send_message_error(self, ctx):
        """
        Resolves Errors when Sending messages.
        :param ctx: Message Context.
        :return: Nothing.
        """
        await self.resolve_send_message_error_old(
            ctx.message)

    async def resolve_send_message_error_old(self, message):
        """
        Resolves Errors when Sending messages.
        :param message: Message.
        :return: Nothing.
        """
        unabletosendmessageerror = self.construct_reply(
            message)
        try:
            await bot.send_message(
                message.author,
                content=unabletosendmessageerror)
        except discord.errors.Forbidden:
            return
Python
0.000001
cb7f6efbbbe640a2c360f7dc93cb2bc87b2e0ab2
fix example
entity_extract/examples/pos_extraction.py
entity_extract/examples/pos_extraction.py
#from entity_extract.extractor.extractors import PosExtractor
from entity_extract.extractor.utilities import SentSplit, Tokenizer
from entity_extract.extractor.extractors import PosExtractor
from entity_extract.extractor.pos_tagger import PosTagger

# Initialize Services
sentSplitter = SentSplit()
tokenizer = Tokenizer()
tagger = PosTagger()

#p = PosExtractor()
sents = sentSplitter.split('This is a sentence about the pie in the sky. If would be interesting. If only there was')

for sent in sents:
    tokens = tokenizer.tokenize(sent)
    tags = tagger.tag(tokens)
    print tags

#from entity_extract.extractor.extractors import PosExtractor
from entity_extract.extractor.utilities import SentSplit, Tokenizer
from entity_extract.extractor.extractors import PosExtractor
from entity_extract.extractor.pos_tagger import PosTagger

#p = PosExtractor()
sents = p.SentPlit('This is a sentence about the pie in the sky. If would be interesting. If only there was')

for sent in sents:
    tokens = Tokenizer.tokenize(sent)
    tags = PosTagger(tokens)
    print tags
Python
0.0001
8dc5b661149fe075d703042cb32af7bbc0bd5d4a
Switch encoding.py to python3 type hints.
encoding.py
encoding.py
"""Script for encoding a payload into an image.""" import argparse import pathlib from PIL import Image, ImageMath import utilities def argument_parser() -> argparse.ArgumentParser: """Returns a configured argparser.ArgumentParser for this program.""" parser = argparse.ArgumentParser( description='Encode SECRETS into a picture', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( 'host_image', type=pathlib.Path, help='The image that will hide the information.') parser.add_argument( 'payload_image', type=pathlib.Path, help='The image that will be hidden within the host image.') parser.add_argument( '--significant_digits', type=int, default=1, help='The number of least significant digits available to encode over.') parser.add_argument( '--display', action='store_true', default=False, help='Display the encoded image generated by this program.') parser.add_argument( '--save', action='store_true', help='Save the encoded image generated by this program.') parser.add_argument( '--output_dir', type=pathlib.Path, default='.', help=( 'A specific location to which the processed image will be saved. ' 'If not specified, the current working directory will be used.')) return parser def encode(host: Image, payload: Image, n_significant_digits: int) -> Image: """Encode a payload into an image (using the last n_significant_digits).""" output_rgb_channels = [] for host_channel, payload_channel in zip(host.split(), payload.split()): # Mask out all but the least significant byte, encoding payload there mask = utilities.bit_mask(n_significant_digits) expression = ( "convert(" "(host & (0xff - {mask})) | (payload & {mask}), 'L')".format( mask=mask)) output_rgb_channels.append( ImageMath.eval( expression, host=host_channel, payload=payload_channel)) return Image.merge('RGB', output_rgb_channels) def main(): args = argument_parser().parse_args() host = Image.open(args.host_image) payload = Image.open(args.payload_image) encoded = encode(host, payload, args.significant_digits) # Display the encoded image if args.display: encoded.show() # Save the encoded image, if the user wants us to if args.save: user_response = ( utilities.query_user( 'GONNA SAVE ENCODED IMAGE to "{0:s}"; GAR, IS THAT K???'.format( str(args.output_dir.absolute())))) if user_response: p = args.host_image # Short reference to the host_image path filename = '{0:s}{1:s}{2:s}'.format(p.stem, '.encoded', p.suffix) encoded.save( args.output_dir.joinpath(filename), format='png', quality=100) if __name__ == '__main__': main()
"""Script for encoding a payload into an image.""" import argparse import pathlib from PIL import Image, ImageMath import utilities def argument_parser(): # type: () -> argparse.ArgumentParser """Returns a configured argparser.ArgumentParser for this program.""" parser = argparse.ArgumentParser( description='Encode SECRETS into a picture', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( 'host_image', type=pathlib.Path, help='The image that will hide the information.') parser.add_argument( 'payload_image', type=pathlib.Path, help='The image that will be hidden within the host image.') parser.add_argument( '--significant_digits', type=int, default=1, help='The number of least significant digits available to encode over.') parser.add_argument( '--display', action='store_true', default=False, help='Display the encoded image generated by this program.') parser.add_argument( '--save', action='store_true', help='Save the encoded image generated by this program.') parser.add_argument( '--output_dir', type=pathlib.Path, default='.', help=( 'A specific location to which the processed image will be saved. ' 'If not specified, the current working directory will be used.')) return parser def encode(host, payload, n_significant_digits): # type: (PIL.Image, PIL.Image, int) -> PIL.Image """Encode a payload into an image (using the last n_significant_digits).""" output_rgb_channels = [] for host_channel, payload_channel in zip(host.split(), payload.split()): # Mask out all but the least significant byte, encoding payload there mask = utilities.bit_mask(n_significant_digits) expression = ( "convert(" "(host & (0xff - {mask})) | (payload & {mask}), 'L')".format( mask=mask)) output_rgb_channels.append( ImageMath.eval( expression, host=host_channel, payload=payload_channel)) return Image.merge('RGB', output_rgb_channels) def main(): args = argument_parser().parse_args() host = Image.open(args.host_image) payload = Image.open(args.payload_image) encoded = encode(host, payload, args.significant_digits) # Display the encoded image if args.display: encoded.show() # Save the encoded image, if the user wants us to if args.save: user_response = ( utilities.query_user( 'GONNA SAVE ENCODED IMAGE to "{0:s}"; GAR, IS THAT K???'.format( str(args.output_dir.absolute())))) if user_response: p = args.host_image # Short reference to the host_image path filename = '{0:s}{1:s}{2:s}'.format(p.stem, '.encoded', p.suffix) encoded.save( args.output_dir.joinpath(filename), format='png', quality=100) if __name__ == '__main__': main()
Python
0
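The encoding.py change above replaces PEP 484 type comments with Python 3 annotations. A minimal before/after sketch of the same transformation on a toy function (illustrative code, not taken from the record):

# Python-2-compatible type comment form:
def scale(values, factor):
    # type: (list, float) -> list
    return [v * factor for v in values]

# Equivalent Python-3-only annotation form, the style the commit adopts:
def scale_annotated(values: list, factor: float) -> list:
    return [v * factor for v in values]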
015fcfaaed0a3ff54801f5821df4f5527255ab06
Update SSL.py
gevent_openssl/SSL.py
gevent_openssl/SSL.py
"""gevent_openssl.SSL - gevent compatibility with OpenSSL.SSL. """ import sys import socket import OpenSSL.SSL class SSLConnection(object): """OpenSSL Connection Wapper""" def __init__(self, context, sock): self._context = context self._sock = sock self._connection = OpenSSL.SSL.Connection(context, sock) self._makefile_refs = 0 def __getattr__(self, attr): if attr not in ('_context', '_sock', '_connection', '_makefile_refs'): return getattr(self._connection, attr) def __iowait(self, io_func, *args, **kwargs): timeout = self._sock.gettimeout() or 0.1 fd = self._sock.fileno() while True: try: return io_func(*args, **kwargs) except (OpenSSL.SSL.WantReadError, OpenSSL.SSL.WantX509LookupError): sys.exc_clear() _, _, errors = select.select([fd], [], [fd], timeout) if errors: break except OpenSSL.SSL.WantWriteError: sys.exc_clear() _, _, errors = select.select([], [fd], [fd], timeout) if errors: break def accept(self): sock, addr = self._sock.accept() client = OpenSSL.SSL.Connection(sock._context, sock) return client, addr def do_handshake(self): return self.__iowait(self._connection.do_handshake) def connect(self, *args, **kwargs): return self.__iowait(self._connection.connect, *args, **kwargs) def send(self, data, flags=0): try: return self.__iowait(self._connection.send, data, flags) except OpenSSL.SSL.SysCallError as e: if e[0] == -1 and not data: # errors when writing empty strings are expected and can be ignored return 0 raise def recv(self, bufsiz, flags=0): pending = self._connection.pending() if pending: return self._connection.recv(min(pending, bufsiz)) try: return self.__iowait(self._connection.recv, bufsiz, flags) except OpenSSL.SSL.ZeroReturnError: return '' except OpenSSL.SSL.SysCallError as e: if e[0] == -1 and 'Unexpected EOF' in e[1]: # errors when reading empty strings are expected and can be ignored return '' raise def read(self, bufsiz, flags=0): return self.recv(bufsiz, flags) def write(self, buf, flags=0): return self.sendall(buf, flags) def close(self): if self._makefile_refs < 1: self._connection = None if self._sock: socket.socket.close(self._sock) else: self._makefile_refs -= 1 def makefile(self, mode='r', bufsize=-1): self._makefile_refs += 1 return socket._fileobject(self, mode, bufsize, close=True)
"""gevent_openssl.SSL - gevent compatibility with OpenSSL.SSL. """ import sys import socket import OpenSSL.SSL class Connection(object): def __init__(self, context, sock): self._context = context self._sock = sock self._connection = OpenSSL.SSL.Connection(context, sock) self._makefile_refs = 0 def __getattr__(self, attr): if attr not in ('_context', '_sock', '_connection', '_makefile_refs'): return getattr(self._connection, attr) def __wait_sock_io(self, sock, io_func, *args, **kwargs): timeout = self._sock.gettimeout() or 0.1 fd = self._sock.fileno() while True: try: return io_func(*args, **kwargs) except (OpenSSL.SSL.WantReadError, OpenSSL.SSL.WantX509LookupError): sys.exc_clear() _, _, errors = select.select([fd], [], [fd], timeout) if errors: break except OpenSSL.SSL.WantWriteError: sys.exc_clear() _, _, errors = select.select([], [fd], [fd], timeout) if errors: break def accept(self): sock, addr = self._sock.accept() client = OpenSSL.SSL.Connection(sock._context, sock) return client, addr def do_handshake(self): return self.__wait_sock_io(self._sock, self._connection.do_handshake) def connect(self, *args, **kwargs): return self.__wait_sock_io(self._sock, self._connection.connect, *args, **kwargs) def send(self, data, flags=0): try: return self.__wait_sock_io(self._sock, self._connection.send, data, flags) except OpenSSL.SSL.SysCallError as e: if e[0] == -1 and not data: # errors when writing empty strings are expected and can be ignored return 0 raise def recv(self, bufsiz, flags=0): pending = self._connection.pending() if pending: return self._connection.recv(min(pending, bufsiz)) try: return self.__wait_sock_io(self._sock, self._connection.recv, bufsiz, flags) except OpenSSL.SSL.ZeroReturnError: return '' def read(self, bufsiz, flags=0): return self.recv(bufsiz, flags) def write(self, buf, flags=0): return self.sendall(buf, flags) def close(self): if self._makefile_refs < 1: self._connection = None if self._sock: socket.socket.close(self._sock) else: self._makefile_refs -= 1 def makefile(self, mode='r', bufsize=-1): self._makefile_refs += 1 return socket._fileobject(self, mode, bufsize, close=True)
Python
0.000001
78ef8bbb721d6673ba576726c57dfae963153153
fix bugs in evaluate.py
evaluate.py
evaluate.py
from envs import create_env

import numpy as np
import time
import argparse


def evaluate_loop(env, network, max_episodes, args):
    sleep_time = args.sleep_time
    render = args.render
    verbose = args.verbose

    last_state = env.reset()
    last_features = network.get_initial_features()
    n_episode, step = 0, 0
    episode_reward = np.zeros((max_episodes,), dtype='float32')
    episode_length = np.zeros((max_episodes,), dtype='float32')

    print('evaluating for {} episodes...'.format(max_episodes))
    while n_episode < max_episodes:
        fetched = network.act(last_state, *last_features)
        action, features = fetched[0], fetched[2:]
        state, reward, terminal, _ = env.step(action.argmax())
        if render:
            env.render()

        episode_reward[n_episode] += reward
        if verbose:
            print("#step = {}, action = {}".format(step, action.argmax()))
            print("reward = {}".format(reward))

        if terminal:
            last_state = env.reset()
            last_features = network.get_initial_features()
            print("#episode = {}, #step = {}, reward sum = {}".format(
                n_episode, step, episode_reward[n_episode]))
            episode_length[n_episode] = step
            step = 0
            n_episode += 1
        else:
            last_state = state
            last_features = features
            step += 1

        time.sleep(sleep_time)

    print('evaluation done.')
    print('avg score = {}'.format(episode_reward.mean()))
    print('avg episode length = {}'.format(episode_length.mean()))


def main(args):
    env_id = args.env_id
    ckpt_dir = args.ckpt_dir
    max_episodes = args.max_episodes

    # env
    env = create_env(env_id, 0, 1)
    if args.render:
        env.render()
        # work-around to the nasty env.render() failing issue when working with tensorflow
        # see https://github.com/openai/gym/issues/418
    import tensorflow as tf
    from model import Convx2LSTMActorCritic

    # model
    sess = tf.Session()
    with tf.variable_scope("global"):
        network = Convx2LSTMActorCritic(env.observation_space.shape,
                                        env.action_space.n)
    init = tf.global_variables_initializer()
    sess.run(init)

    # load model parameters
    checkpoint = tf.train.get_checkpoint_state(ckpt_dir)
    if checkpoint and checkpoint.model_checkpoint_path:
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("checkpoint loaded:", checkpoint.model_checkpoint_path)
    else:
        raise Exception('cannot find checkpoint path')

    # run evaluating
    with sess.as_default():
        evaluate_loop(env, network, max_episodes, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('--env-id', default="BreakoutDeterministic-v3",
                        help='Environment id')
    parser.add_argument('--ckpt-dir', default="save/breakout/train",
                        help='Checkpoint directory path')
    parser.add_argument('--max-episodes', default=2, type=int,
                        help='Number of episodes to evaluate')
    parser.add_argument('--sleep-time', default=0.0, type=float,
                        help='sleeping time')
    parser.add_argument('--render', action='store_true', help='render screen')
    parser.add_argument('--verbose', action='store_true', help='verbose')
    args = parser.parse_args()

    main(args=args)

from envs import create_env

import numpy as np
import time
import argparse


def evaluate_loop(env, network, max_episodes, args):
    sleep_time = args.sleep_time
    render = args.render
    verbose = args.verbose

    last_state = env.reset()
    last_features = network.get_initial_features()
    n_episode, step = 0, 0
    episode_reward = np.zeros((max_episodes,), dtype='float32')
    episode_length = np.zeros((max_episodes,), dtype='float32')

    print('evaluating for {} episodes...'.format(max_episodes))
    while n_episode < max_episodes:
        fetched = network.act(last_state, *last_features)
        action = fetched[0]
        state, reward, terminal, _ = env.step(action.argmax())
        if render:
            env.render()

        episode_reward[n_episode] += reward
        if verbose:
            print("#step = {}, action = {}".format(step, action.argmax()))
            print("reward = {}".format(reward))

        if terminal:
            print("#episode = {}, #step = {}, reward sum = {}".format(
                n_episode, step, episode_reward[n_episode]))
            episode_length[n_episode] = step
            env.reset()
            step = 0
            n_episode += 1
        else:
            step += 1

        time.sleep(sleep_time)

    print('evaluation done.')
    print('avg score = {}'.format(episode_reward.mean()))
    print('avg episode length = {}'.format(episode_length.mean()))


def main(args):
    env_id = args.env_id
    ckpt_dir = args.ckpt_dir
    max_episodes = args.max_episodes

    # env
    env = create_env(env_id, 0, 1)
    if args.render:
        env.render()
        # work-around to the nasty env.render() failing issue when working with tensorflow
        # see https://github.com/openai/gym/issues/418
    import tensorflow as tf
    from model import Convx2LSTMActorCritic

    # model
    with tf.variable_scope("global"):
        network = Convx2LSTMActorCritic(env.observation_space.shape,
                                        env.action_space.n)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()

    # load model parameters
    checkpoint = tf.train.get_checkpoint_state(ckpt_dir)
    if checkpoint and checkpoint.model_checkpoint_path:
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("checkpoint loaded:", checkpoint.model_checkpoint_path)
    else:
        raise Exception('cannot find checkpoint path')

    # run evaluating
    with sess.as_default():
        evaluate_loop(env, network, max_episodes, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('--env-id', default="BreakoutDeterministic-v3",
                        help='Environment id')
    parser.add_argument('--ckpt-dir', default="save/breakout/train",
                        help='Checkpoint directory path')
    parser.add_argument('--max-episodes', default=2, type=int,
                        help='Number of episodes to evaluate')
    parser.add_argument('--sleep-time', default=0.0, type=float,
                        help='sleeping time')
    parser.add_argument('--render', action='store_true', help='render screen')
    parser.add_argument('--verbose', action='store_true', help='verbose')
    args = parser.parse_args()

    main(args=args)
Python
0.000001
2e6c80717099fb6c6ca59d9d6193807b1aabfa8b
Update docstring
git_update/actions.py
git_update/actions.py
"""Git repo actions.""" import logging import os import pathlib import click from git import InvalidGitRepositoryError, Repo from git.exc import GitCommandError LOG = logging.getLogger(__name__) def crawl(path): """Crawl the path for possible Git directories. Args: path (str): Original path to crawl. """ main_dir = pathlib.Path(path) if not main_dir.is_dir(): main_dir = main_dir.parent main_dir = main_dir.resolve() LOG.info('Finding directories in %s', main_dir) dir_list = [directory for directory in main_dir.iterdir() if directory.is_dir() and directory.parts[-1] != '.git'] LOG.debug('List of directories: %s', dir_list) for directory in dir_list: update_repo(os.path.join(main_dir, directory)) def check_changes(current, fetch_info_list, branch_list): """Check for changes in local branches and remote. Args: current: Dict(reference: commit) from before `git pull` operation. fetch_info_list: List of remote references from `git pull`. branch_list: List of branches in repository. """ log = logging.getLogger(__name__) for fetch_info in fetch_info_list: log.debug('Checking for change in %s', fetch_info.name) try: if current[fetch_info.ref] != fetch_info.commit: log.info('%s has updates, %s..%s', fetch_info.name, current[fetch_info.ref], fetch_info.commit) except KeyError: log.info('New reference %s', fetch_info.name) for branch in branch_list: log.debug('Checking for change in %s', branch.name) if current[branch] != branch.commit: log.info('%s updated, %s..%s', branch.name, current[branch], branch.commit) return True def update_repo(directory): """Update a repository. Returns: False if bad repository. True if everything worked. """ log = logging.getLogger(__name__) try: repo = Repo(directory) current = {ref: ref.commit for ref in repo.refs} click.secho('Updating {0}'.format(repo.git_dir), fg='blue') remote = repo.remote() fetch_info_list = remote.pull() except InvalidGitRepositoryError: log.warning('%s is not a valid repository.', directory) return False except ValueError: log.warning('Check remotes for %s: %s', directory, repo.remotes) return False except GitCommandError as error: log.fatal('Pull failed. %s', error) return False check_changes(current, fetch_info_list, repo.branches) return True
"""Git repo actions.""" import logging import os import pathlib import click from git import InvalidGitRepositoryError, Repo from git.exc import GitCommandError LOG = logging.getLogger(__name__) def crawl(path): """Crawl the path for possible Git directories.""" main_dir = pathlib.Path(path) if not main_dir.is_dir(): main_dir = main_dir.parent main_dir = main_dir.resolve() LOG.info('Finding directories in %s', main_dir) dir_list = [directory for directory in main_dir.iterdir() if directory.is_dir() and directory.parts[-1] != '.git'] LOG.debug('List of directories: %s', dir_list) for directory in dir_list: update_repo(os.path.join(main_dir, directory)) def check_changes(current, fetch_info_list, branch_list): """Check for changes in local branches and remote. Args: current: Dict(reference: commit) from before `git pull` operation. fetch_info_list: List of remote references from `git pull`. branch_list: List of branches in repository. """ log = logging.getLogger(__name__) for fetch_info in fetch_info_list: log.debug('Checking for change in %s', fetch_info.name) try: if current[fetch_info.ref] != fetch_info.commit: log.info('%s has updates, %s..%s', fetch_info.name, current[fetch_info.ref], fetch_info.commit) except KeyError: log.info('New reference %s', fetch_info.name) for branch in branch_list: log.debug('Checking for change in %s', branch.name) if current[branch] != branch.commit: log.info('%s updated, %s..%s', branch.name, current[branch], branch.commit) return True def update_repo(directory): """Update a repository. Returns: False if bad repository. True if everything worked. """ log = logging.getLogger(__name__) try: repo = Repo(directory) current = {ref: ref.commit for ref in repo.refs} click.secho('Updating {0}'.format(repo.git_dir), fg='blue') remote = repo.remote() fetch_info_list = remote.pull() except InvalidGitRepositoryError: log.warning('%s is not a valid repository.', directory) return False except ValueError: log.warning('Check remotes for %s: %s', directory, repo.remotes) return False except GitCommandError as error: log.fatal('Pull failed. %s', error) return False check_changes(current, fetch_info_list, repo.branches) return True
Python
0
ef1f303072307f259e8555e0148c29677b4f7d6f
Fix approve permissions typing
idb/ipc/approve.py
idb/ipc/approve.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

from typing import Set, Dict, Any

from idb.grpc.types import CompanionClient
from idb.grpc.idb_pb2 import ApproveRequest


MAP: Dict[str, Any] = {
    "photos": ApproveRequest.PHOTOS,
    "camera": ApproveRequest.CAMERA,
    "contacts": ApproveRequest.CONTACTS,
}


async def client(
    client: CompanionClient, bundle_id: str, permissions: Set[str]
) -> None:
    await client.stub.approve(
        ApproveRequest(
            bundle_id=bundle_id,
            permissions=[MAP[permission] for permission in permissions],
        )
    )

#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

from typing import Set, Dict  # noqa F401

from idb.grpc.types import CompanionClient
from idb.grpc.idb_pb2 import ApproveRequest


MAP = {  # type: Dict[str, ApproveRequest.Permission]
    "photos": ApproveRequest.PHOTOS,
    "camera": ApproveRequest.CAMERA,
    "contacts": ApproveRequest.CONTACTS,
}


async def client(
    client: CompanionClient, bundle_id: str, permissions: Set[str]
) -> None:
    print(f"Sending {[MAP[permission] for permission in permissions]}")
    await client.stub.approve(
        ApproveRequest(
            bundle_id=bundle_id,
            permissions=[MAP[permission] for permission in permissions],
        )
    )
Python
0.000009
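The approve.py fix above moves from a `# type:` comment on a dict literal to a PEP 526 variable annotation, falling back to Any when the value type is awkward to express. A generic sketch of the two styles (the names here are illustrative, not from idb):

from typing import Any, Dict

# Comment form: invisible at runtime and easy to misplace on multi-line literals.
COLORS = {"red": 0xFF0000, "green": 0x00FF00}  # type: Dict[str, int]

# PEP 526 annotation (Python 3.6+): declared exactly where the checker reads it;
# Any is a pragmatic fallback when the real value type isn't importable.
SETTINGS: Dict[str, Any] = {"retries": 3, "verbose": False}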
7cb9703b1af4138e8f1a036245125d723add55a3
Fix error handling to return sensible HTTP error codes.
grano/views/__init__.py
grano/views/__init__.py
from colander import Invalid
from flask import request
from werkzeug.exceptions import HTTPException

from grano.core import app
from grano.lib.serialisation import jsonify
from grano.views.base_api import blueprint as base_api
from grano.views.entities_api import blueprint as entities_api
from grano.views.relations_api import blueprint as relations_api
from grano.views.schemata_api import blueprint as schemata_api
from grano.views.sessions_api import blueprint as sessions_api
from grano.views.projects_api import blueprint as projects_api
from grano.views.accounts_api import blueprint as accounts_api
from grano.views.files_api import blueprint as files_api
from grano.views.imports_api import blueprint as imports_api
from grano.views.pipelines_api import blueprint as pipelines_api
from grano.views.log_entries_api import blueprint as log_entries_api
from grano.views.permissions_api import blueprint as permissions_api
from grano.views.auth import check_auth


@app.errorhandler(401)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(410)
@app.errorhandler(500)
def handle_exceptions(exc):
    if isinstance(exc, HTTPException):
        message = exc.get_description(request.environ)
        message = message.replace('<p>', '').replace('</p>', '')
        body = {
            'status': exc.code,
            'name': exc.name,
            'message': message
        }
        headers = exc.get_headers(request.environ)
    else:
        body = {
            'status': 500,
            'name': exc.__class__.__name__,
            'message': unicode(exc)
        }
        headers = {}
    return jsonify(body, status=body.get('status'), headers=headers)


@app.errorhandler(Invalid)
def handle_invalid(exc):
    body = {
        'status': 400,
        'name': 'Invalid Data',
        'message': unicode(exc),
        'errors': exc.asdict()
    }
    return jsonify(body, status=400)


app.register_blueprint(base_api)
app.register_blueprint(entities_api)
app.register_blueprint(relations_api)
app.register_blueprint(schemata_api)
app.register_blueprint(sessions_api)
app.register_blueprint(projects_api)
app.register_blueprint(accounts_api)
app.register_blueprint(files_api)
app.register_blueprint(permissions_api)
app.register_blueprint(imports_api)
app.register_blueprint(pipelines_api)
app.register_blueprint(log_entries_api)

from colander import Invalid
from flask import request

from grano.core import app
from grano.lib.serialisation import jsonify
from grano.views.base_api import blueprint as base_api
from grano.views.entities_api import blueprint as entities_api
from grano.views.relations_api import blueprint as relations_api
from grano.views.schemata_api import blueprint as schemata_api
from grano.views.sessions_api import blueprint as sessions_api
from grano.views.projects_api import blueprint as projects_api
from grano.views.accounts_api import blueprint as accounts_api
from grano.views.files_api import blueprint as files_api
from grano.views.imports_api import blueprint as imports_api
from grano.views.pipelines_api import blueprint as pipelines_api
from grano.views.log_entries_api import blueprint as log_entries_api
from grano.views.permissions_api import blueprint as permissions_api
from grano.views.auth import check_auth


@app.errorhandler(401)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(410)
@app.errorhandler(500)
def handle_exceptions(exc):
    if not hasattr(exc, 'get_description'):
        message = exc.get_description(request.environ)
        message = message.replace('<p>', '').replace('</p>', '')
        body = {
            'status': exc.code,
            'name': exc.name,
            'message': message
        }
        headers = exc.get_headers(request.environ)
    else:
        body = {
            'status': 500,
            'name': exc.__class__.__name__,
            'message': unicode(exc)
        }
        headers = {}
    return jsonify(body, status=exc.code, headers=headers)


@app.errorhandler(Invalid)
def handle_invalid(exc):
    body = {
        'status': 400,
        'name': 'Invalid Data',
        'message': unicode(exc),
        'errors': exc.asdict()
    }
    return jsonify(body, status=400)


app.register_blueprint(base_api)
app.register_blueprint(entities_api)
app.register_blueprint(relations_api)
app.register_blueprint(schemata_api)
app.register_blueprint(sessions_api)
app.register_blueprint(projects_api)
app.register_blueprint(accounts_api)
app.register_blueprint(files_api)
app.register_blueprint(permissions_api)
app.register_blueprint(imports_api)
app.register_blueprint(pipelines_api)
app.register_blueprint(log_entries_api)
Python
0
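The grano fix above replaces an inverted `hasattr` probe with an explicit type check, so `.code` and `.name` are only read from real HTTP errors and everything else maps to a 500. A distilled sketch of the pattern (simplified, not the full handler from the record):

from werkzeug.exceptions import HTTPException

def error_body(exc):
    # Branch on the exception's type rather than probing attributes:
    if isinstance(exc, HTTPException):
        return {'status': exc.code, 'name': exc.name}
    # Plain exceptions carry no HTTP status code, so report a generic 500.
    return {'status': 500, 'name': exc.__class__.__name__}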
b4f0bbb8e9fd198cfa60daa3a01a4a48a0fd18af
Replace assertFalse/assertTrue(a in b)
sahara/tests/unit/plugins/storm/test_config_helper.py
sahara/tests/unit/plugins/storm/test_config_helper.py
# Copyright 2017 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from testtools import testcase

from sahara.plugins.storm import config_helper as s_config
from sahara.plugins.storm import plugin as s_plugin


class TestStormConfigHelper(testcase.TestCase):

    def test_generate_storm_config(self):
        STORM_092 = '0.9.2'
        STORM_101 = '1.0.1'
        STORM_110 = '1.1.0'
        tested_versions = []
        master_hostname = "s-master"
        zk_hostnames = ["s-zoo"]
        configs_092 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_092)
        self.assertIn('nimbus.host', configs_092.keys())
        self.assertNotIn('nimbus.seeds', configs_092.keys())
        tested_versions.append(STORM_092)
        configs_101 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_101)
        self.assertNotIn('nimbus.host', configs_101.keys())
        self.assertIn('nimbus.seeds', configs_101.keys())
        self.assertIn('client.jartransformer.class', configs_101.keys())
        self.assertEqual(configs_101['client.jartransformer.class'],
                         'org.apache.storm.hack.StormShadeTransformer')
        tested_versions.append(STORM_101)
        configs_110 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_110)
        self.assertNotIn('nimbus.host', configs_110.keys())
        self.assertIn('nimbus.seeds', configs_110.keys())
        self.assertIn('client.jartransformer.class', configs_110.keys())
        self.assertEqual(configs_110['client.jartransformer.class'],
                         'org.apache.storm.hack.StormShadeTransformer')
        tested_versions.append(STORM_110)
        storm = s_plugin.StormProvider()
        self.assertEqual(storm.get_versions(), tested_versions)

# Copyright 2017 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from testtools import testcase

from sahara.plugins.storm import config_helper as s_config
from sahara.plugins.storm import plugin as s_plugin


class TestStormConfigHelper(testcase.TestCase):

    def test_generate_storm_config(self):
        STORM_092 = '0.9.2'
        STORM_101 = '1.0.1'
        STORM_110 = '1.1.0'
        tested_versions = []
        master_hostname = "s-master"
        zk_hostnames = ["s-zoo"]
        configs_092 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_092)
        self.assertTrue('nimbus.host' in configs_092.keys())
        self.assertFalse('nimbus.seeds' in configs_092.keys())
        tested_versions.append(STORM_092)
        configs_101 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_101)
        self.assertFalse('nimbus.host' in configs_101.keys())
        self.assertTrue('nimbus.seeds' in configs_101.keys())
        self.assertTrue('client.jartransformer.class' in configs_101.keys())
        self.assertEqual(configs_101['client.jartransformer.class'],
                         'org.apache.storm.hack.StormShadeTransformer')
        tested_versions.append(STORM_101)
        configs_110 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_110)
        self.assertFalse('nimbus.host' in configs_110.keys())
        self.assertTrue('nimbus.seeds' in configs_110.keys())
        self.assertTrue('client.jartransformer.class' in configs_110.keys())
        self.assertEqual(configs_110['client.jartransformer.class'],
                         'org.apache.storm.hack.StormShadeTransformer')
        tested_versions.append(STORM_110)
        storm = s_plugin.StormProvider()
        self.assertEqual(storm.get_versions(), tested_versions)
Python
0.000311
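The only change in the test record above is swapping `assertTrue(x in d)` / `assertFalse(x in d)` for `assertIn` / `assertNotIn`, which report the missing key and the container on failure instead of a bare "False is not true". A minimal standalone sketch of the difference, using a hypothetical config dict rather than the Sahara helpers:

import unittest

class MembershipAssertions(unittest.TestCase):
    def test_membership(self):
        configs = {'nimbus.seeds': ['s-master']}  # hypothetical stand-in
        # On failure these print the key and the dict contents;
        # assertTrue('x' in configs) would only say "False is not true".
        self.assertIn('nimbus.seeds', configs)
        self.assertNotIn('nimbus.host', configs)

if __name__ == '__main__':
    unittest.main()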
d98ac8c127caf4b70c8b0da9b6b6415c47f0f3eb
remove cruft
munge/codec/__init__.py
munge/codec/__init__.py
import os import imp __all__ = ['django', 'mysql', 'json', 'yaml'] __codecs = {} def add_codec(exts, cls): if not isinstance(exts, tuple): exts = tuple(exts) # check for dupe extensions dupe_exts = set(ext for k in __codecs.keys() for ext in k).intersection(exts) if dupe_exts: raise ValueError("duplicate extension %s" % str(dupe_exts)) __codecs[exts] = cls def get_codecs(): return __codecs def list_codecs(): return [ext[0] for ext in get_codecs().keys()] def get_codec(tag, codecs=get_codecs()): for exts, cls in codecs.items(): if tag in exts: return cls def find_datafile(name, search_path=('.'), codecs=get_codecs()): """ find all matching data files in search_path search_path: path of directories to load from codecs: allow to override from list of installed returns array of tuples (codec_object, filename) """ rv = [] if isinstance(search_path, basestring): search_path = [search_path] #print "search path ", str(search_path) ext = os.path.splitext(name)[1][1:] cls = get_codec(ext) if cls: for each in search_path: fq_filename = os.path.join(each, name) if os.path.exists(fq_filename): rv.append((cls, fq_filename)) for exts, obj in codecs.items(): for ext in exts: filename = "%s.%s" % (name, ext) for each in search_path: fq_filename = os.path.join(each, filename) if os.path.exists(fq_filename): rv.append((obj, fq_filename)) return rv def load_datafile(name, search_path=('.'), codecs=get_codecs(), **kwargs): """ find datafile and load them from codec TODO only does the first one kwargs: default = if passed will return that on failure instead of throwing """ mod = find_datafile(name, search_path, codecs) if not mod: if 'default' in kwargs: return kwargs['default'] raise IOError("file %s not found in search path %s" %(name, str(search_path))) (codec, datafile) = mod[0] return codec().load(open(datafile))
import os import imp __all__ = ['django', 'mysql', 'json', 'yaml'] __codecs = {} # TODO move to .load? def _do_find_import(directory, skiplist=None, suffixes=None): # explicitly look for None, suffixes=[] might be passed to not load anything if suffixes is None: suffixes = [t[0] for t in imp.get_suffixes()] loaded = dict() for module in os.listdir(directory): name, ext = os.path.splitext(module) if name in loaded: continue if name in skiplist: continue if ext in suffixes: #print "finding %s in %s" % (name, directory) #mod = imp.load_module(name, *imp.find_module(name, [directory])) try: imp_args = imp.find_module(name, [directory]) mod = imp.load_module(name, *imp_args) loaded[name] = mod.__file__ finally: try: imp_args[0].close() except Exception: pass return loaded def find_import(): this = os.path.split(__file__) this_dir = this[0] # remove trailing c if cached bytecode #this_file = this[1].rstrip('c') _do_find_import(this_dir, ('all', '__init__')) def add_codec(exts, cls): if not isinstance(exts, tuple): exts = tuple(exts) # check for dupe extensions dupe_exts = set(ext for k in __codecs.keys() for ext in k).intersection(exts) if dupe_exts: raise ValueError("duplicate extension %s" % str(dupe_exts)) __codecs[exts] = cls def get_codecs(): return __codecs def list_codecs(): return [ext[0] for ext in get_codecs().keys()] def get_codec(tag, codecs=get_codecs()): for exts, cls in codecs.items(): if tag in exts: return cls def find_datafile(name, search_path=('.'), codecs=get_codecs()): """ find all matching data files in search_path search_path: path of directories to load from codecs: allow to override from list of installed returns array of tuples (codec_object, filename) """ rv = [] if isinstance(search_path, basestring): search_path = [search_path] #print "search path ", str(search_path) ext = os.path.splitext(name)[1][1:] cls = get_codec(ext) if cls: for each in search_path: fq_filename = os.path.join(each, name) if os.path.exists(fq_filename): rv.append((cls, fq_filename)) for exts, obj in codecs.items(): for ext in exts: filename = "%s.%s" % (name, ext) for each in search_path: fq_filename = os.path.join(each, filename) if os.path.exists(fq_filename): rv.append((obj, fq_filename)) return rv def load_datafile(name, search_path=('.'), codecs=get_codecs(), **kwargs): """ find datafile and load them from codec TODO only does the first one kwargs: default = if passed will return that on failure instead of throwing """ mod = find_datafile(name, search_path, codecs) if not mod: if 'default' in kwargs: return kwargs['default'] raise IOError("file %s not found in search path %s" %(name, str(search_path))) (codec, datafile) = mod[0] return codec().load(open(datafile))
Python
0
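The code that survives the "remove cruft" commit is the codec registry: `add_codec` maps tuples of file extensions to codec classes and refuses duplicate registrations. A self-contained sketch of that pattern (the toy codec class and extension names are placeholders):

_codecs = {}

def add_codec(exts, cls):
    """Register cls for a tuple of file extensions, refusing duplicates."""
    if not isinstance(exts, tuple):
        exts = tuple(exts)
    taken = set(ext for key in _codecs for ext in key)
    dupes = taken.intersection(exts)
    if dupes:
        raise ValueError("duplicate extension %s" % str(dupes))
    _codecs[exts] = cls

class TomlCodec(object):  # placeholder codec
    pass

add_codec(('toml', 'tml'), TomlCodec)
print(_codecs)  # {('toml', 'tml'): <class '...TomlCodec'>}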
603c36aec2a4704bb4cf41c224194a5f83f9babe
Set the module as auto_install
sale_payment_method_automatic_workflow/__openerp__.py
sale_payment_method_automatic_workflow/__openerp__.py
# -*- coding: utf-8 -*- ############################################################################## # # Author: Guewen Baconnier # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## {'name': 'Sale Payment Method - Automatic Reconcile', 'version': '1.0', 'author': ['Camptocamp', 'Akretion'], 'license': 'AGPL-3', 'category': 'Generic Modules/Others', 'depends': ['sale_payment_method', 'sale_automatic_workflow'], 'website': 'http://www.camptocamp.com', 'data': [], 'test': [], 'installable': True, 'auto_install': True, }
# -*- coding: utf-8 -*- ############################################################################## # # Author: Guewen Baconnier # Copyright 2015 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## {'name': 'Sale Payment Method - Automatic Reconcile', 'version': '1.0', 'author': ['Camptocamp', 'Akretion'], 'license': 'AGPL-3', 'category': 'Generic Modules/Others', 'depends': ['sale_payment_method', 'sale_automatic_workflow'], 'website': 'http://www.camptocamp.com', 'data': [], 'test': [], 'installable': True, 'auto_install': False, }
Python
0.000001
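The diff above flips a single manifest key: with `auto_install` set to True, OpenERP/Odoo installs this glue module automatically once every module in `depends` is present, which is the usual convention for compatibility modules that only make sense when both dependencies exist. A minimal manifest illustrating the idea (module and dependency names are placeholders):

# __openerp__.py of a hypothetical glue module
{
    'name': 'A/B Glue',
    'version': '1.0',
    'depends': ['module_a', 'module_b'],  # placeholder dependencies
    'data': [],
    'installable': True,
    # True: installed automatically as soon as all of the modules
    # listed in 'depends' are installed in the database.
    'auto_install': True,
}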
27b0e86f15a2f89b2f8715dffa5cade17b7f5adf
Update singletons.py
omstd_lefito/lib/singletons.py
omstd_lefito/lib/singletons.py
# -*- coding: utf-8 -*- __ALL__ = ["Displayer", "IntellCollector"] # ------------------------------------------------------------------------- class Displayer: """Output system""" instance = None # --------------------------------------------------------------------- def __new__(cls, *args, **kwargs): if cls.instance is None: cls.instance = object.__new__(cls, *args, **kwargs) cls.__initialized = False return cls.instance # --------------------------------------------------------------------- def config(self, **kwargs): self.out_file = kwargs.get("out_file", None) self.out_screen = kwargs.get("out_screen", True) self.verbosity = kwargs.get("verbosity", 0) if self.out_file: self.out_file_handler = open(self.out_file, "w") # --------------------------------------------------------------------- def display(self, message): if self.verbosity > 0: self.__display(message) # --------------------------------------------------------------------- def display_verbosity(self, message): if self.verbosity > 1: self.__display(message) # --------------------------------------------------------------------- def display_more_verbosity(self, message): if self.verbosity > 2: self.__display(message) # --------------------------------------------------------------------- def __display(self, message): if self.out_screen: print(message) if self.out_file_handler: self.out_file_handler.write(message) # --------------------------------------------------------------------- def __init__(self): if not self.__initialized: self.__initialized = True self.out_file = None self.out_file_handler = None self.out_screen = True self.verbosity = 0 # ------------------------------------------------------------------------- class IntellCollector: """gathered data container""" instance = None # --------------------------------------------------------------------- def __new__(cls, *args, **kwargs): if cls.instance is None: cls.instance = object.__new__(cls, *args, **kwargs) cls.__initialized = False return cls.instance # --------------------------------------------------------------------- def config(self, **kwargs): self.target = kwargs.get("target", None) # -------------------------------------------------------------------------- def getsess(self): out = Displayer() if 'set-cookie' in self.originalhead: out.display(self.originalhead['set-cookie']) m = re.search("(PHPSESSID=(?P<value>.*);)", self.originalhead['set-cookie']) if m: out.display(m.group('value')) self.originalsess = m.group('value') else: self.originalsess = '' else: self.originalsess = '' # --------------------------------------------------------------------- def gather(self, params): out = Displayer() if params.url is not None: self.target = params.url else: self.target = str(input("url: ")) originalreq = dorequest(self.target, params) m = re.search(b"(charset=(?P<value>.*)\")", originalreq['body']) if m: self.charset = m.group('value').decode() self.originalreq_lines = [x.decode(self.charset) for x in originalreq['body'].splitlines()] self.originalhead = originalreq['head'] out.display(originalreq['head']) self.getsess() self.parsedurl = urlparse(self.target) self.parametros = self.parsedurl.query.split('&') # --------------------------------------------------------------------- def show(self): out = Displayer() out.display("target: %s" % str(self.target)) out.display("originalreq_lines: %s" % str(self.originalreq_lines)) out.display("originalhead: %s" % str(self.originalhead)) out.display("originalsess: %s" % str(self.originalsess)) out.display("parsedurl: %s" % str(self.parsedurl)) out.display("parametros: %s" % str(self.parametros)) out.display("charset: %s" % str(self.charset)) # --------------------------------------------------------------------- def __init__(self): if not self.__initialized: self.__initialized = True self.target = None self.originalreq_lines = [] self.originalhead = None self.originalsess = None self.parsedurl = None self.parametros = [] self.charset = 'utf-8'
# -*- coding: utf-8 -*- __ALL__ = ["Displayer", "IntellCollector"] # ------------------------------------------------------------------------- class Displayer: """Output system""" instance = None # --------------------------------------------------------------------- def __new__(cls, *args, **kwargs): if cls.instance is None: cls.instance = object.__new__(cls, *args, **kwargs) cls.__initialized = False return cls.instance # --------------------------------------------------------------------- def config(self, **kwargs): self.out_file = kwargs.get("out_file", None) self.out_screen = kwargs.get("out_screen", True) self.verbosity = kwargs.get("verbosity", 0) if self.out_file: self.out_file_handler = open(self.out_file, "w") # --------------------------------------------------------------------- def display(self, message): if self.verbosity > 0: self.__display(message) # --------------------------------------------------------------------- def display_verbosity(self, message): if self.verbosity > 1: self.__display(message) # --------------------------------------------------------------------- def display_more_verbosity(self, message): if self.verbosity > 2: self.__display(message) # --------------------------------------------------------------------- def __display(self, message): if self.out_screen: print(message) if self.out_file_handler: self.out_file_handler.write(message) # --------------------------------------------------------------------- def __init__(self): if not self.__initialized: self.__initialized = True self.out_file = None self.out_file_handler = None self.out_screen = True self.verbosity = 0 # ------------------------------------------------------------------------- class IntellCollector: """gathered data container""" instance = None # --------------------------------------------------------------------- def __new__(cls, *args, **kwargs): if cls.instance is None: cls.instance = object.__new__(cls, *args, **kwargs) cls.__initialized = False return cls.instance # --------------------------------------------------------------------- def config(self, **kwargs): self.target = kwargs.get("target", None) # --------------------------------------------------------------------- def gather(self, params): out = Displayer() if params.url is not None: self.target = params.url else: self.target = str(input("url: ")) originalreq = dorequest(self.target, params) m = re.search(b"(charset=(?P<value>.*)\")", originalreq['body']) if m: self.charset = m.group('value').decode() self.originalreq_lines = [x.decode(self.charset) for x in originalreq['body'].splitlines()] self.originalhead = originalreq['head'] out.display(originalreq['head']) self.originalsess = getsess(originalreq['head']) self.parsedurl = urlparse(self.target) self.parametros = self.parsedurl.query.split('&') # --------------------------------------------------------------------- def show(self): out = Displayer() out.display("target: %s" % str(self.target)) out.display("originalreq_lines: %s" % str(self.originalreq_lines)) out.display("originalhead: %s" % str(self.originalhead)) out.display("originalsess: %s" % str(self.originalsess)) out.display("parsedurl: %s" % str(self.parsedurl)) out.display("parametros: %s" % str(self.parametros)) out.display("charset: %s" % str(self.charset)) # --------------------------------------------------------------------- def __init__(self): if not self.__initialized: self.__initialized = True self.target = None self.originalreq_lines = [] self.originalhead = None self.originalsess = None self.parsedurl = None self.parametros = [] self.charset = 'utf-8'
Python
0.000001
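Both classes in the record above use the same `__new__`-based singleton idiom with a one-time `__initialized` guard, so `__init__` does not reset state when the class is instantiated again. A compact, runnable sketch of that idiom (unlike the record's code, this one passes no extra arguments to `object.__new__()`, which modern Python is stricter about):

class Singleton:
    instance = None

    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = object.__new__(cls)
            cls.instance._initialized = False
        return cls.instance

    def __init__(self):
        if not self._initialized:   # body runs exactly once
            self._initialized = True
            self.counter = 0

a = Singleton()
a.counter += 1
b = Singleton()
print(a is b, b.counter)  # True 1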
6a6ad3224cbb28a3f109a35413a2e675cdbf1b09
Implement domain functions
openprovider/modules/domain.py
openprovider/modules/domain.py
# coding=utf-8 from openprovider.modules import E, OE, common from openprovider.models import Model def _domain(domain): sld, tld = domain.split('.', 1) return E.domain( E.name(sld), E.extension(tld), ) class DomainModule(common.Module): """Bindings to API methods in the domain module.""" def check(self, domain): """ Check availability for a single domain. Returns the domain's status as a string (either "active" or "free"). """ response = self.request(self._check_cmd([domain])) return response.data.array[0].item[0].status def check_many(self, domains): """ Check availability for a number of domains. Returns a dictionary mapping the domain names to their statuses as a string ("active"/"free"). """ response = self.request(self._check_cmd(domains)) items = response.data.array[0].item return dict((i.domain, i.status) for i in items) def _check_cmd(self, domains): return E.checkDomainRequest( E.domains( E.array( *[E.item( E.name(domain.split(".")[0]), E.extension(domain.split(".")[1]) ) for domain in domains] ) ) ) def create_domain_request(self, domain, period, owner_handle, admin_handle, tech_handle, billing_handle=None, reseller_handle=None, ns_group=None, ns_template_name=None, name_servers=None, use_domicile=False, promo_code=None, autorenew=None, comments=None, dnssec_keys=None, application_mode=None): nameservers = E.nameServers(E.array(*[E.item(E.name(ns.name), OE('ip', ns.ip), OE('ip6', ns.ip6)) for ns in name_servers])) if name_servers else None request = E.createDomainRequest( _domain(domain), E.period(period), E.ownerHandle(owner_handle), E.adminHandle(admin_handle), E.techHandle(tech_handle), OE('billingHandle', billing_handle), OE('resellerHandle', reseller_handle), OE('nsGroup', ns_group), OE('nsTemplateName', ns_template_name), nameservers, OE('useDomicile', use_domicile, int), OE('promoCode', promo_code), OE('autorenew', autorenew), OE('comments', comments), OE('dnssecKeys', dnssec_keys), OE('applicationMode', application_mode), ) response = self.request(request) return response.as_model(Model) def delete_domain_request(self, domain, request_type='delete'): self.request(E.deleteDomainRequest(_domain(domain), E('type', request_type))) def modify_domain_request(self, domain, owner_handle=None, admin_handle=None, tech_handle=None, billing_handle=None, reseller_handle=None, ns_group=None, ns_template_name=None, name_servers=None, use_domicile=False, promo_code=None, autorenew=None, comments=None, dnssec_keys=None, application_mode=None): nameservers = E.nameServers(E.array(*[E.item(E.name(ns.name), OE('ip', ns.ip), OE('ip6', ns.ip6)) for ns in name_servers])) if name_servers else None request = E.modifyDomainRequest( _domain(domain), OE('ownerHandle', owner_handle), OE('adminHandle', admin_handle), OE('techHandle', tech_handle), OE('billingHandle', billing_handle), OE('resellerHandle', reseller_handle), OE('nsGroup', ns_group), OE('nsTemplateName', ns_template_name), nameservers, OE('useDomicile', use_domicile, int), OE('promoCode', promo_code), OE('autorenew', autorenew), OE('comments', comments), OE('dnssecKeys', dnssec_keys), OE('applicationMode', application_mode), ) self.request(request) def retrieve_domain_request(self, domain, additional_data=False, registry_details=False): request = E.retrieveDomainRequest(_domain(domain)) response = self.request(request) return response.as_model(Model)
# coding=utf-8 from openprovider.modules import E, common class DomainModule(common.Module): """Bindings to API methods in the domain module.""" def check(self, domain): """ Check availability for a single domain. Returns the domain's status as a string (either "active" or "free"). """ response = self.request(self._check_cmd([domain])) return response.data.array[0].item[0].status def check_many(self, domains): """ Check availability for a number of domains. Returns a dictionary mapping the domain names to their statuses as a string ("active"/"free"). """ response = self.request(self._check_cmd(domains)) items = response.data.array[0].item return dict((i.domain, i.status) for i in items) def _check_cmd(self, domains): return E.checkDomainRequest( E.domains( E.array( *[E.item( E.name(domain.split(".")[0]), E.extension(domain.split(".")[1]) ) for domain in domains] ) ) )
Python
0.000505
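Two small patterns in the new domain module carry most of the weight: `_domain` splits only on the first dot so multi-label extensions survive, and `OE(...)` yields an element only when a value is present, so optional API fields simply drop out of the request. A stand-in sketch of both ideas (this `OE` is a hypothetical reimplementation returning tuples, not the library's XML helper):

def split_domain(domain):
    # Split only on the first dot: 'example.co.uk' -> ('example', 'co.uk')
    sld, tld = domain.split('.', 1)
    return sld, tld

def OE(tag, value, transform=None):
    # Optional element: None when the value is absent.
    if value is None:
        return None
    return (tag, transform(value) if transform else value)

def build_request(*parts):
    # Drop the optional elements that evaluated to None.
    return [p for p in parts if p is not None]

print(split_domain('example.co.uk'))
print(build_request(('period', 1), OE('promoCode', None), OE('useDomicile', True, int)))
# ('example', 'co.uk')
# [('period', 1), ('useDomicile', 1)]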
989988aa604b5a125c765294080777c57ec6c535
Fix bug in OppsDetail, add channel_long_slug
opps/articles/views/generic.py
opps/articles/views/generic.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from django.views.generic.detail import DetailView from django.views.generic.list import ListView from django.contrib.sites.models import get_current_site from django.shortcuts import get_object_or_404 from django.utils import timezone from django import template from django.conf import settings from opps.articles.utils import set_context_data from opps.channels.models import Channel class OppsList(ListView): context_object_name = "context" paginate_by = settings.OPPS_PAGINATE_BY limit = settings.OPPS_VIEWS_LIMIT slug = None def get_context_data(self, **kwargs): return set_context_data(self, OppsList, **kwargs) @property def template_name(self): domain_folder = self.type if self.site.id > 1: domain_folder = "{0}/{1}".format(self.site, self.type) return '{0}/{1}.html'.format(domain_folder, self.long_slug) @property def queryset(self): self.site = get_current_site(self.request) try: self.long_slug = self.kwargs.get( 'channel__long_slug', Channel.objects.get_homepage(site=self.site).long_slug) except AttributeError: self.long_slug = None return None self.channel = get_object_or_404(Channel, site=self.site, long_slug=self.long_slug, date_available__lte=timezone.now(), published=True) self.channel_long_slug = [self.long_slug] self.channel_long_slug.append( [children.long_slug for children in self.channel.get_children()]) self.article = self.model.objects.filter( site=self.site, channel_long_slug__in=self.channel_long_slug, date_available__lte=timezone.now(), published=True)[:self.limit] return self.article class OppsDetail(DetailView): context_object_name = "context" limit = settings.OPPS_VIEWS_LIMIT channel_long_slug = [] def get_context_data(self, **kwargs): return set_context_data(self, OppsDetail, **kwargs) @property def template_name(self): domain_folder = self.type if self.site.id > 1: domain_folder = "{0}/{1}".format(self.site, self.type) try: _template = '{0}/{1}/{2}.html'.format( domain_folder, self.long_slug, self.slug) template.loader.get_template(_template) except template.TemplateDoesNotExist: _template = '{0}/{1}.html'.format(domain_folder, self.long_slug) return _template @property def queryset(self): self.site = get_current_site(self.request) self.slug = self.kwargs.get('slug') try: self.long_slug = self.kwargs.get( 'channel__long_slug', Channel.objects.get_homepage(site=self.site).long_slug) except AttributeError: self.long_slug = None return None self.article = self.model.objects.filter( site=self.site, channel_long_slug=self.long_slug, slug=self.slug, date_available__lte=timezone.now(), published=True) return self.article
#!/usr/bin/env python # -*- coding: utf-8 -*- from django.views.generic.detail import DetailView from django.views.generic.list import ListView from django.contrib.sites.models import get_current_site from django.shortcuts import get_object_or_404 from django.utils import timezone from django import template from django.conf import settings from opps.articles.utils import set_context_data from opps.channels.models import Channel class OppsList(ListView): context_object_name = "context" paginate_by = settings.OPPS_PAGINATE_BY limit = settings.OPPS_VIEWS_LIMIT slug = None def get_context_data(self, **kwargs): return set_context_data(self, OppsList, **kwargs) @property def template_name(self): domain_folder = self.type if self.site.id > 1: domain_folder = "{0}/{1}".format(self.site, self.type) return '{0}/{1}.html'.format(domain_folder, self.long_slug) @property def queryset(self): self.site = get_current_site(self.request) try: self.long_slug = self.kwargs.get( 'channel__long_slug', Channel.objects.get_homepage(site=self.site).long_slug) except AttributeError: self.long_slug = None return None self.channel = get_object_or_404(Channel, site=self.site, long_slug=self.long_slug, date_available__lte=timezone.now(), published=True) self.channel_long_slug = [self.long_slug] self.channel_long_slug.append( [children.long_slug for children in self.channel.get_children()]) self.article = self.model.objects.filter( site=self.site, channel_long_slug__in=self.channel_long_slug, date_available__lte=timezone.now(), published=True)[:self.limit] return self.article class OppsDetail(DetailView): context_object_name = "context" limit = settings.OPPS_VIEWS_LIMIT def get_context_data(self, **kwargs): return set_context_data(self, OppsDetail, **kwargs) @property def template_name(self): domain_folder = self.type if self.site.id > 1: domain_folder = "{0}/{1}".format(self.site, self.type) try: _template = '{0}/{1}/{2}.html'.format( domain_folder, self.long_slug, self.slug) template.loader.get_template(_template) except template.TemplateDoesNotExist: _template = '{0}/{1}.html'.format(domain_folder, self.long_slug) return _template @property def queryset(self): self.site = get_current_site(self.request) self.slug = self.kwargs.get('slug') try: self.long_slug = self.kwargs.get( 'channel__long_slug', Channel.objects.get_homepage(site=self.site).long_slug) except AttributeError: self.long_slug = None return None self.article = self.model.objects.filter( site=self.site, channel_long_slug=self.long_slug, slug=self.slug, date_available__lte=timezone.now(), published=True) return self.article
Python
0
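The `template_name` property in `OppsDetail` tries the most specific template first and falls back to the channel-level one when it does not exist. A filesystem stand-in for that lookup, with Django's `get_template` probe replaced by `os.path.exists` (directory and slug names hypothetical):

import os

def pick_template(template_dir, domain_folder, long_slug, slug):
    specific = '{0}/{1}/{2}.html'.format(domain_folder, long_slug, slug)
    if os.path.exists(os.path.join(template_dir, specific)):
        return specific
    # Fall back to the channel template when no per-article one exists.
    return '{0}/{1}.html'.format(domain_folder, long_slug)

print(pick_template('templates', 'article', 'news', 'hello-world'))
# 'article/news.html' unless templates/article/news/hello-world.html exists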
4a6060f476aebac163dbac8f9822539596379c0a
Use current_app.babel_instance instead of babel
welt2000/__init__.py
welt2000/__init__.py
from flask import Flask, request, session, current_app from flask.ext.babel import Babel from babel.core import negotiate_locale from welt2000.__about__ import ( __title__, __summary__, __uri__, __version__, __author__, __email__, __license__, ) # noqa app = Flask(__name__) app.secret_key = '1234567890' babel = Babel(app) @app.template_global() @babel.localeselector def get_locale(): available = ['en'] available.extend(map(str, current_app.babel_instance.list_translations())) lang = session.get('lang') if lang and lang in available: return lang preferred = map(lambda l: l[0], request.accept_languages) return negotiate_locale(preferred, available) from welt2000 import views # noqa
from flask import Flask, request, session from flask.ext.babel import Babel from babel.core import negotiate_locale from welt2000.__about__ import ( __title__, __summary__, __uri__, __version__, __author__, __email__, __license__, ) # noqa app = Flask(__name__) app.secret_key = '1234567890' babel = Babel(app) translations = ['en'] translations.extend(map(str, babel.list_translations())) @app.template_global() @babel.localeselector def get_locale(): lang = session.get('lang') if lang and lang in translations: return lang preferred = map(lambda l: l[0], request.accept_languages) return negotiate_locale(preferred, translations) from welt2000 import views # noqa
Python
0.000018
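Moving the `list_translations()` call inside the selector means the available locales are computed per request via `current_app`, then matched against the browser's preferences with Babel's `negotiate_locale`. The matching step in isolation (requires the `babel` package; the locale lists are made up):

from babel.core import negotiate_locale

preferred = ['de_DE', 'en']        # e.g. parsed from Accept-Language
available = ['en', 'fr', 'de']     # e.g. 'en' plus listed translations
# No exact 'de_DE' match, so it falls back to the primary language.
print(negotiate_locale(preferred, available))  # 'de'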
58ec62fe47bf6e7acb3302a29fd0df48c4342cec
Enable break and continue in templates
logya/template.py
logya/template.py
# -*- coding: utf-8 -*- import io import os from jinja2 import Environment, BaseLoader, TemplateNotFound, escape def filesource(logya_inst, name, lines=None): """Read and return source of text files. A template function that reads the source of the given file and returns it. The text is escaped so it can be rendered safely on a Web page. The lines keyword argument is used to limit the number of lines returned. A use case is for documentation projects to show the source code used to render the current example. """ fname = os.path.join(logya_inst.dir_site, name) with io.open(fname, 'r', encoding='utf-8') as f: if lines is None: content = f.read() else: content = ''.join(f.readlines()[:lines]) return escape(content) def get_doc(logya_inst, url): """Get document located at given URL.""" return logya_inst.docs.get(url) class Template(): """Class to handle templates.""" def __init__(self, logya_inst): """Initialize template environment.""" self.vars = {} self.dir_templates = logya_inst.dir_templates self.env = Environment(loader=TemplateLoader(self.dir_templates)) # Enable break and continue in templates self.env.add_extension('jinja2.ext.loopcontrols') # self.env.trim_blocks = True # add filesource global to allow for including the source of a file self.env.globals['filesource'] = lambda x, lines=None: filesource( logya_inst, x, lines=lines) self.env.globals['get_doc'] = lambda x: get_doc(logya_inst, x) class TemplateLoader(BaseLoader): """Class to handle template Loading.""" def __init__(self, path): """Set template path.""" self.path = path def get_source(self, environment, template): """Set template source.""" path = os.path.join(self.path, template) if not os.path.exists(path): raise TemplateNotFound(template) mtime = os.path.getmtime(path) with io.open(path, 'r', encoding='utf-8') as f: source = f.read() return source, path, lambda: mtime == os.path.getmtime(path)
# -*- coding: utf-8 -*- import io import os from jinja2 import Environment, BaseLoader, TemplateNotFound, escape def filesource(logya_inst, name, lines=None): """Read and return source of text files. A template function that reads the source of the given file and returns it. The text is escaped so it can be rendered safely on a Web page. The lines keyword argument is used to limit the number of lines returned. A use case is for documentation projects to show the source code used to render the current example. """ fname = os.path.join(logya_inst.dir_site, name) with io.open(fname, 'r', encoding='utf-8') as f: if lines is None: content = f.read() else: content = ''.join(f.readlines()[:lines]) return escape(content) def get_doc(logya_inst, url): """Get document located at given URL.""" return logya_inst.docs.get(url) class Template(): """Class to handle templates.""" def __init__(self, logya_inst): """Initialize template environment.""" self.vars = {} self.dir_templates = logya_inst.dir_templates self.env = Environment(loader=TemplateLoader(self.dir_templates)) # self.env.trim_blocks = True # add filesource global to allow for including the source of a file self.env.globals['filesource'] = lambda x, lines=None: filesource( logya_inst, x, lines=lines) self.env.globals['get_doc'] = lambda x: get_doc(logya_inst, x) class TemplateLoader(BaseLoader): """Class to handle template Loading.""" def __init__(self, path): """Set template path.""" self.path = path def get_source(self, environment, template): """Set template source.""" path = os.path.join(self.path, template) if not os.path.exists(path): raise TemplateNotFound(template) mtime = os.path.getmtime(path) with io.open(path, 'r', encoding='utf-8') as f: source = f.read() return source, path, lambda: mtime == os.path.getmtime(path)
Python
0
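The one-line change in this record loads Jinja2's `loopcontrols` extension, which enables `{% break %}` and `{% continue %}` inside `{% for %}` loops. A self-contained demonstration (requires Jinja2; the template is an example, not from logya):

from jinja2 import Environment

env = Environment(extensions=['jinja2.ext.loopcontrols'])
tpl = env.from_string(
    '{% for n in items %}'
    '{% if n < 0 %}{% continue %}{% endif %}'
    '{% if n > 3 %}{% break %}{% endif %}'
    '{{ n }} '
    '{% endfor %}'
)
print(tpl.render(items=[-1, 1, 2, 3, 9, 4]))  # "1 2 3 "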
6f0740fbd94acc2398f0628552a6329c2a90a348
Allow start and end arguments to take inputs of multiple words such as 'New York'
greengraph/command.py
greengraph/command.py
from argparse import ArgumentParser from matplotlib import pyplot as plt from graph import Greengraph def process(): parser = ArgumentParser( description="Produce graph quantifying the amount of green land between two locations") parser.add_argument("--start", required=True, nargs="+", help="The starting location ") parser.add_argument("--end", required=True, nargs="+", help="The ending location") parser.add_argument("--steps", help="The number of steps between the starting and ending locations, defaults to 10") parser.add_argument("--out", help="The output filename, defaults to graph.png") arguments = parser.parse_args() #mygraph = Greengraph(arguments.start, arguments.end) if arguments.steps: data = mygraph.green_between(arguments.steps) else: data = mygraph.green_between(10) plt.plot(data) # TODO add a title and axis labels to this graph if arguments.out: plt.savefig(arguments.out) else: plt.savefig("graph.png") print arguments.start print arguments.end if __name__ == "__main__": process()
from argparse import ArgumentParser from matplotlib import pyplot as plt from graph import Greengraph def process(): parser = ArgumentParser( description="Produce graph quantifying the amount of green land between two locations") parser.add_argument("--start", required=True, help="The starting location ") parser.add_argument("--end", required=True, help="The ending location") parser.add_argument("--steps", help="The number of steps between the starting and ending locations, defaults to 10") parser.add_argument("--out", help="The output filename, defaults to graph.png") arguments = parser.parse_args() mygraph = Greengraph(arguments.start, arguments.end) if arguments.steps: data = mygraph.green_between(arguments.steps) else: data = mygraph.green_between(10) plt.plot(data) # TODO add a title and axis labels to this graph if arguments.out: plt.savefig(arguments.out) else: plt.savefig("graph.png") if __name__ == "__main__": process()
Python
0.000065
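`nargs="+"` makes argparse collect one or more whitespace-separated words into a list, which is how `--start New York` stops being a parse error. Note the record's new contents also leave `Greengraph(...)` commented out, so the lists are never joined back into a single place name; one common way to consume them is sketched here (not the project's code):

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--start", required=True, nargs="+")
parser.add_argument("--end", required=True, nargs="+")

args = parser.parse_args(["--start", "New", "York", "--end", "London"])
# nargs="+" yields lists; join the words back into one location string.
start, end = " ".join(args.start), " ".join(args.end)
print(start, "->", end)  # New York -> London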
75a47485629725f9035c7a4aa7c154ce30de3b5e
Add new allowed host
greenland/settings.py
greenland/settings.py
""" Django settings for greenland project. Generated by 'django-admin startproject' using Django 1.9.5. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'lc3^jbn=netrea_9o+1+gt-1@r#w$y758%&2%_d-=tg#o89r^x' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ['greenland.herokuapp.com'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'maps' ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', # 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'greenland.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'greenland.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases if 'PORT' in os.environ: DATABASES = {} DATABASES['default'] = dj_database_url.config(conn_max_age=600) else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
""" Django settings for greenland project. Generated by 'django-admin startproject' using Django 1.9.5. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'lc3^jbn=netrea_9o+1+gt-1@r#w$y758%&2%_d-=tg#o89r^x' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'maps' ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', # 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'greenland.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'greenland.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases if 'PORT' in os.environ: DATABASES = {} DATABASES['default'] = dj_database_url.config(conn_max_age=600) else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
Python
0
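The added hostname matters because Django rejects any request whose Host header is not covered by `ALLOWED_HOSTS` (when `DEBUG` is off). A rough sketch of that matching logic for intuition only; Django's real `validate_host` also lets a leading-dot pattern match the bare domain, which this simplification omits:

def host_allowed(host, allowed):
    for pattern in allowed:
        if pattern == '*' or pattern == host:
            return True
        # Leading-dot entries act as subdomain wildcards.
        if pattern.startswith('.') and host.endswith(pattern):
            return True
    return False

print(host_allowed('greenland.herokuapp.com', ['greenland.herokuapp.com']))  # True
print(host_allowed('evil.example.com', ['greenland.herokuapp.com']))         # False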
3fe0a520a458a575117fc8d809f21efd133d2887
Add license file
wikilink/__init__.py
wikilink/__init__.py
""" wiki-link ~~~~~~~~ wiki-link is a web-scraping application to find minimum number of links between two given wiki pages. :copyright: (c) 2016 - 2018 by Tran Ly VU. All Rights Reserved. :license: Apache License 2.0. """ __all__ = ["wiki_link"] __author__ = "Tran Ly Vu (vutransingapore@gmail.com)" __version__ = "1.0.0" __copyright__ = "Copyright (c) 2016 - 2018 Tran Ly Vu. All Rights Reserved." __license__ = "Apache License 2.0"
""" wiki-link ~~~~~~~~ wiki-link is a web-scraping application to find minimum number of links between two given wiki pages. :copyright: (c) 2016 - 2018 by Tran Ly VU. All Rights Reserved. :license: Apache License 2.0. """ __all__ = ["wiki_link"] __author__ = "Tran Ly Vu (vutransingapore@gmail.com)" __version__ = "1.0.0" __copyright__ = "Copyright (c) 2016 - 2018 Tran Ly Vu. All Rights Reserved." __license__ = "Apache License 2.0"
Python
0
7a9f3f6cc880d2bcf0cdac8b5193b471eb2b9095
Refactor Adapter pattern
structural/adapter.py
structural/adapter.py
""" Convert the interface of a class into another interface clients expect. Adapter lets classes work together that couldn't otherwise because of incompatible interfaces. """ import abc class Target(metaclass=abc.ABCMeta): """ Define the domain-specific interface that Client uses. """ def __init__(self, adaptee): self._adaptee = adaptee @abc.abstractmethod def request(self): pass class Adapter(Target): """ Adapt the interface of Adaptee to the Target interface. """ def request(self): self._adaptee.specific_request() class Adaptee: """ Define an existing interface that needs adapting. """ def specific_request(self): pass def main(): adaptee = Adaptee() adapter = Adapter(adaptee) adapter.request() if __name__ == "__main__": main()
""" Convert the interface of a class into another interface clients expect. Adapter lets classes work together that couldn't otherwise because of incompatible interfaces. """ import abc class Target(metaclass=abc.ABCMeta): """ Define the domain-specific interface that Client uses. """ def __init__(self): self._adaptee = Adaptee() @abc.abstractmethod def request(self): pass class Adapter(Target): """ Adapt the interface of Adaptee to the Target interface. """ def request(self): self._adaptee.specific_request() class Adaptee: """ Define an existing interface that needs adapting. """ def specific_request(self): pass def main(): adapter = Adapter() adapter.request() if __name__ == "__main__": main()
Python
0
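The refactor injects the adaptee through `Target.__init__` instead of constructing it inside the class, so one adapter can wrap any compatible object and the abstract class stops depending on a concrete adaptee. The same injection style applied to a concrete toy example (all names hypothetical):

class LegacyPrinter:
    """Adaptee with an interface clients do not expect."""
    def print_document(self, text):
        print("legacy:", text)

class PrinterAdapter:
    """Expose write(), delegating to whichever adaptee was injected."""
    def __init__(self, adaptee):
        self._adaptee = adaptee

    def write(self, text):
        self._adaptee.print_document(text)

PrinterAdapter(LegacyPrinter()).write("hello")  # legacy: hello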
bc40db9fa1c4663db604cb7890de10ef91d6a65e
Use correct name
haproxystats/metrics.py
haproxystats/metrics.py
""" haproxstats.metrics ~~~~~~~~~~~~~~~~~~ This module provides the field names contained in the HAProxy statistics. """ DAEMON_METRICS = [ 'CompressBpsIn', 'CompressBpsOut', 'CompressBpsRateLim', 'ConnRate', 'ConnRateLimit', 'CumConns', 'CumReq', 'CumSslConns', 'CurrConns', 'CurrSslConns', 'Hard_maxconn', 'MaxConnRate', 'MaxSessRate', 'MaxSslConns', 'MaxSslRate', 'MaxZlibMemUsage', 'Maxconn', 'Maxpipes', 'Maxsock', 'Memmax_MB', 'PipesFree', 'PipesUsed', 'Run_queue', 'SessRate', 'SessRateLimit', 'SslBackendKeyRate', 'SslBackendMaxKeyRate', 'SslCacheLookups', 'SslCacheMisses', 'SslFrontendKeyRate', 'SslFrontendMaxKeyRate', 'SslFrontendSessionReuse_pct', 'SslRate', 'SslRateLimit', 'Tasks', 'Ulimit-n', 'Uptime_sec', 'ZlibMemUsage', ] DAEMON_AVG_METRICS = ['Idle_pct'] COMMON = [ 'bin', 'bout', 'dresp', 'hrsp_1xx', 'hrsp_2xx', 'hrsp_3xx', 'hrsp_4xx', 'hrsp_5xx', 'hrsp_other', 'rate', 'rate_max', 'scur', 'smax', 'stot' ] SERVER_METRICS = [ 'chkfail', 'cli_abrt', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'srv_abrt', 'wredis', 'wretr' ] + COMMON SERVER_AVG_METRICS = ['qtime', 'rtime', 'throttle', 'ttime', 'weight'] BACKEND_METRICS = [ 'chkdown', 'cli_abrt', 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'downtime', 'dreq', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'slim', 'srv_abrt', 'wredis', 'wretr', ] + COMMON BACKEND_AVG_METRICS = [ 'act', 'bck', 'rtime', 'ctime', 'qtime', 'ttime', 'weight' ] FRONTEND_METRICS = [ 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'dreq', 'ereq', 'rate_lim', 'req_rate', 'req_rate_max', 'req_tot', 'slim' ] + COMMON
""" haproxstats.metrics ~~~~~~~~~~~~~~~~~~ This module provides the field names contained in the HAProxy statistics. """ DAEMON_METRICS = [ 'CompressBpsIn', 'CompressBpsOut', 'CompressBpsRateLim', 'ConnRate', 'ConnRateLimit', 'CumConns', 'CumReq', 'CumSslConns', 'CurrConns', 'CurrSslConns', 'Hard_maxcon', 'MaxConnRate', 'MaxSessRate', 'MaxSslConns', 'MaxSslRate', 'MaxZlibMemUsage', 'Maxconn', 'Maxpipes', 'Maxsock', 'Memmax_MB', 'PipesFree', 'PipesUsed', 'Run_queue', 'SessRate', 'SessRateLimit', 'SslBackendKeyRate', 'SslBackendMaxKeyRate', 'SslCacheLookups', 'SslCacheMisses', 'SslFrontendKeyRate', 'SslFrontendMaxKeyRate', 'SslFrontendSessionReuse_pct', 'SslRate', 'SslRateLimit', 'Tasks', 'Ulimit-n', 'Uptime_sec', 'ZlibMemUsage', ] DAEMON_AVG_METRICS = ['Idle_pct'] COMMON = [ 'bin', 'bout', 'dresp', 'hrsp_1xx', 'hrsp_2xx', 'hrsp_3xx', 'hrsp_4xx', 'hrsp_5xx', 'hrsp_other', 'rate', 'rate_max', 'scur', 'smax', 'stot' ] SERVER_METRICS = [ 'chkfail', 'cli_abrt', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'srv_abrt', 'wredis', 'wretr' ] + COMMON SERVER_AVG_METRICS = ['qtime', 'rtime', 'throttle', 'ttime', 'weight'] BACKEND_METRICS = [ 'chkdown', 'cli_abrt', 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'downtime', 'dreq', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'slim', 'srv_abrt', 'wredis', 'wretr', ] + COMMON BACKEND_AVG_METRICS = [ 'act', 'bck', 'rtime', 'ctime', 'qtime', 'ttime', 'weight' ] FRONTEND_METRICS = [ 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'dreq', 'ereq', 'rate_lim', 'req_rate', 'req_rate_max', 'req_tot', 'slim' ] + COMMON
Python
0
3c63201d6113d01c870748f21be2501282a2316a
Remove unneeded import in gmail.py.
paas_manager/app/util/gmail.py
paas_manager/app/util/gmail.py
import sys import smtplib from email.mime.text import MIMEText from email.utils import formatdate from ... import config def create_message(from_addr, to_addr, subject, message, encoding): body = MIMEText(message, 'plain', encoding) body['Subject'] = subject body['From'] = from_addr body['To'] = to_addr body['Date'] = formatdate() return body def send_via_gmail(from_addr, to_addr, body): s = smtplib.SMTP('smtp.gmail.com', 587) s.ehlo() s.starttls() s.ehlo() s.login( config['gmail']['user'], config['gmail']['password']) s.sendmail(from_addr, [to_addr], body.as_string()) s.close() def gmail(message, to_addr): body = create_message( config['gmail']['user'], to_addr, '[Notification]', message, 'utf8') send_via_gmail(config['gmail']['user'], to_addr, body) return if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if (argc < 3): print('USAGE: python gmail.py address message') raise SystemExit(0) else: to_addr = argvs[1] message = argvs[2] gmail(message, to_addr)
import sys import smtplib from email.mime.text import MIMEText from email.utils import formatdate import yaml from ... import config def create_message(from_addr, to_addr, subject, message, encoding): body = MIMEText(message, 'plain', encoding) body['Subject'] = subject body['From'] = from_addr body['To'] = to_addr body['Date'] = formatdate() return body def send_via_gmail(from_addr, to_addr, body): s = smtplib.SMTP('smtp.gmail.com', 587) s.ehlo() s.starttls() s.ehlo() s.login( config['gmail']['user'], config['gmail']['password']) s.sendmail(from_addr, [to_addr], body.as_string()) s.close() def gmail(message, to_addr): body = create_message( config['gmail']['user'], to_addr, '[Notification]', message, 'utf8') send_via_gmail(config['gmail']['user'], to_addr, body) return if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if (argc < 3): print('USAGE: python gmail.py address message') raise SystemExit(0) else: to_addr = argvs[1] message = argvs[2] gmail(message, to_addr)
Python
0
4588a52ebfc3aee127a34a9e10067c0121c4f72e
add 'tab' and 'shift tab' for down/up movement
subiquity/ui/frame.py
subiquity/ui/frame.py
# Copyright 2015 Canonical, Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Base Frame Widget """ from urwid import Frame, WidgetWrap from subiquity.ui.anchors import Header, Footer, Body import logging log = logging.getLogger('subiquity.ui.frame') class SubiquityUI(WidgetWrap): key_conversion_map = {'tab': 'down', 'shift tab': 'up'} def __init__(self, header=None, body=None, footer=None): self.header = header if header else Header() self.body = body if body else Body() self.footer = footer if footer else Footer() self.frame = Frame(self.body, header=self.header, footer=self.footer) super().__init__(self.frame) def keypress(self, size, key): key = self.key_conversion_map.get(key, key) return super().keypress(size, key) def set_header(self, title, excerpt): self.frame.header = Header(title, excerpt) def set_footer(self, message): self.frame.footer = Footer(message) def set_body(self, widget): self.frame.body = widget
# Copyright 2015 Canonical, Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Base Frame Widget """ from urwid import Frame, WidgetWrap from subiquity.ui.anchors import Header, Footer, Body import logging log = logging.getLogger('subiquity.ui.frame') class SubiquityUI(WidgetWrap): def __init__(self, header=None, body=None, footer=None): self.header = header if header else Header() self.body = body if body else Body() self.footer = footer if footer else Footer() self.frame = Frame(self.body, header=self.header, footer=self.footer) super().__init__(self.frame) def set_header(self, title, excerpt): self.frame.header = Header(title, excerpt) def set_footer(self, message): self.frame.footer = Footer(message) def set_body(self, widget): self.frame.body = widget
Python
0
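The remap above relies on `dict.get(key, key)`: mapped keys are translated, everything else passes through unchanged before urwid's normal keypress handling. That one-liner in isolation:

key_conversion_map = {'tab': 'down', 'shift tab': 'up'}

def convert(key):
    # Fall back to the original key when no remapping exists.
    return key_conversion_map.get(key, key)

for k in ('tab', 'shift tab', 'enter'):
    print(k, '->', convert(k))
# tab -> down / shift tab -> up / enter -> enter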
791fb484937cabeb3a098bcd173db782efe53d7c
support filtering of Authors by organization and positions
authors/views.py
authors/views.py
from rest_framework import viewsets, permissions from . import serializers from . import models class AuthorViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Author.objects.all() serializer_class = serializers.AuthorSerializer filter_fields = ('organization', 'positions') search_fields = ('first_name', 'last_name', 'organization', 'title', 'email', 'twitter', 'bio') ordering_fields = "__all__" class OrganizationViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Organization.objects.all() serializer_class = serializers.OrganizationSerializer filter_fields = () search_fields = ('name',) ordering_fields = "__all__" class PositionViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Position.objects.all() serializer_class = serializers.PositionSerializer filter_fields = () search_fields = ('name', 'description') ordering_fields = "__all__"
from rest_framework import viewsets, permissions from . import serializers from . import models class AuthorViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Author.objects.all() serializer_class = serializers.AuthorSerializer filter_fields = () search_fields = ('first_name', 'last_name', 'organization', 'title', 'email', 'twitter', 'bio') ordering_fields = "__all__" class OrganizationViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Organization.objects.all() serializer_class = serializers.OrganizationSerializer filter_fields = () search_fields = ('name',) ordering_fields = "__all__" class PositionViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Position.objects.all() serializer_class = serializers.PositionSerializer filter_fields = () search_fields = ('name', 'description') ordering_fields = "__all__"
Python
0
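Populating `filter_fields` lets the REST framework filter backend turn querystring parameters such as `?organization=2` or `?positions=5` into queryset filters on `AuthorViewSet`. A list-of-dicts stand-in for what the backend does, runnable without Django:

def apply_filters(rows, params, allowed=("organization", "positions")):
    # Equivalent in spirit to queryset.filter(field=value) per field.
    for field in allowed:
        if field in params:
            rows = [r for r in rows if str(r.get(field)) == params[field]]
    return rows

authors = [
    {"name": "Ada", "organization": 1, "positions": 5},
    {"name": "Bob", "organization": 2, "positions": 5},
]
print(apply_filters(authors, {"organization": "2"}))  # only Bob's row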
8aa52ea8f07f922bc6d5952ca8ad56bedd042a1f
Bump version number.
nativeconfig/version.py
nativeconfig/version.py
VERSION = '2.4.0'
VERSION = '2.3.0'
Python
0
fb223397ccdee519af7e17dc73db864fe0120e8b
Create a random HDFS folder for unit testing
fs/tests/test_hadoop.py
fs/tests/test_hadoop.py
""" fs.tests.test_hadoop: TestCases for the HDFS Hadoop Filesystem This test suite is skipped unless the following environment variables are configured with valid values. * PYFS_HADOOP_NAMENODE_ADDR * PYFS_HADOOP_NAMENODE_PORT [default=50070] * PYFS_HADOOP_NAMENODE_PATH [default="/"] All tests will be executed within a subdirectory "pyfs-hadoop" for safety. """ import os import unittest import uuid from fs.tests import FSTestCases, ThreadingTestCases from fs.path import * try: from fs import hadoop except ImportError: raise unittest.SkipTest("hadoop fs wasn't importable") class TestHadoopFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def __init__(self, *args, **kwargs): self.namenode_host = os.environ.get("PYFS_HADOOP_NAMENODE_ADDR") self.namenode_port = os.environ.get("PYFS_HADOOP_NAMENODE_PORT", "50070") self.base_path = os.path.join( os.environ.get("PYFS_HADOOP_NAMENODE_PATH", "/"), "pyfstest-" + str(uuid.uuid4()) ) super(TestHadoopFS, self).__init__(*args, **kwargs) def setUp(self): if not self.namenode_host: raise unittest.SkipTest("Skipping HDFS tests (missing config)") self.fs = hadoop.HadoopFS( namenode=self.namenode_host, port=self.namenode_port, base=self.base_path ) def tearDown(self): for dir_path in self.fs.ilistdir(dirs_only=True): if dir_path == "/": continue self.fs.removedir(dir_path, recursive=False, force=True) for file_path in self.fs.ilistdir(files_only=True): self.fs.remove(file_path) self.fs.close() @unittest.skip("HadoopFS does not support seek") def test_readwriteappendseek(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate_to_larger_size(self): pass @unittest.skip("HadoopFS does not support seek") def test_write_past_end_of_file(self): pass
""" fs.tests.test_hadoop: TestCases for the HDFS Hadoop Filesystem This test suite is skipped unless the following environment variables are configured with valid values. * PYFS_HADOOP_NAMENODE_ADDR * PYFS_HADOOP_NAMENODE_PORT [default=50070] * PYFS_HADOOP_NAMENODE_PATH [default="/"] All tests will be executed within a subdirectory "pyfs-hadoop" for safety. """ import os import unittest from fs.tests import FSTestCases, ThreadingTestCases from fs.path import * try: from fs import hadoop except ImportError: raise unittest.SkipTest("hadoop fs wasn't importable") class TestHadoopFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def setUp(self): namenode_host = os.environ.get("PYFS_HADOOP_NAMENODE_ADDR") namenode_port = os.environ.get("PYFS_HADOOP_NAMENODE_PORT", "50070") base_path = os.environ.get("PYFS_HADOOP_NAMENODE_PATH", "/") if not namenode_host or not namenode_port or not base_path: raise unittest.SkipTest("Skipping HDFS tests due to lack of config") self.fs = hadoop.HadoopFS( namenode=namenode_host, port=namenode_port, base=base_path ) def tearDown(self): for dir_path in self.fs.ilistdir(dirs_only=True): if dir_path == "/": continue self.fs.removedir(dir_path, recursive=False, force=True) for file_path in self.fs.ilistdir(files_only=True): self.fs.remove(file_path) self.fs.close() @unittest.skip("HadoopFS does not support seek") def test_readwriteappendseek(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate_to_larger_size(self): pass @unittest.skip("HadoopFS does not support seek") def test_write_past_end_of_file(self): pass
Python
0
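Appending a UUID to the base path gives every test run its own HDFS directory, so concurrent or aborted runs cannot trample each other's files. The naming scheme on its own:

import os
import uuid

def make_test_root(base="/"):
    # One fresh directory name per run, as in the setUp above.
    return os.path.join(base, "pyfstest-" + str(uuid.uuid4()))

print(make_test_root())        # e.g. /pyfstest-7f6f...-...
print(make_test_root("/tmp"))  # e.g. /tmp/pyfstest-...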
db33f2d1e14c48cd2c73ae3e3c835fac54f39224
lower bool priority, raise int priority
sympy/core/sympify.py
sympy/core/sympify.py
"""sympify -- convert objects SymPy internal format""" # from basic import Basic, BasicType, S # from numbers import Integer, Real # from interval import Interval import decimal class SympifyError(ValueError): def __init__(self, expr, base_exc=None): self.expr = expr self.base_exc = base_exc def __str__(self): if self.base_exc is None: return "SympifyError: %s" % (self.expr,) return "Sympify of expression '%s' failed, because of exception being raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__, str(self.base_exc)) def sympify(a, sympify_lists=False, locals= {}): """Converts an arbitrary expression to a type that can be used inside sympy. For example, it will convert python int's into instance of sympy.Rational, floats into intances of sympy.Real, etc. It is also able to coerce symbolic expressions which does inherit after Basic. This can be useful in cooperation with SAGE. It currently accepts as arguments: - any object defined in sympy (except maybe matrices [TODO]) - standard numeric python types: int, long, float, Decimal - strings (like "0.09" or "2e-19") If sympify_lists is set to True then sympify will also accept lists, tuples and sets. It will return the same type but with all of the entries sympified. If the argument is already a type that sympy understands, it will do nothing but return that value. This can be used at the begining of a function to ensure you are working with the correct type. >>> from sympy import * >>> sympify(2).is_integer True >>> sympify(2).is_real True >>> sympify(2.0).is_real True >>> sympify("2.0").is_real True >>> sympify("2e-45").is_real True """ if isinstance(a, Basic): return a if isinstance(a, BasicType): return a elif isinstance(a, (int, long)): return Integer(a) elif isinstance(a, (float, decimal.Decimal)): return Real(a) elif isinstance(a, complex): real, imag = map(sympify, (a.real, a.imag)) ireal, iimag = int(real), int(imag) if ireal + iimag*1j == a: return ireal + iimag*S.ImaginaryUnit return real + S.ImaginaryUnit * imag elif isinstance(a, bool): raise NotImplementedError("bool support") elif (a.__class__ in [list,tuple]) and len(a) == 2: # isinstance causes problems in the issue #432, so we use .__class__ return Interval(*a) elif isinstance(a, (list,tuple,set)) and sympify_lists: return type(a)([sympify(x, True) for x in a]) elif hasattr(a, "_sympy_"): # the "a" implements _sympy_() method, that returns a SymPy # expression (by definition), so we just use it return a._sympy_() else: # XXX this is here because of cyclic-import issues from sympy.matrices import Matrix from sympy.polynomials import Polynomial if isinstance(a, Polynomial): return a if isinstance(a, Matrix): raise NotImplementedError('matrix support') if not isinstance(a, str): # At this point we were given an arbitrary expression # which does not inherit from Basic and doesn't implement # _sympy_ (which is a canonical and robust way to convert # anything to SymPy expression). # # As a last chance, we try to take "a"'s normal form via str() # and try to parse it. If it fails, then we have no luck and # return an exception a = str(a) try: import ast_parser return ast_parser.SymPyParser(local_dict=locals).parse_expr(a) except Exception, exc: raise SympifyError(a, exc) raise SympifyError("%r is NOT a valid SymPy expression" % a)
"""sympify -- convert objects SymPy internal format""" # from basic import Basic, BasicType, S # from numbers import Integer, Real # from interval import Interval import decimal class SympifyError(ValueError): def __init__(self, expr, base_exc=None): self.expr = expr self.base_exc = base_exc def __str__(self): if self.base_exc is None: return "SympifyError: %s" % (self.expr,) return "Sympify of expression '%s' failed, because of exception being raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__, str(self.base_exc)) def sympify(a, sympify_lists=False, locals= {}): """Converts an arbitrary expression to a type that can be used inside sympy. For example, it will convert python int's into instance of sympy.Rational, floats into intances of sympy.Real, etc. It is also able to coerce symbolic expressions which does inherit after Basic. This can be useful in cooperation with SAGE. It currently accepts as arguments: - any object defined in sympy (except maybe matrices [TODO]) - standard numeric python types: int, long, float, Decimal - strings (like "0.09" or "2e-19") If sympify_lists is set to True then sympify will also accept lists, tuples and sets. It will return the same type but with all of the entries sympified. If the argument is already a type that sympy understands, it will do nothing but return that value. This can be used at the begining of a function to ensure you are working with the correct type. >>> from sympy import * >>> sympify(2).is_integer True >>> sympify(2).is_real True >>> sympify(2.0).is_real True >>> sympify("2.0").is_real True >>> sympify("2e-45").is_real True """ if isinstance(a, Basic): return a if isinstance(a, BasicType): return a elif isinstance(a, bool): raise NotImplementedError("bool support") elif isinstance(a, (int, long)): return Integer(a) elif isinstance(a, (float, decimal.Decimal)): return Real(a) elif isinstance(a, complex): real, imag = map(sympify, (a.real, a.imag)) ireal, iimag = int(real), int(imag) if ireal + iimag*1j == a: return ireal + iimag*S.ImaginaryUnit return real + S.ImaginaryUnit * imag elif (a.__class__ in [list,tuple]) and len(a) == 2: # isinstance causes problems in the issue #432, so we use .__class__ return Interval(*a) elif isinstance(a, (list,tuple,set)) and sympify_lists: return type(a)([sympify(x, True) for x in a]) elif hasattr(a, "_sympy_"): # the "a" implements _sympy_() method, that returns a SymPy # expression (by definition), so we just use it return a._sympy_() else: # XXX this is here because of cyclic-import issues from sympy.matrices import Matrix from sympy.polynomials import Polynomial if isinstance(a, Polynomial): return a if isinstance(a, Matrix): raise NotImplementedError('matrix support') if not isinstance(a, str): # At this point we were given an arbitrary expression # which does not inherit from Basic and doesn't implement # _sympy_ (which is a canonical and robust way to convert # anything to SymPy expression). # # As a last chance, we try to take "a"'s normal form via str() # and try to parse it. If it fails, then we have no luck and # return an exception a = str(a) try: import ast_parser return ast_parser.SymPyParser(local_dict=locals).parse_expr(a) except Exception, exc: raise SympifyError(a, exc) raise SympifyError("%r is NOT a valid SymPy expression" % a)
Python
0.999987
926bf60c77673571cb8f6d12e3754507f41b9e80
add optional args
ngage/plugins/napalm.py
ngage/plugins/napalm.py
from __future__ import absolute_import

import ngage
from ngage.exceptions import AuthenticationError, ConfigError

import napalm_base
from napalm_base.exceptions import (
    ConnectionException,
    ReplaceConfigException,
    MergeConfigException
    )


@ngage.plugin.register('napalm')
class Driver(ngage.plugins.DriverPlugin):
    plugin_type = 'napalm'

    def _do_init(self):
        config = self.config

        self.host = config.get('host')
        self.user = config.get('user')
        self.password = config.get('password')
        self.optional_args = config.get('driver_args', {})

        if ':' not in config['type']:
            raise ValueError('napalm requires a subtype')

        driver = config['type'].split(':', 2)[1]
        cls = napalm_base.get_network_driver(driver)

        self.dev = cls(self.host, self.user, self.password, optional_args=self.optional_args)

    def _do_open(self):
        try:
            self.dev.open()
        except ConnectionException:
            raise AuthenticationError

    def _do_close(self):
        self.dev.close()

    def _do_pull(self):
        if not hasattr(self.dev, 'get_config'):
            raise NotImplementedError('get_config not implemented, please update napalm')

        return self.dev.get_config(retrieve='candidate')['candidate']

    def _do_push(self, fname, **kwargs):
        try:
            self.dev.load_merge_candidate(filename=fname)
        except (MergeConfigException, ReplaceConfigException) as e:
            raise ConfigError(e.message)

    def _do_diff(self, index=0):
        if index != 0:
            raise NotImplementedError('version index not implemented')
        return self.dev.compare_config()

    def _do_lock(self):
        self.dev.lock()

    def _do_unlock(self):
        self.dev.unlock()

    def _do_commit(self, **kwargs):
        self.dev.commit_config()

#    def _do_check(self):
#        not impl by napalm

    def _do_rollback(self, index=0):
        if index == 0:
            self.dev.discard_config()
        elif index == 1:
            self.dev.rollback()
        else:
            raise NotImplementedError('version index not implemented')
from __future__ import absolute_import

import ngage
from ngage.exceptions import AuthenticationError, ConfigError

import napalm_base
from napalm_base.exceptions import (
    ConnectionException,
    ReplaceConfigException,
    MergeConfigException
    )


@ngage.plugin.register('napalm')
class Driver(ngage.plugins.DriverPlugin):
    plugin_type = 'napalm'

    def _do_init(self):
        config = self.config

        self.host = config.get('host')
        self.user = config.get('user')
        self.password = config.get('password')

        if ':' not in config['type']:
            raise ValueError('napalm requires a subtype')

        (na, driver) = config['type'].split(':', 2)
        cls = napalm_base.get_network_driver(driver)

        self.dev = cls(self.host, self.user, self.password)

    def _do_open(self):
        try:
            self.dev.open()
        except ConnectionException:
            raise AuthenticationError

    def _do_close(self):
        self.dev.close()

    def _do_pull(self):
        if not hasattr(self.dev, 'get_config'):
            raise NotImplementedError('get_config not implemented, please update napalm')

        return self.dev.get_config(retrieve='candidate')['candidate']

    def _do_push(self, fname, **kwargs):
        try:
            self.dev.load_merge_candidate(filename=fname)
        except (MergeConfigException, ReplaceConfigException) as e:
            raise ConfigError(e.message)

    def _do_diff(self, index=0):
        if index != 0:
            raise NotImplementedError('version index not implemented')
        return self.dev.compare_config()

    def _do_lock(self):
        self.dev.lock()

    def _do_unlock(self):
        self.dev.unlock()

    def _do_commit(self, **kwargs):
        self.dev.commit_config()

#    def _do_check(self):
#        not impl by napalm

    def _do_rollback(self, index=0):
        if index == 0:
            self.dev.discard_config()
        elif index == 1:
            self.dev.rollback()
        else:
            raise NotImplementedError('version index not implemented')
Python
0.000001
68cf8281b512ea5941ec0b88ca532409e0e97866
Fix circular import
app/evaluation/emails.py
app/evaluation/emails.py
import json

from django.conf import settings
from django.core.mail import send_mail

from comicsite.core.urlresolvers import reverse


def send_failed_job_email(job):
    message = (
        f'Unfortunately the evaluation for the submission to '
        f'{job.challenge.short_name} failed with an error. The error message '
        f'is:\n\n'
        f'{job.output}\n\n'
        f'You may wish to try and correct this, or contact the challenge '
        f'organizers. The following information may help them:\n'
        f'User: {job.submission.creator.username}\n'
        f'Job ID: {job.pk}\n'
        f'Submission ID: {job.submission.pk}'
    )

    recipient_list = [o.email for o in job.challenge.get_admins()]
    recipient_list.append(job.submission.creator.email)

    for r in recipient_list:
        send_mail(
            subject='Evaluation Failed',
            message=message,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[r.email],
        )


def send_new_result_email(result):
    recipient_list = [o.email for o in result.challenge.get_admins()]

    message = (
        f'There is a new result for {result.challenge.short_name} from '
        f'{result.job.submission.creator.username}. The following metrics '
        f'were calculated:\n\n'
        f'{json.dumps(result.metrics, indent=2)}\n\n'
    )

    if result.public:
        leaderboard_url = reverse(
            'evaluation:results-list',
            kwargs={
                'challenge_short_name': result.challenge.short_name,
            }
        )
        message += (
            f'You can view the result on the leaderboard here: '
            f'{leaderboard_url}'
        )
        recipient_list.append(result.job.submission.creator.email)
    else:
        message += (
            f'You can publish the result on the leaderboard here: '
            f'{result.get_absolute_url()}'
        )

    for r in recipient_list:
        send_mail(
            subject=f'New Result for {result.challenge.short_name}',
            message=message,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[r.email],
        )
import json

from django.conf import settings
from django.core.mail import send_mail

from comicsite.core.urlresolvers import reverse
from evaluation.models import Result, Job


def send_failed_job_email(job: Job):
    message = (
        f'Unfortunately the evaluation for the submission to '
        f'{job.challenge.short_name} failed with an error. The error message '
        f'is:\n\n'
        f'{job.output}\n\n'
        f'You may wish to try and correct this, or contact the challenge '
        f'organizers. The following information may help them:\n'
        f'User: {job.submission.creator.username}\n'
        f'Job ID: {job.pk}\n'
        f'Submission ID: {job.submission.pk}'
    )

    recipient_list = [o.email for o in job.challenge.get_admins()]
    recipient_list.append(job.submission.creator.email)

    for r in recipient_list:
        send_mail(
            subject='Evaluation Failed',
            message=message,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[r.email],
        )


def send_new_result_email(result: Result):
    recipient_list = [o.email for o in result.challenge.get_admins()]

    message = (
        f'There is a new result for {result.challenge.short_name} from '
        f'{result.job.submission.creator.username}. The following metrics '
        f'were calculated:\n\n'
        f'{json.dumps(result.metrics, indent=2)}\n\n'
    )

    if result.public:
        leaderboard_url = reverse(
            'evaluation:results-list',
            kwargs={
                'challenge_short_name': result.challenge.short_name,
            }
        )
        message += (
            f'You can view the result on the leaderboard here: '
            f'{leaderboard_url}'
        )
        recipient_list.append(result.job.submission.creator.email)
    else:
        message += (
            f'You can publish the result on the leaderboard here: '
            f'{result.get_absolute_url()}'
        )

    for r in recipient_list:
        send_mail(
            subject=f'New Result for {result.challenge.short_name}',
            message=message,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[r.email],
        )
Python
0.000005
04aa968a70b8065c9c9cd013d1266f8988c4220a
remove accidentally committed maxDiff change
tests/__init__.py
tests/__init__.py
import os
import unittest

import pytest


class ScraperTest(unittest.TestCase):

    online = False
    test_file_name = None

    def setUp(self):
        os.environ[
            "RECIPE_SCRAPERS_SETTINGS"
        ] = "tests.test_data.test_settings_module.test_settings"

        test_file_name = (
            self.test_file_name
            if self.test_file_name
            else self.scraper_class.__name__.lower()
        )
        with open(
            "tests/test_data/{}.testhtml".format(test_file_name), encoding="utf-8"
        ) as testfile:
            self.harvester_class = self.scraper_class(testfile)
            canonical_url = self.harvester_class.canonical_url()
            if self.online:
                if not canonical_url:
                    pytest.skip(
                        f"could not find canonical url for online test of scraper '{self.scraper_class.__name__}'"
                    )
                self.harvester_class = self.scraper_class(url=canonical_url)
import os
import unittest

import pytest


class ScraperTest(unittest.TestCase):

    maxDiff = None
    online = False
    test_file_name = None

    def setUp(self):
        os.environ[
            "RECIPE_SCRAPERS_SETTINGS"
        ] = "tests.test_data.test_settings_module.test_settings"

        test_file_name = (
            self.test_file_name
            if self.test_file_name
            else self.scraper_class.__name__.lower()
        )
        with open(
            "tests/test_data/{}.testhtml".format(test_file_name), encoding="utf-8"
        ) as testfile:
            self.harvester_class = self.scraper_class(testfile)
            canonical_url = self.harvester_class.canonical_url()
            if self.online:
                if not canonical_url:
                    pytest.skip(
                        f"could not find canonical url for online test of scraper '{self.scraper_class.__name__}'"
                    )
                self.harvester_class = self.scraper_class(url=canonical_url)
Python
0
cc0521c2f72c534e2fa94573f90e9ec2bb169405
use utc time for timestamps
database.py
database.py
import os.path
from datetime import datetime
from collections import defaultdict
from flask import json
from flaskext.sqlalchemy import SQLAlchemy

import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

db = SQLAlchemy()


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    openid_url = db.Column(db.Text())
    name = db.Column(db.Text())
    email = db.Column(db.Text())


class Person(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text())

    def get_content(self):
        version = self.versions.order_by(ContentVersion.time.desc()).first()
        return {} if version is None else json.loads(version.content)

    def save_content_version(self, new_content, user):
        utcnow = datetime.utcnow()
        version = ContentVersion(person=self, user=user, time=utcnow)
        version.content = json.dumps(new_content)
        db.session.add(version)
        db.session.commit()
        log.info("Content update for person id=%d version_id=%d",
                 self.id, version.id)


class ContentVersion(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person',
                             backref=db.backref('versions', lazy='dynamic'))
    content = db.Column(db.LargeBinary)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User')
    time = db.Column(db.DateTime)


def get_persons():
    results = {}
    for person in Person.query.all():
        results[person.id] = dict(person.get_content(), name=person.name)
    return results


def import_json(json_path):
    utcnow = datetime.utcnow()
    count = defaultdict(int)
    with open(json_path, 'rb') as f:
        people_data = json.load(f)
    for person_data in people_data:
        found_persons = Person.query.filter_by(name=person_data['name']).all()
        if found_persons:
            assert len(found_persons) == 1
            person = found_persons[0]
        else:
            person = Person(name=person_data['name'])
            db.session.add(person)
            log.info('New person %r, id=%d', person_data['name'], person.id)
            count['new-person'] += 1
        emails = person_data['emails']
        if emails:
            content = {'email': emails}
            if content != person.get_content():
                version = ContentVersion(person=person, time=utcnow)
                version.content = json.dumps(content)
                db.session.add(version)
                log.info('Content update for person id=%d', person.id)
                count['new-version'] += 1
    db.session.commit()
    if count:
        log.info("JSON import from %r completed; %r", json_path, dict(count))


def get_user(openid_url):
    return User.query.filter_by(openid_url=openid_url).first()


def get_update_user(openid_url, name, email):
    user = get_user(openid_url)
    if user is None:
        user = User(openid_url=openid_url)
        log.info("New user, openid_url=%r", openid_url)
    if (name, email) != (user.name, user.email):
        user.name = name
        user.email = email
        db.session.add(user)
        db.session.commit()
        log.info("User data modified for openid_url=%r", openid_url)
    return user
import os.path
from datetime import datetime
from collections import defaultdict
from flask import json
from flaskext.sqlalchemy import SQLAlchemy

import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

db = SQLAlchemy()


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    openid_url = db.Column(db.Text())
    name = db.Column(db.Text())
    email = db.Column(db.Text())


class Person(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text())

    def get_content(self):
        version = self.versions.order_by(ContentVersion.time.desc()).first()
        return {} if version is None else json.loads(version.content)

    def save_content_version(self, new_content, user):
        now = datetime.now()
        version = ContentVersion(person=self, user=user, time=now)
        version.content = json.dumps(new_content)
        db.session.add(version)
        db.session.commit()
        log.info("Content update for person id=%d version_id=%d",
                 self.id, version.id)


class ContentVersion(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person',
                             backref=db.backref('versions', lazy='dynamic'))
    content = db.Column(db.LargeBinary)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User')
    time = db.Column(db.DateTime)


def get_persons():
    results = {}
    for person in Person.query.all():
        results[person.id] = dict(person.get_content(), name=person.name)
    return results


def import_json(json_path):
    now = datetime.now()
    count = defaultdict(int)
    with open(json_path, 'rb') as f:
        people_data = json.load(f)
    for person_data in people_data:
        found_persons = Person.query.filter_by(name=person_data['name']).all()
        if found_persons:
            assert len(found_persons) == 1
            person = found_persons[0]
        else:
            person = Person(name=person_data['name'])
            db.session.add(person)
            log.info('New person %r, id=%d', person_data['name'], person.id)
            count['new-person'] += 1
        emails = person_data['emails']
        if emails:
            content = {'email': emails}
            if content != person.get_content():
                version = ContentVersion(person=person, time=now)
                version.content = json.dumps(content)
                db.session.add(version)
                log.info('Content update for person id=%d', person.id)
                count['new-version'] += 1
    db.session.commit()
    if count:
        log.info("JSON import from %r completed; %r", json_path, dict(count))


def get_user(openid_url):
    return User.query.filter_by(openid_url=openid_url).first()


def get_update_user(openid_url, name, email):
    user = get_user(openid_url)
    if user is None:
        user = User(openid_url=openid_url)
        log.info("New user, openid_url=%r", openid_url)
    if (name, email) != (user.name, user.email):
        user.name = name
        user.email = email
        db.session.add(user)
        db.session.commit()
        log.info("User data modified for openid_url=%r", openid_url)
    return user
Python
0.000105
c72b28ece7fe5313c7eff5f26d9ef0baaad1bad2
Update denormalization command
project/apps/api/management/commands/denormalize.py
project/apps/api/management/commands/denormalize.py
from django.core.management.base import (
    BaseCommand,
)

from apps.api.models import (
    Convention,
    Contest,
    Award,
    Contestant,
    Entrant,
    Session,
    Performance,
    Song,
    Singer,
    Director,
    Panelist,
)


class Command(BaseCommand):
    help = "Command to denormailze data."

    def handle(self, *args, **options):
        vs = Convention.objects.all()
        for v in vs:
            v.save()
        ts = Contest.objects.all()
        for t in ts:
            t.save()
        ps = Panelist.objects.all()
        for p in ps:
            p.save()
        ws = Award.objects.all()
        for w in ws:
            w.save()
        es = Entrant.objects.all()
        for e in es:
            e.save()
        cs = Contestant.objects.all()
        for c in cs:
            c.save()
        ss = Session.objects.all()
        for s in ss:
            s.save()
        as_ = Performance.objects.all()
        for a in as_:
            a.save()
        ps = Song.objects.all()
        for p in ps:
            p.save()
        ss = Singer.objects.all()
        for s in ss:
            s.save()
        js = Panelist.objects.all()
        for j in js:
            j.save()
        ds = Director.objects.all()
        for d in ds:
            d.save()
        return "Done"
from django.core.management.base import (
    BaseCommand,
)

from apps.api.models import (
    Convention,
    Contest,
    Contestant,
    Performance,
    Song,
    Group,
    Singer,
    Director,
    Panelist,
)


class Command(BaseCommand):
    help = "Command to denormailze data."

    def handle(self, *args, **options):
        vs = Convention.objects.all()
        for v in vs:
            v.save()
        ts = Contest.objects.all()
        for t in ts:
            t.save()
        cs = Contestant.objects.all()
        for c in cs:
            c.save()
        as_ = Performance.objects.all()
        for a in as_:
            a.save()
        ps = Song.objects.all()
        for p in ps:
            p.save()
        ss = Singer.objects.all()
        for s in ss:
            s.save()
        js = Panelist.objects.all()
        for j in js:
            j.save()
        ds = Director.objects.all()
        for d in ds:
            d.save()
        return "Done"
Python
0.000004
74c4c832b5f99643ac23ad3885f22f7a493016f7
Update denormalization command
project/apps/api/management/commands/denormalize.py
project/apps/api/management/commands/denormalize.py
from django.core.management.base import (
    BaseCommand,
)

from apps.api.models import (
    Convention,
    Contest,
    Award,
    Contestant,
    Entrant,
    Session,
    Performance,
    Song,
    Singer,
    Director,
    Panelist,
)


class Command(BaseCommand):
    help = "Command to denormailze data."

    def handle(self, *args, **options):
        vs = Convention.objects.all()
        for v in vs:
            v.save()
        ts = Contest.objects.all()
        for t in ts:
            t.save()
        ps = Panelist.objects.all()
        for p in ps:
            p.save()
        ws = Award.objects.all()
        for w in ws:
            w.save()
        es = Entrant.objects.all()
        for e in es:
            e.save()
        cs = Contestant.objects.all()
        for c in cs:
            c.save()
        ss = Session.objects.all()
        for s in ss:
            s.save()
        as_ = Performance.objects.all()
        for a in as_:
            a.save()
        ps = Song.objects.all()
        for p in ps:
            p.save()
        ss = Singer.objects.all()
        for s in ss:
            s.save()
        js = Panelist.objects.all()
        for j in js:
            j.save()
        ds = Director.objects.all()
        for d in ds:
            d.save()
        return "Done"
from django.core.management.base import (
    BaseCommand,
)

from apps.api.models import (
    Convention,
    Contest,
    Contestant,
    Performance,
    Song,
    Group,
    Singer,
    Director,
    Panelist,
)


class Command(BaseCommand):
    help = "Command to denormailze data."

    def handle(self, *args, **options):
        vs = Convention.objects.all()
        for v in vs:
            v.save()
        ts = Contest.objects.all()
        for t in ts:
            t.save()
        cs = Contestant.objects.all()
        for c in cs:
            c.save()
        as_ = Performance.objects.all()
        for a in as_:
            a.save()
        ps = Song.objects.all()
        for p in ps:
            p.save()
        ss = Singer.objects.all()
        for s in ss:
            s.save()
        js = Panelist.objects.all()
        for j in js:
            j.save()
        ds = Director.objects.all()
        for d in ds:
            d.save()
        return "Done"
Python
0.000004
7ebf1beec0912273317ed094e1c3806b2e910600
Remove commented lines
mbtiles/worker.py
mbtiles/worker.py
"""rio-mbtiles processing worker""" import logging import warnings from rasterio.enums import Resampling from rasterio.io import MemoryFile from rasterio.transform import from_bounds as transform_from_bounds from rasterio.warp import reproject, transform_bounds from rasterio.windows import Window from rasterio.windows import from_bounds as window_from_bounds import mercantile import rasterio TILES_CRS = "EPSG:3857" log = logging.getLogger(__name__) def init_worker(path, profile, resampling_method, open_opts, warp_opts): global base_kwds, filename, resampling, open_options, warp_options resampling = Resampling[resampling_method] base_kwds = profile.copy() filename = path open_options = open_opts.copy() if open_opts is not None else {} warp_options = warp_opts.copy() if warp_opts is not None else {} def process_tile(tile): """Process a single MBTiles tile Parameters ---------- tile : mercantile.Tile warp_options : Mapping GDAL warp options as keyword arguments. Returns ------- tile : mercantile.Tile The input tile. bytes : bytearray Image bytes corresponding to the tile. """ global base_kwds, resampling, filename, open_options, warp_options with rasterio.open(filename, **open_options) as src: # Get the bounds of the tile. ulx, uly = mercantile.xy(*mercantile.ul(tile.x, tile.y, tile.z)) lrx, lry = mercantile.xy(*mercantile.ul(tile.x + 1, tile.y + 1, tile.z)) kwds = base_kwds.copy() kwds["transform"] = transform_from_bounds( ulx, lry, lrx, uly, kwds["width"], kwds["height"] ) src_nodata = kwds.pop("src_nodata", None) dst_nodata = kwds.pop("dst_nodata", None) warnings.simplefilter("ignore") log.info("Reprojecting tile: tile=%r", tile) with MemoryFile() as memfile: with memfile.open(**kwds) as tmp: # determine window of source raster corresponding to the tile # image, with small buffer at edges try: west, south, east, north = transform_bounds( TILES_CRS, src.crs, ulx, lry, lrx, uly ) tile_window = window_from_bounds( west, south, east, north, transform=src.transform ) adjusted_tile_window = Window( tile_window.col_off - 1, tile_window.row_off - 1, tile_window.width + 2, tile_window.height + 2, ) tile_window = adjusted_tile_window.round_offsets().round_shape() # if no data in window, skip processing the tile if not src.read_masks(1, window=tile_window).any(): return tile, None except ValueError: log.info( "Tile %r will not be skipped, even if empty. This is harmless.", tile, ) num_threads = int(warp_options.pop("num_threads", 2)) reproject( rasterio.band(src, tmp.indexes), rasterio.band(tmp, tmp.indexes), src_nodata=src_nodata, dst_nodata=dst_nodata, num_threads=num_threads, resampling=resampling, **warp_options ) return tile, memfile.read()
"""rio-mbtiles processing worker""" import logging import warnings from rasterio.enums import Resampling from rasterio.io import MemoryFile from rasterio.transform import from_bounds as transform_from_bounds from rasterio.warp import reproject, transform_bounds from rasterio.windows import Window from rasterio.windows import from_bounds as window_from_bounds import mercantile import rasterio # base_kwds = None # src = None TILES_CRS = "EPSG:3857" log = logging.getLogger(__name__) def init_worker(path, profile, resampling_method, open_opts, warp_opts): global base_kwds, filename, resampling, open_options, warp_options resampling = Resampling[resampling_method] base_kwds = profile.copy() filename = path open_options = open_opts.copy() if open_opts is not None else {} warp_options = warp_opts.copy() if warp_opts is not None else {} def process_tile(tile): """Process a single MBTiles tile Parameters ---------- tile : mercantile.Tile warp_options : Mapping GDAL warp options as keyword arguments. Returns ------- tile : mercantile.Tile The input tile. bytes : bytearray Image bytes corresponding to the tile. """ global base_kwds, resampling, filename, open_options, warp_options with rasterio.open(filename, **open_options) as src: # Get the bounds of the tile. ulx, uly = mercantile.xy(*mercantile.ul(tile.x, tile.y, tile.z)) lrx, lry = mercantile.xy(*mercantile.ul(tile.x + 1, tile.y + 1, tile.z)) kwds = base_kwds.copy() kwds["transform"] = transform_from_bounds( ulx, lry, lrx, uly, kwds["width"], kwds["height"] ) src_nodata = kwds.pop("src_nodata", None) dst_nodata = kwds.pop("dst_nodata", None) warnings.simplefilter("ignore") log.info("Reprojecting tile: tile=%r", tile) with MemoryFile() as memfile: with memfile.open(**kwds) as tmp: # determine window of source raster corresponding to the tile # image, with small buffer at edges try: west, south, east, north = transform_bounds( TILES_CRS, src.crs, ulx, lry, lrx, uly ) tile_window = window_from_bounds( west, south, east, north, transform=src.transform ) adjusted_tile_window = Window( tile_window.col_off - 1, tile_window.row_off - 1, tile_window.width + 2, tile_window.height + 2, ) tile_window = adjusted_tile_window.round_offsets().round_shape() # if no data in window, skip processing the tile if not src.read_masks(1, window=tile_window).any(): return tile, None except ValueError: log.info( "Tile %r will not be skipped, even if empty. This is harmless.", tile, ) num_threads = int(warp_options.pop("num_threads", 2)) reproject( rasterio.band(src, tmp.indexes), rasterio.band(tmp, tmp.indexes), src_nodata=src_nodata, dst_nodata=dst_nodata, num_threads=num_threads, resampling=resampling, **warp_options ) return tile, memfile.read()
Python
0
6785219c9e4e4bfd1d28e4802e992b84000a7f63
increase default read timeout to 5 seconds
pyatk/channel/uart.py
pyatk/channel/uart.py
# Copyright (c) 2012-2013 Harry Bock <bock.harryw@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import serial

from pyatk.channel import base

class UARTChannel(base.ATKChannelI):
    """
    A serial port communications channel.

    The serial port is automatically configured for
    115200 baud, 8N1, no flow control.
    """
    def __init__(self, port):
        super(UARTChannel, self).__init__()
        self._ramkernel_channel_type = base.CHANNEL_TYPE_UART
        self.port = None

        port = serial.serial_for_url(port, do_not_open = True)
        port.baudrate = 115200
        port.parity = serial.PARITY_NONE
        port.stopbits = serial.STOPBITS_ONE
        port.bytesize = serial.EIGHTBITS
        port.timeout = 5
        port.rtscts = False
        port.xonxoff = False
        port.dsrdtr = False

        self.port = port

    def open(self):
        self.port.open()

    def close(self):
        self.port.close()

    def write(self, data):
        # Writes cannot time out with no flow control, so ChannelWriteTimeout
        # is not raised.
        self.port.write(data)

    def read(self, length):
        """
        Read exactly ``length`` bytes from the UART channel.
        """
        data_read = []
        data_length = 0
        while data_length < length:
            data = self.port.read((length - data_length))
            # No data read indicates a timeout has occurred.
            if data == "":
                raise base.ChannelReadTimeout(length, "".join(data_read))

            data_read.append(data)
            data_length += len(data)

        return "".join(data_read)
# Copyright (c) 2012-2013 Harry Bock <bock.harryw@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import serial

from pyatk.channel import base

class UARTChannel(base.ATKChannelI):
    """
    A serial port communications channel.

    The serial port is automatically configured for
    115200 baud, 8N1, no flow control.
    """
    def __init__(self, port):
        super(UARTChannel, self).__init__()
        self._ramkernel_channel_type = base.CHANNEL_TYPE_UART
        self.port = None

        port = serial.serial_for_url(port, do_not_open = True)
        port.baudrate = 115200
        port.parity = serial.PARITY_NONE
        port.stopbits = serial.STOPBITS_ONE
        port.bytesize = serial.EIGHTBITS
        port.timeout = 0.5
        port.rtscts = False
        port.xonxoff = False
        port.dsrdtr = False

        self.port = port

    def open(self):
        self.port.open()

    def close(self):
        self.port.close()

    def write(self, data):
        # Writes cannot time out with no flow control, so ChannelWriteTimeout
        # is not raised.
        self.port.write(data)

    def read(self, length):
        """
        Read exactly ``length`` bytes from the UART channel.
        """
        data_read = []
        data_length = 0
        while data_length < length:
            data = self.port.read((length - data_length))
            # No data read indicates a timeout has occurred.
            if data == "":
                raise base.ChannelReadTimeout(length, "".join(data_read))

            data_read.append(data)
            data_length += len(data)

        return "".join(data_read)
Python
0
ec9bc89372670e623dbe98c34591fba62a0ee64a
Rename merge to pack in postp.
pyfr/scripts/postp.py
pyfr/scripts/postp.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

from tempfile import NamedTemporaryFile
from argparse import ArgumentParser, FileType

import numpy as np

from pyfr.util import rm


def process_pack(args):
    # List the contents of the directory
    relnames = os.listdir(args.indir)

    # Get the absolute file names and extension-less file names
    absnames = [os.path.join(args.indir, f) for f in relnames]
    repnames = [f[:-4] for f in relnames]

    # Open/load the files
    files = [np.load(f, mmap_mode='r') for f in absnames]

    # Get the output pyfrs file name
    outname = args.outf or args.indir.rstrip('/')

    # Determine the dir and prefix of the temp file
    dirname, basename = os.path.split(outname)

    # Create a named temp file
    tempf = NamedTemporaryFile(prefix=basename, dir=dirname, delete=False)

    try:
        # Write the contents of the directory out as an npz (pyfrs) file
        np.savez(tempf, **dict(zip(repnames, files)))
        tempf.close()

        # Remove the output path if it should exist
        if os.path.exists(outname):
            rm(outname)

        # Rename the temp file into place
        os.rename(tempf.name, outname)
    except:
        # Clean up the temporary file
        if os.path.exists(tempf.name):
            os.remove(tempf.name)

        # Re-raise
        raise


def main():
    ap = ArgumentParser(prog='pyfr-postp', description='Post processes a '
                        'PyFR simulation')

    sp = ap.add_subparsers(help='sub-command help')

    ap_pack = sp.add_parser('pack', help='pack --help', description='Packs a '
                            'pyfrs-directory into a pyfrs-file. If no '
                            'output file is specified then that of the '
                            'input directory is taken. This command will '
                            'replace any existing file or directory.')
    ap_pack.add_argument('indir', metavar='in',
                         help='Input PyFR solution directory')
    ap_pack.add_argument('outf', metavar='out', nargs='?',
                         help='Out PyFR solution file')
    ap_pack.set_defaults(process=process_pack)

    # Parse the arguments
    args = ap.parse_args()
    args.process(args)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

from tempfile import NamedTemporaryFile
from argparse import ArgumentParser, FileType

import numpy as np

from pyfr.util import rm


def process_pack(args):
    # List the contents of the directory
    relnames = os.listdir(args.indir)

    # Get the absolute file names and extension-less file names
    absnames = [os.path.join(args.indir, f) for f in relnames]
    repnames = [f[:-4] for f in relnames]

    # Open/load the files
    files = [np.load(f, mmap_mode='r') for f in absnames]

    # Get the output pyfrs file name
    outname = args.outf or args.indir.rstrip('/')

    # Determine the dir and prefix of the temp file
    dirname, basename = os.path.split(outname)

    # Create a named temp file
    tempf = NamedTemporaryFile(prefix=basename, dir=dirname, delete=False)

    try:
        # Write the contents of the directory out as an npz (pyfrs) file
        np.savez(tempf, **dict(zip(repnames, files)))
        tempf.close()

        # Remove the output path if it should exist
        if os.path.exists(outname):
            rm(outname)

        # Rename the temp file into place
        os.rename(tempf.name, outname)
    except:
        # Clean up the temporary file
        if os.path.exists(tempf.name):
            os.remove(tempf.name)

        # Re-raise
        raise


def main():
    ap = ArgumentParser(prog='pyfr-postp', description='Post processes a '
                        'PyFR simulation')

    sp = ap.add_subparsers(help='sub-command help')

    ap_merge = sp.add_parser('pack', help='pack --help', description='Packs a '
                             'pyfrs-directory into a pyfrs-file. If no '
                             'output file is specified then that of the '
                             'input directory is taken. This command will '
                             'replace any existing file or directory.')
    ap_merge.add_argument('indir', metavar='in',
                          help='Input PyFR solution directory')
    ap_merge.add_argument('outf', metavar='out', nargs='?',
                          help='Out PyFR solution file')
    ap_merge.set_defaults(process=process_pack)

    # Parse the arguments
    args = ap.parse_args()
    args.process(args)


if __name__ == '__main__':
    main()
Python
0
ef628bcdd79ceb28e2b320059c9b00e52372663a
Improve the error message when PyGMT fails to load the GMT library (#814)
pygmt/clib/loading.py
pygmt/clib/loading.py
""" Utility functions to load libgmt as ctypes.CDLL. The path to the shared library can be found automatically by ctypes or set through the GMT_LIBRARY_PATH environment variable. """ import ctypes import os import sys from ctypes.util import find_library from pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError def load_libgmt(): """ Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`. By default, will look for the shared library in the directory specified by the environment variable ``GMT_LIBRARY_PATH``. If it's not set, will let ctypes try to find the library. Returns ------- :py:class:`ctypes.CDLL` object The loaded shared library. Raises ------ GMTCLibNotFoundError If there was any problem loading the library (couldn't find it or couldn't access the functions). """ lib_fullnames = clib_full_names() error = True for libname in lib_fullnames: try: libgmt = ctypes.CDLL(libname) check_libgmt(libgmt) error = False break except OSError as err: error = err if error: raise GMTCLibNotFoundError( "Error loading the GMT shared library " f"{', '.join(lib_fullnames)}.\n {error}." ) return libgmt def clib_names(os_name): """ Return the name of GMT's shared library for the current OS. Parameters ---------- os_name : str The operating system name as given by ``sys.platform``. Returns ------- libnames : list of str List of possible names of GMT's shared library. """ if os_name.startswith("linux"): libnames = ["libgmt.so"] elif os_name == "darwin": # Darwin is macOS libnames = ["libgmt.dylib"] elif os_name == "win32": libnames = ["gmt.dll", "gmt_w64.dll", "gmt_w32.dll"] elif os_name.startswith("freebsd"): # FreeBSD libnames = ["libgmt.so"] else: raise GMTOSError(f'Operating system "{sys.platform}" not supported.') return libnames def clib_full_names(env=None): """ Return the full path of GMT's shared library for the current OS. Parameters ---------- env : dict or None A dictionary containing the environment variables. If ``None``, will default to ``os.environ``. Returns ------- lib_fullnames: list of str List of possible full names of GMT's shared library. """ if env is None: env = os.environ libnames = clib_names(os_name=sys.platform) # e.g. libgmt.so, libgmt.dylib, gmt.dll libpath = env.get("GMT_LIBRARY_PATH", "") # e.g. $HOME/miniconda/envs/pygmt/lib lib_fullnames = [os.path.join(libpath, libname) for libname in libnames] # Search for DLLs in PATH if GMT_LIBRARY_PATH is not defined [Windows only] if not libpath and sys.platform == "win32": for libname in libnames: libfullpath = find_library(libname) if libfullpath: lib_fullnames.append(libfullpath) return lib_fullnames def check_libgmt(libgmt): """ Make sure that libgmt was loaded correctly. Checks if it defines some common required functions. Does nothing if everything is fine. Raises an exception if any of the functions are missing. Parameters ---------- libgmt : :py:class:`ctypes.CDLL` A shared library loaded using ctypes. Raises ------ GMTCLibError """ # Check if a few of the functions we need are in the library functions = ["Create_Session", "Get_Enum", "Call_Module", "Destroy_Session"] for func in functions: if not hasattr(libgmt, "GMT_" + func): msg = " ".join( [ "Error loading libgmt.", "Couldn't access function GMT_{}.".format(func), ] ) raise GMTCLibError(msg)
""" Utility functions to load libgmt as ctypes.CDLL. The path to the shared library can be found automatically by ctypes or set through the GMT_LIBRARY_PATH environment variable. """ import ctypes import os import sys from ctypes.util import find_library from pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError def load_libgmt(): """ Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`. By default, will look for the shared library in the directory specified by the environment variable ``GMT_LIBRARY_PATH``. If it's not set, will let ctypes try to find the library. Returns ------- :py:class:`ctypes.CDLL` object The loaded shared library. Raises ------ GMTCLibNotFoundError If there was any problem loading the library (couldn't find it or couldn't access the functions). """ lib_fullnames = clib_full_names() error = True for libname in lib_fullnames: try: libgmt = ctypes.CDLL(libname) check_libgmt(libgmt) error = False break except OSError as err: error = err if error: raise GMTCLibNotFoundError( "Error loading the GMT shared library '{}':".format( ", ".join(lib_fullnames) ) ) return libgmt def clib_names(os_name): """ Return the name of GMT's shared library for the current OS. Parameters ---------- os_name : str The operating system name as given by ``sys.platform``. Returns ------- libnames : list of str List of possible names of GMT's shared library. """ if os_name.startswith("linux"): libnames = ["libgmt.so"] elif os_name == "darwin": # Darwin is macOS libnames = ["libgmt.dylib"] elif os_name == "win32": libnames = ["gmt.dll", "gmt_w64.dll", "gmt_w32.dll"] elif os_name.startswith("freebsd"): # FreeBSD libnames = ["libgmt.so"] else: raise GMTOSError(f'Operating system "{sys.platform}" not supported.') return libnames def clib_full_names(env=None): """ Return the full path of GMT's shared library for the current OS. Parameters ---------- env : dict or None A dictionary containing the environment variables. If ``None``, will default to ``os.environ``. Returns ------- lib_fullnames: list of str List of possible full names of GMT's shared library. """ if env is None: env = os.environ libnames = clib_names(os_name=sys.platform) # e.g. libgmt.so, libgmt.dylib, gmt.dll libpath = env.get("GMT_LIBRARY_PATH", "") # e.g. $HOME/miniconda/envs/pygmt/lib lib_fullnames = [os.path.join(libpath, libname) for libname in libnames] # Search for DLLs in PATH if GMT_LIBRARY_PATH is not defined [Windows only] if not libpath and sys.platform == "win32": for libname in libnames: libfullpath = find_library(libname) if libfullpath: lib_fullnames.append(libfullpath) return lib_fullnames def check_libgmt(libgmt): """ Make sure that libgmt was loaded correctly. Checks if it defines some common required functions. Does nothing if everything is fine. Raises an exception if any of the functions are missing. Parameters ---------- libgmt : :py:class:`ctypes.CDLL` A shared library loaded using ctypes. Raises ------ GMTCLibError """ # Check if a few of the functions we need are in the library functions = ["Create_Session", "Get_Enum", "Call_Module", "Destroy_Session"] for func in functions: if not hasattr(libgmt, "GMT_" + func): msg = " ".join( [ "Error loading libgmt.", "Couldn't access function GMT_{}.".format(func), ] ) raise GMTCLibError(msg)
Python
0.000811
d3bc063cc35f5b7bc806c83cd23780108c509fb6
Disable checkin in embedded mode.
pykeg/core/checkin.py
pykeg/core/checkin.py
# Copyright 2014 Bevbot LLC, All Rights Reserved
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.

"""Checks a central server for updates."""

from django.conf import settings
from django.utils import timezone

from pykeg.core import models
from pykeg.core import util

import logging
import os
import requests

FIELD_REG_ID = 'reg_id'
FIELD_PRODUCT = 'product'
FIELD_VERSION = 'version'
FIELD_INTERVAL_MILLIS = 'interval_millis'
FIELD_UPDATE_AVAILABLE = 'update_available'
FIELD_UPDATE_REQUIRED = 'update_required'
FIELD_UPDATE_TITLE = 'update_title'
FIELD_UPDATE_URL = 'update_url'
FIELD_NEWS = 'news'

PRODUCT = 'kegbot-server'

CHECKIN_URL = os.getenv('CHECKIN_URL', None) or 'https://kegbotcheckin.appspot.com/checkin'

LOGGER = logging.getLogger('checkin')

logging.getLogger('requests').setLevel(logging.WARNING)


class CheckinError(Exception):
    """Base exception."""


def checkin(url=CHECKIN_URL, product=PRODUCT, timeout=None, quiet=False):
    """Issue a single checkin to the checkin server.

    No-op if kbsite.check_for_updates is False.

    Returns
        A checkin response dictionary, or None if checkin is disabled.

    Raises
        ValueError: On malformed reponse.
        requests.RequestException: On error talking to server.
    """
    if settings.EMBEDDED:
        LOGGER.debug('Checkin disabled in embedded mode')
        return

    kbsite = models.KegbotSite.get()
    if not kbsite.check_for_updates:
        LOGGER.debug('Upgrade check is disabled')
        return

    site = models.KegbotSite.get()
    reg_id = site.registration_id

    headers = {
        'User-Agent': util.get_user_agent(),
    }
    payload = {
        FIELD_PRODUCT: product,
        FIELD_REG_ID: reg_id,
        FIELD_VERSION: util.get_version(),
    }

    try:
        LOGGER.debug('Checking in, url=%s reg_id=%s' % (url, reg_id))
        result = requests.post(url, data=payload, headers=headers,
                               timeout=timeout).json()
        new_reg_id = result.get(FIELD_REG_ID)
        if new_reg_id != reg_id:
            LOGGER.debug('Updating reg_id=%s' % new_reg_id)
            site.registration_id = new_reg_id
            site.save()
        LOGGER.debug('Checkin result: %s' % str(result))
        if not quiet:
            LOGGER.info('Checkin complete, reg_id=%s' % (reg_id,))
        site.last_checkin_response = result
        site.last_checkin_time = timezone.now()
        site.save()
        return result
    except (ValueError, requests.RequestException) as e:
        if not quiet:
            LOGGER.warning('Checkin error: %s' % str(e))
        raise CheckinError(e)
# Copyright 2014 Bevbot LLC, All Rights Reserved
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.

"""Checks a central server for updates."""

from django.utils import timezone

from pykeg.core import models
from pykeg.core import util

import logging
import os
import requests

FIELD_REG_ID = 'reg_id'
FIELD_PRODUCT = 'product'
FIELD_VERSION = 'version'
FIELD_INTERVAL_MILLIS = 'interval_millis'
FIELD_UPDATE_AVAILABLE = 'update_available'
FIELD_UPDATE_REQUIRED = 'update_required'
FIELD_UPDATE_TITLE = 'update_title'
FIELD_UPDATE_URL = 'update_url'
FIELD_NEWS = 'news'

PRODUCT = 'kegbot-server'

CHECKIN_URL = os.getenv('CHECKIN_URL', None) or 'https://kegbotcheckin.appspot.com/checkin'

LOGGER = logging.getLogger('checkin')

logging.getLogger('requests').setLevel(logging.WARNING)


class CheckinError(Exception):
    """Base exception."""


def checkin(url=CHECKIN_URL, product=PRODUCT, timeout=None, quiet=False):
    """Issue a single checkin to the checkin server.

    No-op if kbsite.check_for_updates is False.

    Returns
        A checkin response dictionary, or None if checkin is disabled.

    Raises
        ValueError: On malformed reponse.
        requests.RequestException: On error talking to server.
    """
    kbsite = models.KegbotSite.get()
    if not kbsite.check_for_updates:
        LOGGER.debug('Upgrade check is disabled')
        return

    site = models.KegbotSite.get()
    reg_id = site.registration_id

    headers = {
        'User-Agent': util.get_user_agent(),
    }
    payload = {
        FIELD_PRODUCT: product,
        FIELD_REG_ID: reg_id,
        FIELD_VERSION: util.get_version(),
    }

    try:
        LOGGER.debug('Checking in, url=%s reg_id=%s' % (url, reg_id))
        result = requests.post(url, data=payload, headers=headers,
                               timeout=timeout).json()
        new_reg_id = result.get(FIELD_REG_ID)
        if new_reg_id != reg_id:
            LOGGER.debug('Updating reg_id=%s' % new_reg_id)
            site.registration_id = new_reg_id
            site.save()
        LOGGER.debug('Checkin result: %s' % str(result))
        if not quiet:
            LOGGER.info('Checkin complete, reg_id=%s' % (reg_id,))
        site.last_checkin_response = result
        site.last_checkin_time = timezone.now()
        site.save()
        return result
    except (ValueError, requests.RequestException) as e:
        if not quiet:
            LOGGER.warning('Checkin error: %s' % str(e))
        raise CheckinError(e)
Python
0
301b2ca9cdf33665312e092937c63b1db7db888f
Add missing imports
pymessenger2/utils.py
pymessenger2/utils.py
import hashlib
import hmac
import six
import attr
import json


def validate_hub_signature(app_secret, request_payload, hub_signature_header):
    """
        @inputs:
            app_secret: Secret Key for application
            request_payload: request body
            hub_signature_header: X-Hub-Signature header sent with request
        @outputs:
            boolean indicated that hub signature is validated
    """
    try:
        hash_method, hub_signature = hub_signature_header.split('=')
    except:
        pass
    else:
        digest_module = getattr(hashlib, hash_method)
        hmac_object = hmac.new(
            str(app_secret), unicode(request_payload), digest_module)
        generated_hash = hmac_object.hexdigest()
        if hub_signature == generated_hash:
            return True
    return False


def generate_appsecret_proof(access_token, app_secret):
    """
        @inputs:
            access_token: page access token
            app_secret_token: app secret key
        @outputs:
            appsecret_proof: HMAC-SHA256 hash of page access token
                using app_secret as the key
    """
    if six.PY2:
        hmac_object = hmac.new(
            str(app_secret), unicode(access_token), hashlib.sha256)
    else:
        hmac_object = hmac.new(
            bytearray(app_secret, 'utf8'),
            str(access_token).encode('utf8'), hashlib.sha256)
    generated_hash = hmac_object.hexdigest()
    return generated_hash


class ToJsonMixin:
    """
    Derive from this with an `.asdict` member to get a working `to_json`
    function!
    """
    def to_json(self):
        items_iterator = (attr.asdict(self).items()
                          if six.PY3 else attr.asdict(self).iteritems())
        return json.dumps({k: v for k, v in items_iterator if v is not None})
import hashlib
import hmac
import six


def validate_hub_signature(app_secret, request_payload, hub_signature_header):
    """
        @inputs:
            app_secret: Secret Key for application
            request_payload: request body
            hub_signature_header: X-Hub-Signature header sent with request
        @outputs:
            boolean indicated that hub signature is validated
    """
    try:
        hash_method, hub_signature = hub_signature_header.split('=')
    except:
        pass
    else:
        digest_module = getattr(hashlib, hash_method)
        hmac_object = hmac.new(
            str(app_secret), unicode(request_payload), digest_module)
        generated_hash = hmac_object.hexdigest()
        if hub_signature == generated_hash:
            return True
    return False


def generate_appsecret_proof(access_token, app_secret):
    """
        @inputs:
            access_token: page access token
            app_secret_token: app secret key
        @outputs:
            appsecret_proof: HMAC-SHA256 hash of page access token
                using app_secret as the key
    """
    if six.PY2:
        hmac_object = hmac.new(
            str(app_secret), unicode(access_token), hashlib.sha256)
    else:
        hmac_object = hmac.new(
            bytearray(app_secret, 'utf8'),
            str(access_token).encode('utf8'), hashlib.sha256)
    generated_hash = hmac_object.hexdigest()
    return generated_hash


class ToJsonMixin:
    """
    Derive from this with an `.asdict` member to get a working `to_json`
    function!
    """
    def to_json(self):
        items_iterator = (attr.asdict(self).items()
                          if six.PY3 else attr.asdict(self).iteritems())
        return json.dumps({k: v for k, v in items_iterator if v is not None})
Python
0.000009
20d41656488ea43978f749e2e34303e49981695c
fix imports to include OR tools
pymzn/mzn/__init__.py
pymzn/mzn/__init__.py
from .model import *
from .solvers import *
from .minizinc import *
from .templates import *


__all__ = [
    'Solutions', 'minizinc', 'mzn2fzn', 'solns2out', 'MiniZincError',
    'MiniZincUnsatisfiableError', 'MiniZincUnknownError',
    'MiniZincUnboundedError', 'MiniZincModel', 'Statement', 'Constraint',
    'Variable', 'ArrayVariable', 'OutputStatement', 'SolveStatement',
    'Solver', 'Gecode', 'Chuffed', 'Optimathsat', 'Opturion', 'MIPSolver',
    'Gurobi', 'CBC', 'G12Solver', 'G12Fd', 'G12Lazy', 'G12MIP', 'OscarCBLS',
    'ORTools', 'gecode', 'chuffed', 'optimathsat', 'opturion', 'gurobi',
    'cbc', 'g12fd', 'g12lazy', 'g12mip', 'oscar_cbls', 'or_tools',
    'discretize', 'from_string', 'add_package', 'add_path'
]
from .model import *
from .solvers import *
from .minizinc import *
from .templates import *


__all__ = [
    'Solutions', 'minizinc', 'mzn2fzn', 'solns2out', 'MiniZincError',
    'MiniZincUnsatisfiableError', 'MiniZincUnknownError',
    'MiniZincUnboundedError', 'MiniZincModel', 'Statement', 'Constraint',
    'Variable', 'ArrayVariable', 'OutputStatement', 'SolveStatement',
    'Solver', 'Gecode', 'Chuffed', 'Optimathsat', 'Opturion', 'MIPSolver',
    'Gurobi', 'CBC', 'G12Solver', 'G12Fd', 'G12Lazy', 'G12MIP', 'OscarCBLS',
    'gecode', 'chuffed', 'optimathsat', 'opturion', 'gurobi', 'cbc',
    'g12fd', 'g12lazy', 'g12mip', 'oscar_cbls',
    'discretize', 'from_string', 'add_package', 'add_path'
]
Python
0
3cd595fb0a2f1d027aefe70b59e235ef3dd14d61
update basespider download
xspider/libs/basespider/basespider.py
xspider/libs/basespider/basespider.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created on 2017-02-21
# Project: basespider

import json
import time
import socket
import requests
import traceback
from requests.exceptions import ReadTimeout
from requests.exceptions import ConnectionError


class BaseGenerator(object):
    """
    BaseSpider Generator
    """

    def __init__(self):
        """
        Generator Initialization
        """
        self.urls = []

    def generate(self, url):
        """
        Obtain URI
        :return:
        """
        self.urls.append(url)
        for url in self.urls:
            print json.dumps({"url": url, "args": 'None'})

    def start_generator(self):
        """
        Start Generator
        :return:
        """
        start_url = "__START_URL__"
        self.generate(start_url)


class BaseDownloader(object):
    """
    BaseSpider Downloader
    """

    def __init__(self):
        """
        Downloader Initialization
        """
        self.reqst = requests.Session()
        self.headers = {'Accept': 'text/html, application/xhtml+xml, */*',
                        'Accept-Encoding': 'gzip, deflate',
                        'Accept-Language': 'en-US, en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
                        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:39.0) Gecko/20100101 Firefox/39.0'}

    def download(self, url, tools="request", timeout=50, **kwargs):
        """
        Downloader Download By Type
        :return: response object
        """
        if tools == "request":
            start_time = time.time()
            try:
                resp = self.reqst.get(url, timeout=timeout, **kwargs)
                if resp.status_code != 200:
                    resp = self.reqst.get(url, timeout=50)
                    if resp.status_code != 200:
                        raise ConnectionError
                end_time = time.time()
                return resp
            except Exception:
                print traceback.format_exc()


class BaseParser(object):
    """
    BaseSpider Parser
    """

    def __init__(self):
        """
        Parser Initialization
        """
        pass

    def parser(self, resp):
        """
        Paeser resp content
        :param resp:
        :return:
        """
        return resp
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created on 2017-02-21
# Project: basespider

import json
import time
import socket
import requests
import traceback
from requests.exceptions import ReadTimeout
from requests.exceptions import ConnectionError


class BaseGenerator(object):
    """
    BaseSpider Generator
    """

    def __init__(self):
        """
        Generator Initialization
        """
        self.urls = []

    def generate(self, url):
        """
        Obtain URI
        :return:
        """
        self.urls.append(url)
        for url in self.urls:
            print json.dumps({"url": url, "args": 'None'})

    def start_generator(self):
        """
        Start Generator
        :return:
        """
        start_url = "__START_URL__"
        self.generate(start_url)


class BaseDownloader(object):
    """
    BaseSpider Downloader
    """

    def __init__(self):
        """
        Downloader Initialization
        """
        self.reqst = requests.Session()
        self.reqst.headers.update(
            {'Accept': 'text/html, application/xhtml+xml, */*',
             'Accept-Encoding': 'gzip, deflate',
             'Accept-Language': 'en-US, en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
             'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:39.0) Gecko/20100101 Firefox/39.0'})

    def download(self, url, type="request", timeout=50):
        """
        Downloader Download By Type
        :return: response object
        """
        if type == "request":
            start_time = time.time()
            try:
                resp = self.reqst.get(url, timeout=timeout)
                if resp.status_code != 200:
                    resp = self.reqst.get(url, timeout=50)
                    if resp.status_code != 200:
                        raise ConnectionError
                end_time = time.time()
                return resp
            except Exception:
                print traceback.format_exc()


class BaseParser(object):
    """
    BaseSpider Parser
    """

    def __init__(self):
        """
        Parser Initialization
        """
        pass

    def parser(self, resp):
        """
        Paeser resp content
        :param resp:
        :return:
        """
        return resp
Python
0
80bc283676be51ef67fe7924bcc32adaa93fc985
Change timestamp format
guestbook/__init__.py
guestbook/__init__.py
# coding: utf-8

import pickle
from datetime import datetime
from collections import namedtuple, deque
from flask import Flask, request, render_template, redirect, escape, Markup

application = Flask(__name__)

DATA_FILE = 'guestbook.dat'

Post = namedtuple('Post', ['name', 'timestamp', 'comment'])


def save_post(name, timestamp, comment):
    posts = pickle.load(DATA_FILE)
    assert isinstance(posts, deque)
    posts.appendleft(Post(name, timestamp, comment))
    pickle.dump(posts, DATA_FILE)


def load_posts():
    return pickle.load(DATA_FILE)


@application.route('/')
def index():
    return render_template('index.html', greeting_list=load_posts())


@application.route('/post', methods=['POST'])
def post():
    name = request.form.get('name')
    comment = request.form.get('comment')
    save_post(name, datetime.now(), comment)
    return redirect('/')


@application.template_filter('nl2br')
def nl2br_filter(s):
    return escape(s).replace('\n', Markup('<br />'))


@application.template_filter('datetime_fmt')
def datetime_fmt_filter(dt):
    return dt.strftime('%d/%m/%Y %H:%M:%S')


def main():
    application.run('127.0.0.1', 8000)


if __name__ == "__main__":
    application.run('127.0.0.1', 8000, debug=True)
# coding: utf-8

import pickle
from datetime import datetime
from collections import namedtuple, deque
from flask import Flask, request, render_template, redirect, escape, Markup

application = Flask(__name__)

DATA_FILE = 'guestbook.dat'

Post = namedtuple('Post', ['name', 'timestamp', 'comment'])


def save_post(name, timestamp, comment):
    posts = pickle.load(DATA_FILE)
    assert isinstance(posts, deque)
    posts.appendleft(Post(name, timestamp, comment))
    pickle.dump(posts, DATA_FILE)


def load_posts():
    return pickle.load(DATA_FILE)


@application.route('/')
def index():
    return render_template('index.html', greeting_list=load_posts())


@application.route('/post', methods=['POST'])
def post():
    name = request.form.get('name')
    comment = request.form.get('comment')
    save_post(name, datetime.now(), comment)
    return redirect('/')


@application.template_filter('nl2br')
def nl2br_filter(s):
    return escape(s).replace('\n', Markup('<br />'))


@application.template_filter('datetime_fmt')
def datetime_fmt_filter(dt):
    return dt.strftime('%Y%m%d %H:%M:%S')


def main():
    application.run('127.0.0.1', 8000)


if __name__ == "__main__":
    application.run('127.0.0.1', 8000, debug=True)
Python
0.000162
f860a306b4c9fc583a83289ae2a6ecf407214e38
Add more checks to avoid crashing when input files are missing
pysteps/io/readers.py
pysteps/io/readers.py
"""Methods for reading files. """ import numpy as np def read_timeseries(inputfns, importer, **kwargs): """Read a list of input files using io tools and stack them into a 3d array. Parameters ---------- inputfns : list List of input files returned by any function implemented in archive. importer : function Any function implemented in importers. kwargs : dict Optional keyword arguments for the importer. Returns ------- out : tuple A three-element tuple containing the precipitation fields read, the quality fields, and associated metadata. """ # check for missing data Rref = None if all(ifn is None for ifn in inputfns): return None, None, None else: if len(inputfns[0]) == 0: return None, None, None for ifn in inputfns[0]: if ifn is not None: Rref, Qref, metadata = importer(ifn, **kwargs) break if Rref is None: return None, None, None R = [] Q = [] timestamps = [] for i,ifn in enumerate(inputfns[0]): if ifn is not None: R_, Q_, _ = importer(ifn, **kwargs) R.append(R_) Q.append(Q_) timestamps.append(inputfns[1][i]) else: R.append(Rref*np.nan) if Qref is not None: Q.append(Qref*np.nan) else: Q.append(None) timestamps.append(inputfns[1][i]) # Replace this with stack? R = np.concatenate([R_[None, :, :] for R_ in R]) #TODO: Q should be organized as R, but this is not trivial as Q_ can be also None or a scalar metadata["timestamps"] = np.array(timestamps) return R, Q, metadata
"""Methods for reading files. """ import numpy as np def read_timeseries(inputfns, importer, **kwargs): """Read a list of input files using io tools and stack them into a 3d array. Parameters ---------- inputfns : list List of input files returned by any function implemented in archive. importer : function Any function implemented in importers. kwargs : dict Optional keyword arguments for the importer. Returns ------- out : tuple A three-element tuple containing the precipitation fields read, the quality fields, and associated metadata. """ # check for missing data if all(ifn is None for ifn in inputfns): return None, None, None else: for ifn in inputfns[0]: if ifn is not None: Rref, Qref, metadata = importer(ifn, **kwargs) break R = [] Q = [] timestamps = [] for i,ifn in enumerate(inputfns[0]): if ifn is not None: R_, Q_, _ = importer(ifn, **kwargs) R.append(R_) Q.append(Q_) timestamps.append(inputfns[1][i]) else: R.append(Rref*np.nan) if Qref is not None: Q.append(Qref*np.nan) else: Q.append(None) timestamps.append(inputfns[1][i]) R = np.concatenate([R_[None, :, :] for R_ in R]) #TODO: Q should be organized as R, but this is not trivial as Q_ can be also None or a scalar metadata["timestamps"] = np.array(timestamps) return R, Q, metadata
Python
0
c3527f5526ee96398760cbef11d7de48f41fe998
Annotate NormOP test to skip grad check (#21894)
python/paddle/fluid/tests/unittests/test_norm_op.py
python/paddle/fluid/tests/unittests/test_norm_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci


def l2_norm(x, axis, epsilon):
    x2 = x**2
    s = np.sum(x2, axis=axis, keepdims=True)
    r = np.sqrt(s + epsilon)
    y = x / np.broadcast_to(r, x.shape)
    return y, r


class TestNormOp(OpTest):
    def setUp(self):
        self.op_type = "norm"
        self.init_test_case()
        x = np.random.random(self.shape).astype("float64")
        y, norm = l2_norm(x, self.axis, self.epsilon)
        self.inputs = {'X': x}
        self.attrs = {'epsilon': self.epsilon, 'axis': self.axis}
        self.outputs = {'Out': y, 'Norm': norm}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def init_test_case(self):
        self.shape = [2, 3, 4, 5]
        self.axis = 1
        self.epsilon = 1e-8


class TestNormOp2(TestNormOp):
    def init_test_case(self):
        self.shape = [5, 3, 9, 7]
        self.axis = 0
        self.epsilon = 1e-8


class TestNormOp3(TestNormOp):
    def init_test_case(self):
        self.shape = [5, 3, 2, 7]
        self.axis = -1
        self.epsilon = 1e-8


@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " +
                    "however it is desirable to cover the forward pass")
class TestNormOp4(TestNormOp):
    def init_test_case(self):
        self.shape = [128, 1024, 14, 14]
        self.axis = 2
        self.epsilon = 1e-8

    def test_check_grad(self):
        pass


@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " +
                    "however it is desirable to cover the forward pass")
class TestNormOp5(TestNormOp):
    def init_test_case(self):
        self.shape = [2048, 2048]
        self.axis = 1
        self.epsilon = 1e-8

    def test_check_grad(self):
        pass


if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest


def l2_norm(x, axis, epsilon):
    x2 = x**2
    s = np.sum(x2, axis=axis, keepdims=True)
    r = np.sqrt(s + epsilon)
    y = x / np.broadcast_to(r, x.shape)
    return y, r


class TestNormOp(OpTest):
    def setUp(self):
        self.op_type = "norm"
        self.init_test_case()
        x = np.random.random(self.shape).astype("float64")
        y, norm = l2_norm(x, self.axis, self.epsilon)
        self.inputs = {'X': x}
        self.attrs = {'epsilon': self.epsilon, 'axis': self.axis}
        self.outputs = {'Out': y, 'Norm': norm}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def init_test_case(self):
        self.shape = [2, 3, 4, 5]
        self.axis = 1
        self.epsilon = 1e-8


class TestNormOp2(TestNormOp):
    def init_test_case(self):
        self.shape = [5, 3, 9, 7]
        self.axis = 0
        self.epsilon = 1e-8


class TestNormOp3(TestNormOp):
    def init_test_case(self):
        self.shape = [5, 3, 2, 7]
        self.axis = -1
        self.epsilon = 1e-8


class TestNormOp4(TestNormOp):
    def init_test_case(self):
        self.shape = [128, 1024, 14, 14]
        self.axis = 2
        self.epsilon = 1e-8

    def test_check_grad(self):
        # since the gradient check is very slow in large shape, so skip check_grad
        pass


class TestNormOp5(TestNormOp):
    def init_test_case(self):
        self.shape = [2048, 2048]
        self.axis = 1
        self.epsilon = 1e-8

    def test_check_grad(self):
        # since the gradient check is very slow in large shape, so skip check_grad
        pass


if __name__ == '__main__':
    unittest.main()
Python
0
dbc1df293f283367526b3a80c5f24d71e5d46be1
fix bug abort is undefined and return 204
middleware/app.py
middleware/app.py
from flask import Flask, jsonify, request, abort
from sense_hat import SenseHat

from hat_manager import HatManager

app = Flask(__name__)

sense_hat = SenseHat()
hat_manager = HatManager(sense_hat)


@app.route('/')
def index():
    return 'Welcome to the PI manager. Choose a route according to what you want to do.'


@app.route('/status')
def get_status():
    status = {'pressure': hat_manager.get_pressure,
              'temperature': hat_manager.get_temperature,
              'humidity': hat_manager.get_humidity}
    return jsonify({'status': status})


@app.route('/message', methods=['POST'])
def print_message():
    if not request.json or not 'message' in request.json:
        abort(400)
    message = request.json['message']
    color = request.json['text_color']
    bg_color = request.json['bg_color']
    hat_manager.set_message(message)
    return jsonify(), 204


if __name__ == '__main__':
    # 0.0.0.0 = accessible to any device on the network
    app.run(debug=True, host='0.0.0.0')
from flask import Flask, jsonify, request
from sense_hat import SenseHat

from hat_manager import HatManager

app = Flask(__name__)

sense_hat = SenseHat()
hat_manager = HatManager(sense_hat)


@app.route('/')
def index():
    return 'Welcome to the PI manager. Choose a route according to what you want to do.'


@app.route('/status')
def get_status():
    status = {'pressure': hat_manager.get_pressure,
              'temperature': hat_manager.get_temperature,
              'humidity': hat_manager.get_humidity}
    return jsonify({'status': status})


@app.route('/message', methods=['POST'])
def print_message():
    if not request.json or not 'message' in request.json:
        abort(400)
    message = request.json['message']
    color = request.json['text_color']
    bg_color = request.json['bg_color']
    hat_manager.set_message(message)


if __name__ == '__main__':
    # 0.0.0.0 = accessible to any device on the network
    app.run(debug=True, host='0.0.0.0')
Python
0.00001
fb34eebd253727dcc718e2387cb6f4ac763f0bae
Add DateTime Completed Field to Task
tasks/models/tasks.py
tasks/models/tasks.py
"""Models for tasks Each new type of task corresponds to a task model """ from django.db import models from data import Data_FullGrid_Confidence, Data_FullGrid # Tasks class Task_Naming_001(Data_FullGrid_Confidence): class Meta: db_table = 'tbl_response_naming_001' def __unicode__(self): return 'Task Naming 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Foci_001(Data_FullGrid): class Meta: db_table = 'tbl_response_foci_001' def __unicode__(self): return 'Task Foci 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) dt_completed = models.DateTimeField(auto_now=True) class Task_Mapping_001(Data_FullGrid): class Meta: db_table = 'tbl_response_mapping_001' def __unicode__(self): return 'Task Mapping 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True)
"""Models for tasks Each new type of task corresponds to a task model """ from django.db import models from data import Data_FullGrid_Confidence, Data_FullGrid # Tasks class Task_Naming_001(Data_FullGrid_Confidence): class Meta: db_table = 'tbl_response_naming_001' def __unicode__(self): return 'Task Naming 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Foci_001(Data_FullGrid): class Meta: db_table = 'tbl_response_foci_001' def __unicode__(self): return 'Task Foci 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Mapping_001(Data_FullGrid): class Meta: db_table = 'tbl_response_mapping_001' def __unicode__(self): return 'Task Mapping 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True)
Python
0
6db0dccc5643cb2af254b0bf052806645f7445fd
fix regression on qibuild deploy
python/qibuild/gdb.py
python/qibuild/gdb.py
## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.

""" Tools for the GNU debbugger

"""

import os

from qibuild import ui
import qibuild.sh
import qibuild.command

def split_debug(base_dir, objcopy=None):
    """ Split the debug information out of all the binaries in
    lib/ and bin/

    The debug information will be put in a .debug directory next
    to the executable

    <base_dir>/bin/foo
    <base_dir>/bin/.debug/foo

    Also uses objcopy so that the binaries and libraries still remain
    usable with gdb

    :param: the objcopy executable to use. (defaults to
     the first objcopy executable found in PATH)

    """
    if objcopy is None:
        objcopy = "objcopy"
    def _get_binaries(dir):
        res = list()
        for root, directories, filenames in os.walk(dir):
            if os.path.basename(root) == ".debug":
                continue
            for filename in filenames:
                full_path = os.path.join(root, filename)
                if qibuild.sh.is_binary(full_path):
                    res.append(full_path)
        return res
    binaries = list()
    bin_dir = os.path.join(base_dir, "bin")
    lib_dir = os.path.join(base_dir, "lib")
    binaries.extend(_get_binaries(bin_dir))
    binaries.extend(_get_binaries(lib_dir))

    for src in binaries:
        dirname, basename = os.path.split(src)
        debug_dir = os.path.join(dirname, ".debug")
        qibuild.sh.mkdir(debug_dir)
        dest = os.path.join(src, debug_dir, basename)
        to_run = list()
        to_run.append([objcopy, "--only-keep-debug", src, dest])
        to_run.append([objcopy,
                       "--strip-debug", "--strip-unneeded",
                       "--add-gnu-debuglink=%s" % dest, src])
        retcode = 0
        #check if we need to do something
        #if mtime of src and dest are the same continue, else do the work and set
        #the mtime of dest to the one of src.
        stsrc = os.stat(src)
        stdst = None
        if os.path.exists(dest):
            stdst = os.stat(dest)
        if stdst and stsrc.st_mtime == stdst.st_mtime:
            ui.info("Debug info up-to-date for %s" % os.path.relpath(src, base_dir))
            continue
        for cmd in to_run:
            retcode = 0
            # FIXME: we should of course not try to split debug info twice, but
            # that's a hard problem
            retcode += qibuild.command.call(cmd, ignore_ret_code=True, quiet=True)
        if retcode == 0:
            os.utime(dest, (stsrc.st_atime, stsrc.st_mtime))
            ui.info("Debug info extracted for %s" % os.path.relpath(src, base_dir))
        else:
            ui.error("Error while extracting debug for %s" % os.path.relpath(src, base_dir))

if __name__ == "__main__":
    import sys
    split_debug(sys.argv[1])
## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.

""" Tools for the GNU debbugger

"""

import os

from qibuild import ui
import qibuild.sh
import qibuild.command

def split_debug(base_dir, objcopy=None):
    """ Split the debug information out of all the binaries in
    lib/ and bin/

    The debug information will be put in a .debug directory next
    to the executable

    <base_dir>/bin/foo
    <base_dir>/bin/.debug/foo

    Also uses objcopy so that the binaries and libraries still remain
    usable with gdb

    :param: the objcopy executable to use. (defaults to
     the first objcopy executable found in PATH)

    """
    if objcopy is None:
        objcopy = "objcopy"
    def _get_binaries(dir):
        res = list()
        for root, directories, filenames in os.walk(dir):
            if os.path.basename(root) == ".debug":
                continue
            for filename in filenames:
                full_path = os.path.join(root, filename)
                if qibuild.sh.is_binary(filename):
                    res.append(full_path)
        return res
    binaries = list()
    bin_dir = os.path.join(base_dir, "bin")
    lib_dir = os.path.join(base_dir, "lib")
    binaries.extend(_get_binaries(bin_dir))
    binaries.extend(_get_binaries(lib_dir))

    for src in binaries:
        dirname, basename = os.path.split(src)
        debug_dir = os.path.join(dirname, ".debug")
        qibuild.sh.mkdir(debug_dir)
        dest = os.path.join(src, debug_dir, basename)
        to_run = list()
        to_run.append([objcopy, "--only-keep-debug", src, dest])
        to_run.append([objcopy,
                       "--strip-debug", "--strip-unneeded",
                       "--add-gnu-debuglink=%s" % dest, src])
        retcode = 0
        #check if we need to do something
        #if mtime of src and dest are the same continue, else do the work and set
        #the mtime of dest to the one of src.
        stsrc = os.stat(src)
        stdst = None
        if os.path.exists(dest):
            stdst = os.stat(dest)
        if stdst and stsrc.st_mtime == stdst.st_mtime:
            ui.info("Debug info up-to-date for %s" % os.path.relpath(src, base_dir))
            continue
        for cmd in to_run:
            retcode = 0
            # FIXME: we should of course not try to split debug info twice, but
            # that's a hard problem
            retcode += qibuild.command.call(cmd, ignore_ret_code=True, quiet=True)
        if retcode == 0:
            os.utime(dest, (stsrc.st_atime, stsrc.st_mtime))
            ui.info("Debug info extracted for %s" % os.path.relpath(src, base_dir))
        else:
            ui.error("Error while extracting debug for %s" % os.path.relpath(src, base_dir))

if __name__ == "__main__":
    import sys
    split_debug(sys.argv[1])
Python
0.000001
547c1d5d1ff2ced0969a86eda6e0094f8b76d94f
Bump to 0.1.1 with setup.py fix
minio/__init__.py
minio/__init__.py
# Minimal Object Storage Library, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .minio import Minio
from .acl import Acl

from .parsers import Bucket, Object, ResponseError

__author__ = "Minio, Inc."
__version__ = "0.1.1"
# Minimal Object Storage Library, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .minio import Minio
from .acl import Acl

from .parsers import Bucket, Object, ResponseError

__author__ = "Minio, Inc."
__version__ = "0.1.0"
Python
0
4bb8a61cde27575865cdd2b7df5afcb5d6860523
Add weird SLP orientation to get_world_pedir
fmriprep/interfaces/tests/test_reports.py
fmriprep/interfaces/tests/test_reports.py
import pytest

from ..reports import get_world_pedir


@pytest.mark.parametrize("orientation,pe_dir,expected", [
    ('RAS', 'j', 'Posterior-Anterior'),
    ('RAS', 'j-', 'Anterior-Posterior'),
    ('RAS', 'i', 'Left-Right'),
    ('RAS', 'i-', 'Right-Left'),
    ('RAS', 'k', 'Inferior-Superior'),
    ('RAS', 'k-', 'Superior-Inferior'),
    ('LAS', 'j', 'Posterior-Anterior'),
    ('LAS', 'i-', 'Left-Right'),
    ('LAS', 'k-', 'Superior-Inferior'),
    ('LPI', 'j', 'Anterior-Posterior'),
    ('LPI', 'i-', 'Left-Right'),
    ('LPI', 'k-', 'Inferior-Superior'),
    ('SLP', 'k-', 'Posterior-Anterior'),
    ('SLP', 'k', 'Anterior-Posterior'),
    ('SLP', 'j-', 'Left-Right'),
    ('SLP', 'j', 'Right-Left'),
    ('SLP', 'i', 'Inferior-Superior'),
    ('SLP', 'i-', 'Superior-Inferior'),
])
def test_get_world_pedir(tmpdir, orientation, pe_dir, expected):
    assert get_world_pedir(orientation, pe_dir) == expected
import pytest

from ..reports import get_world_pedir


@pytest.mark.parametrize("orientation,pe_dir,expected", [
    ('RAS', 'j', 'Posterior-Anterior'),
    ('RAS', 'j-', 'Anterior-Posterior'),
    ('RAS', 'i', 'Left-Right'),
    ('RAS', 'i-', 'Right-Left'),
    ('RAS', 'k', 'Inferior-Superior'),
    ('RAS', 'k-', 'Superior-Inferior'),
    ('LAS', 'j', 'Posterior-Anterior'),
    ('LAS', 'i-', 'Left-Right'),
    ('LAS', 'k-', 'Superior-Inferior'),
    ('LPI', 'j', 'Anterior-Posterior'),
    ('LPI', 'i-', 'Left-Right'),
    ('LPI', 'k-', 'Inferior-Superior'),
])
def test_get_world_pedir(tmpdir, orientation, pe_dir, expected):
    assert get_world_pedir(orientation, pe_dir) == expected
Python
0
a059a7e8b751fbc49bd1f363378d630d774ed2c1
set subtypes to None if not supported in this TAXII version
taxii_client/utils.py
taxii_client/utils.py
import pytz
import json
import calendar

from libtaxii.clients import HttpClient
from libtaxii.messages_10 import ContentBlock as ContentBlock10

from datetime import datetime
from collections import namedtuple


def ts_to_date(timestamp):
    if not timestamp:
        return None
    return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.UTC)


def date_to_ts(obj):
    if obj.utcoffset() is not None:
        obj = obj - obj.utcoffset()
    millis = int(
        calendar.timegm(obj.timetuple()) * 1000 +
        obj.microsecond / 1000
    )
    return millis


def configure_taxii_client_auth(tclient, cert=None, key=None, username=None, password=None):
    tls_auth = (cert and key)
    basic_auth = (username and password)

    if tls_auth and basic_auth:
        tclient.set_auth_type(HttpClient.AUTH_CERT_BASIC)
        tclient.set_auth_credentials(dict(
            key_file = key,
            cert_file = cert,
            username = username,
            password = password
        ))
    elif tls_auth:
        tclient.set_auth_type(HttpClient.AUTH_CERT)
        tclient.set_auth_credentials(dict(
            key_file = key,
            cert_file = cert
        ))
    elif basic_auth:
        tclient.set_auth_type(HttpClient.AUTH_BASIC)
        tclient.set_auth_credentials(dict(
            username = username,
            password = password
        ))

    return tclient


class DatetimeJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime):
            return date_to_ts(obj)
        else:
            return JSONEncoder.default(self, obj)


AbstractContentBlock = namedtuple('AbstractContentBlock',
                                  ['content', 'binding', 'subtypes', 'timestamp',
                                   'source', 'sink_collection', 'source_collection'])


class ContentBlock(AbstractContentBlock):
    def to_json(self):
        return json.dumps(self._asdict(), cls=DatetimeJSONEncoder)


def extract_content(response, source=None, source_collection=None, sink_collection=None):
    for block in response.content_blocks:
        if isinstance(block, ContentBlock10):
            yield ContentBlock(
                content = block.content,
                binding = block.content_binding,
                timestamp = block.timestamp_label,
                subtypes = None,
                source = source,
                source_collection = source_collection,
                sink_collection = sink_collection
            )
        else:
            yield ContentBlock(
                content = block.content,
                binding = block.content_binding.binding_id,
                timestamp = block.timestamp_label,
                subtypes = block.content_binding.subtype_ids,
                source = source,
                source_collection = source_collection,
                sink_collection = sink_collection
            )
import pytz
import json
import calendar

from libtaxii.clients import HttpClient
from libtaxii.messages_10 import ContentBlock as ContentBlock10

from datetime import datetime
from collections import namedtuple


def ts_to_date(timestamp):
    if not timestamp:
        return None
    return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.UTC)


def date_to_ts(obj):
    if obj.utcoffset() is not None:
        obj = obj - obj.utcoffset()
    millis = int(
        calendar.timegm(obj.timetuple()) * 1000 +
        obj.microsecond / 1000
    )
    return millis


def configure_taxii_client_auth(tclient, cert=None, key=None, username=None, password=None):
    tls_auth = (cert and key)
    basic_auth = (username and password)

    if tls_auth and basic_auth:
        tclient.set_auth_type(HttpClient.AUTH_CERT_BASIC)
        tclient.set_auth_credentials(dict(
            key_file = key,
            cert_file = cert,
            username = username,
            password = password
        ))
    elif tls_auth:
        tclient.set_auth_type(HttpClient.AUTH_CERT)
        tclient.set_auth_credentials(dict(
            key_file = key,
            cert_file = cert
        ))
    elif basic_auth:
        tclient.set_auth_type(HttpClient.AUTH_BASIC)
        tclient.set_auth_credentials(dict(
            username = username,
            password = password
        ))

    return tclient


class DatetimeJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime):
            return date_to_ts(obj)
        else:
            return JSONEncoder.default(self, obj)


AbstractContentBlock = namedtuple('AbstractContentBlock',
                                  ['content', 'binding', 'subtypes', 'timestamp',
                                   'source', 'sink_collection', 'source_collection'])


class ContentBlock(AbstractContentBlock):
    def to_json(self):
        return json.dumps(self._asdict(), cls=DatetimeJSONEncoder)


def extract_content(response, source=None, source_collection=None, sink_collection=None):
    for block in response.content_blocks:
        if isinstance(block, ContentBlock10):
            yield ContentBlock(
                content = block.content,
                binding = block.content_binding,
                timestamp = block.timestamp_label,
                subtypes = [],
                source = source,
                source_collection = source_collection,
                sink_collection = sink_collection
            )
        else:
            yield ContentBlock(
                content = block.content,
                binding = block.content_binding.binding_id,
                timestamp = block.timestamp_label,
                subtypes = block.content_binding.subtype_ids,
                source = source,
                source_collection = source_collection,
                sink_collection = sink_collection
            )
Python
0.000001
bfa66827e5afd175c15640b1678fbba347009953
Fix unit tests
python/test/_utils.py
python/test/_utils.py
from python.ServerGateway import DwebGatewayHTTPRequestHandler


def _processurl(url, verbose, headers={}, **kwargs):
    # Simulates HTTP Server process - wont work for all methods
    args = url.split('/')
    method = args.pop(0)
    DwebGatewayHTTPRequestHandler.headers = headers  # This is a kludge, put headers on class, method expects an instance.
    f = getattr(DwebGatewayHTTPRequestHandler, method)
    assert f
    namespace = args.pop(0)
    if verbose: kwargs["verbose"] = True
    res = f(DwebGatewayHTTPRequestHandler, namespace, *args, **kwargs)
    return res
from python.ServerGateway import DwebGatewayHTTPRequestHandler


def _processurl(url, verbose, **kwargs):
    # Simulates HTTP Server process - wont work for all methods
    args = url.split('/')
    method = args.pop(0)
    f = getattr(DwebGatewayHTTPRequestHandler, method)
    assert f
    namespace = args.pop(0)
    if verbose: kwargs["verbose"] = True
    res = f(DwebGatewayHTTPRequestHandler, namespace, *args, **kwargs)
    return res
Python
0.000005
c7e9ea888bbbcef9e7ae29340c45e9aaf211d1da
Fix tests
tests/travis.py
tests/travis.py
import os

os.environ['QT_API'] = os.environ['USE_QT_API'].lower()

from qtpy import QtCore, QtGui, QtWidgets

print('Qt version:%s' % QtCore.__version__)
print(QtCore.QEvent)
print(QtGui.QPainter)
print(QtWidgets.QWidget)
import os

os.environ['QT_API'] = os.environ['USE_QT_API']

from qtpy import QtCore, QtGui, QtWidgets

print('Qt version:%s' % QtCore.__version__)
print(QtCore.QEvent)
print(QtGui.QPainter)
print(QtWidgets.QWidget)
Python
0.000103
efac3c253dcd71be2c6510b5025ddedbb9a7358e
work when there's no RAVEN_CONFIG
temba/temba_celery.py
temba/temba_celery.py
from __future__ import absolute_import, unicode_literals

import celery
import os
import raven
import sys

from django.conf import settings
from raven.contrib.celery import register_signal, register_logger_signal

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'temba.settings')

app = celery.Celery('temba')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

# register raven if configured
raven_config = getattr(settings, 'RAVEN_CONFIG', None)
if raven_config:
    client = raven.Client(settings.RAVEN_CONFIG['dsn'])
    register_logger_signal(client)
    register_signal(client)


@app.task(bind=True)
def debug_task(self):  # pragma: needs cover
    print('Request: {0!r}'.format(self.request))


# this is needed to simulate CELERY_ALWAYS_EAGER for plain 'send' tasks
if 'test' in sys.argv or getattr(settings, 'CELERY_ALWAYS_EAGER', False):

    from celery import current_app

    def send_task(name, args=(), kwargs={}, **opts):  # pragma: needs cover
        task = current_app.tasks[name]
        return task.apply(args, kwargs, **opts)

    current_app.send_task = send_task
from __future__ import absolute_import, unicode_literals

import celery
import os
import raven
import sys

from django.conf import settings
from raven.contrib.celery import register_signal, register_logger_signal

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'temba.settings')

app = celery.Celery('temba')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

client = raven.Client(settings.RAVEN_CONFIG['dsn'])
register_logger_signal(client)
register_signal(client)


@app.task(bind=True)
def debug_task(self):  # pragma: needs cover
    print('Request: {0!r}'.format(self.request))


# this is needed to simulate CELERY_ALWAYS_EAGER for plain 'send' tasks
if 'test' in sys.argv or getattr(settings, 'CELERY_ALWAYS_EAGER', False):

    from celery import current_app

    def send_task(name, args=(), kwargs={}, **opts):  # pragma: needs cover
        task = current_app.tasks[name]
        return task.apply(args, kwargs, **opts)

    current_app.send_task = send_task
Python
0.000004
a3e8c27be953194df2d1b84c5c35b4b6d56f9268
Use common functions. Add deep sleep logic
temp-sensor03/main.py
temp-sensor03/main.py
import machine
from ds18x20 import DS18X20
import onewire
import time
import ujson
import urequests
import mybuddy

#Safetynet during development
#Give enough time to delete file before execution
for x in range (5):
    print (".",end='')
    time.sleep (1)
print ("")

def get_wifi_config():
    keystext = open("wifi.json").read()
    wificonfig = ujson.loads(keystext)
    return wificonfig

def setwifimode(stationmode=False,apmode=False):
    import network
    sta = network.WLAN(network.STA_IF) # create station interface
    ap = network.WLAN(network.AP_IF)
    sta.active(stationmode)
    ap.active(apmode)
    return [sta,ap]

def connecttonetwork(retry = 5):
    import network
    wlan = network.WLAN(network.STA_IF)
    wlan.active(True)
    wificonfig = get_wifi_config()
    wlan.connect(wificonfig['ssid'], wificonfig['password'])
    for x in range (retry):
        if wlan.isconnected():
            break
        time.sleep(5)
    if wlan.isconnected():
        print(wlan.ifconfig())
    return wlan.isconnected()

def wifioffdeepsleep(sleepinterval_seconds):
    setwifimode (False, False)
    mybuddy.deepsleep(sleepinterval_seconds *1000)

def posttocloud(temperature):
    keystext = open("sparkfun_keys.json").read()
    keys = ujson.loads(keystext)
    params = {}
    params['temp'] = "{:02.1f}".format(round(temperature,1))
    params['private_key'] = keys['privateKey']
    #data.sparkfun doesn't support putting data into the POST Body.
    #We had to add the data to the query string
    #Copied the Dirty hack from
    #https://github.com/matze/python-phant/blob/24edb12a449b87700a4f736e43a5415b1d021823/phant/__init__.py
    payload_str = "&".join("%s=%s" % (k, v) for k, v in params.items())
    url = keys['inputUrl'] + "?" + payload_str
    resp = urequests.request("POST", url)
    print (resp.text)

if __name__ == "__main__":
    #Put things here which can be done before needing wifi
    #Get the last run time from RTC memory
    resetcause = machine.reset_cause()
    rtcdata = None
    #Try to read save data from memory
    _rtc = machine.RTC()
    memorystring = _rtc.memory()
    if len(memorystring) == 0:
        print("No Data in RTC")
    else:
        try:
            import json
            print ("Memory Data string %s"%(memorystring))
            rtcdata = json.loads(memorystring)
        except ValueError:
            print ("Error parsing RTC data")
            rtcdata = None
    if rtcdata is None:
        rtcdata = {}
    #Connect to Network
    if not connecttonetwork():
        print ("No connection to wifi")
        wifioffdeepsleep(15*60)
    else:
        if not mybuddy.have_internet():
            #No internet. Sleep and retry later
            print ("No connection to internet")
            wifioffdeepsleep(5*60)
    #Flow comes here only when we have wifi
    try:
        mybuddy.setntptime(10)
    except:
        print ("Error setting NTP Time. Going to sleep")
        wifioffdeepsleep(2*60)
    rtcdata['ntptime'] = time.time()
    #Micropython has no timezone support. Timezone is always UTC.
    #Bad Hack.
    #Add the Delta for India Time
    localtime = time.localtime(rtcdata['ntptime']+ 19800)
    p = machine.Pin(2) # Data Line is on GPIO2 aka D4
    ow = onewire.OneWire(p)
    ds = DS18X20(ow)
    lstrom = ds.scan()
    #Assuming we have only 1 device connected
    rom = lstrom[0]
    ds.convert_temp()
    time.sleep_ms(750)
    temperature = round(float(ds.read_temp(rom)),1)
    #print("Temperature: {:02.1f}".format(temperature))
    posttocloud(temperature)
    if True:
        import json
        _rtc = machine.RTC()
        datastring = json.dumps(rtcdata)
        print("Saving Data in RTC %s"%(datastring))
        _rtc.memory(datastring)
        time.sleep (2)
    sleepinterval_seconds = 5 * 60
    nextcheck = rtcdata['ntptime'] + (sleepinterval_seconds - rtcdata['ntptime'] % sleepinterval_seconds)
    sleepinterval_seconds = nextcheck - rtcdata['ntptime']
    wifioffdeepsleep(sleepinterval_seconds)
import machine
from ds18x20 import DS18X20
import onewire
import time
import ujson
import urequests

def deepsleep():
    # configure RTC.ALARM0 to be able to wake the device
    rtc = machine.RTC()
    rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
    # set RTC.ALARM0 to fire after some time. Time is given in milliseconds here
    rtc.alarm(rtc.ALARM0, 15*60*1000)
    #Make sure you have GPIO16 connected RST to wake from deepSleep.
    # put the device to sleep
    print ("Going into Sleep now")
    machine.deepsleep()

def posttocloud(temperature):
    keystext = open("sparkfun_keys.json").read()
    keys = ujson.loads(keystext)
    params = {}
    params['temp'] = "{:02.1f}".format(round(temperature,1))
    params['private_key'] = keys['privateKey']
    #data.sparkfun doesn't support putting data into the POST Body.
    #We had to add the data to the query string
    #Copied the Dirty hack from
    #https://github.com/matze/python-phant/blob/24edb12a449b87700a4f736e43a5415b1d021823/phant/__init__.py
    payload_str = "&".join("%s=%s" % (k, v) for k, v in params.items())
    url = keys['inputUrl'] + "?" + payload_str
    resp = urequests.request("POST", url)
    print (resp.text)

if __name__ == "__main__":
    import network
    wlan = network.WLAN(network.STA_IF)
    while not wlan.isconnected() :
        time.sleep_ms(1)
    p = machine.Pin(2) # Data Line is on GPIO2 aka D4
    ow = onewire.OneWire(p)
    ds = DS18X20(ow)
    lstrom = ds.scan()
    #Assuming we have only 1 device connected
    rom = lstrom[0]
    ds.convert_temp()
    time.sleep_ms(750)
    temperature = round(float(ds.read_temp(rom)),1)
    #print("Temperature: {:02.1f}".format(temperature))
    posttocloud(temperature)
    deepsleep()
Python
0.000556
460f218c2ed71a0a7aff5bb3353bca01a4841af1
Update Homework_Week4_CaseStudy2.py
Week4-Case-Studies-Part2/Bird-Migration/Homework_Week4_CaseStudy2.py
Week4-Case-Studies-Part2/Bird-Migration/Homework_Week4_CaseStudy2.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 23 21:46:13 2017

@author: lamahamadeh
"""

'''
==============================
Case Study 2 - Bird Migration
==============================
'''

#In this case study, we will continue taking a look at patterns of flight
#for each of the three birds in our dataset.

#------------------------------------------------------------------------------

#Exercise 1
#----------
#pandas makes it easy to perform basic operations on groups within a dataframe
#without needing to loop through each value in the dataframe. The sample code
#shows you how to group the dataframe by birdname and then find the average
#speed_2d for each bird. Modify the code to assign the mean altitudes of each
#bird into an object called mean_altitudes.

#load the dataframe
import pandas as pd
birddata = pd.read_csv('/Users/Admin/Desktop/bird_tracking.csv')

# First, use `groupby` to group up the data.
grouped_birds = birddata.groupby("bird_name")

# Now operations are performed on each group.
mean_speeds = grouped_birds.speed_2d.mean()

# The `head` method prints the first 5 lines of each bird.
print grouped_birds.head()

# Find the mean `altitude` for each bird.
# Assign this to `mean_altitudes`.
mean_altitudes = grouped_birds.altitude.mean()

#------------------------------------------------------------------------------

#Exercise 2
#----------
#In this exercise, we will group the flight times by date and calculate the
#mean altitude within that day. Use groupby to group the data by date.

# Convert birddata.date_time to the `pd.datetime` format.
birddata.date_time = pd.to_datetime(birddata.date_time)

# Create a new column of day of observation
birddata["date"] = birddata.date_time.dt.date

# Check the head of the column.
print birddata.date.head()

grouped_bydates = birddata.groupby("date")

#Calculate the mean altitude per day and store these results as
#mean_altitudes_perday.
mean_altitudes_perday = grouped_bydates.altitude.mean()

#------------------------------------------------------------------------------

#Exercise 3
#----------
#birddata already contains the date column. To find the average speed for each
#bird and day, create a new grouped dataframe called grouped_birdday that
#groups the data by both bird_name and date.

grouped_birdday = birddata.groupby(["bird_name", "date"])

mean_altitudes_perday = grouped_birdday.altitude.mean()

# look at the head of `mean_altitudes_perday`.
mean_altitudes_perday.head()

#------------------------------------------------------------------------------

#Exercise 4
#----------
#Great! Now find the average speed for each bird and day. Store these are three
#pandas Series objects – one for each bird.
#Use the plotting code provided to plot the average speeds for each bird.

import matplotlib.pyplot as plt

eric_daily_speed = grouped_birdday.speed_2d.mean()["Eric"]
sanne_daily_speed = grouped_birdday.speed_2d.mean()["Sanne"]
nico_daily_speed = grouped_birdday.speed_2d.mean()["Nico"]

eric_daily_speed.plot(label="Eric")
sanne_daily_speed.plot(label="Sanne")
nico_daily_speed.plot(label="Nico")
plt.legend(loc="upper left")
plt.show()

#------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 23 21:46:13 2017

@author: lamahamadeh
"""

'''
==============================
Case Study 2 - Bird Migration
==============================
'''

#In this case study, we will continue taking a look at patterns of flight
#for each of the three birds in our dataset.

#------------------------------------------------------------------------------

#Exercise 1
#----------
#pandas makes it easy to perform basic operations on groups within a dataframe
#without needing to loop through each value in the dataframe. The sample code
#shows you how to group the dataframe by birdname and then find the average
#speed_2d for each bird. Modify the code to assign the mean altitudes of each
#bird into an object called mean_altitudes.

#load the dataframe
import pandas as pd
birddata = pd.read_csv('/Users/Admin/Desktop/bird_tracking.csv')

# First, use `groupby` to group up the data.
grouped_birds = birddata.groupby("bird_name")

# Now operations are performed on each group.
mean_speeds = grouped_birds.speed_2d.mean()

# The `head` method prints the first 5 lines of each bird.
print grouped_birds.head()

# Find the mean `altitude` for each bird.
# Assign this to `mean_altitudes`.
mean_altitudes = grouped_birds.altitude.mean()

#------------------------------------------------------------------------------

#Exercise 2
#----------
#In this exercise, we will group the flight times by date and calculate the
#mean altitude within that day. Use groupby to group the data by date.

# Convert birddata.date_time to the `pd.datetime` format.
birddata.date_time = pd.to_datetime(birddata.date_time)

# Create a new column of day of observation
birddata["date"] = birddata.date_time.dt.date

# Check the head of the column.
print birddata.date.head()

grouped_bydates = birddata.groupby("date")

#Calculate the mean altitude per day and store these results as
#mean_altitudes_perday.
mean_altitudes_perday = grouped_bydates.altitude.mean()

#------------------------------------------------------------------------------

#Exercise 3
#----------
#birddata already contains the date column. To find the average speed for each
#bird and day, create a new grouped dataframe called grouped_birdday that
#groups the data by both bird_name and date.

grouped_birdday = birddata.groupby(["bird_name", "date"])

mean_altitudes_perday = grouped_birdday.altitude.mean()

# look at the head of `mean_altitudes_perday`.
mean_altitudes_perday.head()

#------------------------------------------------------------------------------

#Exercise 4
#----------
#Great! Now find the average speed for each bird and day. Store these are three
#pandas Series objects – one for each bird.
#Use the plotting code provided to plot the average speeds for each bird.

import matplotlib.pyplot as plt

eric_daily_speed = grouped_birdday.speed_2d.mean()["Eric"]
sanne_daily_speed = grouped_birdday.speed_2d.mean()["Sanne"]
nico_daily_speed = grouped_birdday.speed_2d.mean()["Nico"]

eric_daily_speed.plot(label="Eric")
sanne_daily_speed.plot(label="Sanne")
nico_daily_speed.plot(label="Nico")
plt.legend(loc="upper left")
plt.show()

#------------------------------------------------------------------------------
Python
0.000001
e9e40dd4d9d5357069261653cd1a432e99e8e1aa
Remove unexpected dummy code
initialize_data.py
initialize_data.py
import pandas
import numpy as np
from google.cloud import datastore
from math import floor
import pdb

RATING_KIND = 'Rating'
MOVIE_KIND = 'Movie'
PROJECT_ID = 'cf-mr-service'
client = datastore.Client(PROJECT_ID)

def load_from_store():
    query = client.query(kind=RATING_KIND)
    result = query.fetch()
    rating = list(result)
    read_rating = None
    for entity in rating:
        arr = np.fromstring(entity['data_str'], dtype=entity['dtype']).reshape(entity['rows'], entity['cols'])
        if read_rating is not None:
            read_rating = np.append(read_rating, arr, axis=0)
        else:
            read_rating = arr

def save_to_store():
    print 'save to store'
    header = ['user_id', 'item_id', 'rating', 'timestamp']
    rating_data = pandas.read_csv('u.data', sep='\t', names=header)
    n_users = rating_data.user_id.unique().shape[0]
    n_items = rating_data.item_id.unique().shape[0]
    print 'Number of users = ' + str(n_users) + ' | Number of movies = ' + str(n_items)
    user_rating = np.zeros((n_users, n_items), dtype='uint8')
    for line in rating_data.itertuples():
        user_rating[line[1] - 1, line[2] - 1] = line[3]
    # split_size = int(floor(1048487.0 * 3 / (4 * n_items)))
    split_size = int(floor(1048487.0 / n_items))
    entity_list = []
    print 'config split size = ' + str(split_size)
    config_key = client.key('Config', 'v1.0')
    entity = client.get(key=config_key)
    if entity is None:
        entity = datastore.Entity(key=config_key,
                                  exclude_from_indexes=['user_rating_split_size'])
    entity.update({
        'user_rating_split_size': split_size
    })
    entity_list.append(entity)
    for i in xrange(0, n_users + 1, split_size):
        print 'split rating data from ' + str(i) + ' to ' + str(i + split_size)
        entity = datastore.Entity(key=client.key(RATING_KIND, str(i / split_size)),
                                  exclude_from_indexes=['rows', 'cols', 'dtype', 'data_str'])
        sub_arr = user_rating[i : i + split_size]
        entity.update({
            'rows': sub_arr.shape[0],
            'cols': sub_arr.shape[1],
            'dtype': str(sub_arr.dtype),
            'data_str': sub_arr.tostring()
        })
        entity_list.append(entity)
    print 'prepare deleting indexed users'
    query = client.query(kind='User')
    query.keys_only()
    user_keys = []
    for user in query.fetch():
        print 'users to be delete ' + user.key.name
        user_keys.append(user.key)
    with client.transaction():
        print 'run transaction'
        client.put_multi(entity_list)
        client.delete_multi(user_keys)
    entity_list = []
    print 'load movie info'
    f = open('u.item')
    while True:
        s = f.readline()
        if not s:
            break;
        item_info = s.split('|')
        entity = datastore.Entity(key=client.key(MOVIE_KIND, str(int(item_info[0]) - 1)),
                                  exclude_from_indexes=['title', 'imdb_url'])
        entity.update({
            'title': item_info[1],
            'imdb_url': item_info[4]
        })
        entity_list.append(entity)
        if (len(entity_list) >= 400):
            print 'put movie info'
            client.put_multi(entity_list)
            entity_list = []
    print 'initialization transaction'

if __name__ == '__main__':
    save_to_store()
    # load_from_store()
import pandas
import numpy as np
from google.cloud import datastore
from math import floor
import pdb

RATING_KIND = 'Rating'
MOVIE_KIND = 'Movie'
PROJECT_ID = 'cf-mr-service'
client = datastore.Client(PROJECT_ID)

def load_from_store():
    query = client.query(kind=RATING_KIND)
    result = query.fetch()
    rating = list(result)
    read_rating = None
    for entity in rating:
        arr = np.fromstring(entity['data_str'], dtype=entity['dtype']).reshape(entity['rows'], entity['cols'])
        if read_rating is not None:
            read_rating = np.append(read_rating, arr, axis=0)
        else:
            read_rating = arr

def save_to_store():
    print 'save to store'
    header = ['user_id', 'item_id', 'rating', 'timestamp']
    rating_data = pandas.read_csv('u.data', sep='\t', names=header)
    n_users = rating_data.user_id.unique().shape[0]
    n_items = rating_data.item_id.unique().shape[0]
    print 'Number of users = ' + str(n_users) + ' | Number of movies = ' + str(n_items)
    user_rating = np.zeros((n_users, n_items), dtype='uint8')
    for line in rating_data.itertuples():
        user_rating[line[1] - 1, line[2] - 1] = line[3]
    # split_size = int(floor(1048487.0 * 3 / (4 * n_items)))
    split_size = int(floor(1048487.0 / n_items))
    entity_list = []
    print 'config split size = ' + str(split_size)
    config_key = key=client.key('Config', 'v1.0')
    entity = client.get(key=config_key)
    if entity is None:
        entity = datastore.Entity(key=config_key,
                                  exclude_from_indexes=['user_rating_split_size'])
    entity.update({
        'user_rating_split_size': split_size
    })
    entity_list.append(entity)
    for i in xrange(0, n_users + 1, split_size):
        print 'split rating data from ' + str(i) + ' to ' + str(i + split_size)
        entity = datastore.Entity(key=client.key(RATING_KIND, str(i / split_size)),
                                  exclude_from_indexes=['rows', 'cols', 'dtype', 'data_str'])
        sub_arr = user_rating[i : i + split_size]
        entity.update({
            'rows': sub_arr.shape[0],
            'cols': sub_arr.shape[1],
            'dtype': str(sub_arr.dtype),
            'data_str': sub_arr.tostring()
        })
        entity_list.append(entity)
    print 'prepare deleting indexed users'
    query = client.query(kind='User')
    query.keys_only()
    user_keys = []
    for user in query.fetch():
        print 'users to be delete ' + user.key.name
        user_keys.append(user.key)
    with client.transaction():
        print 'run transaction'
        client.put_multi(entity_list)
        client.delete_multi(user_keys)
    entity_list = []
    print 'load movie info'
    f = open('u.item')
    while True:
        s = f.readline()
        if not s:
            break;
        item_info = s.split('|')
        entity = datastore.Entity(key=client.key(MOVIE_KIND, str(int(item_info[0]) - 1)),
                                  exclude_from_indexes=['title', 'imdb_url'])
        entity.update({
            'title': item_info[1],
            'imdb_url': item_info[4]
        })
        entity_list.append(entity)
        if (len(entity_list) >= 400):
            print 'put movie info'
            client.put_multi(entity_list)
            entity_list = []
    print 'initialization transaction'

if __name__ == '__main__':
    save_to_store()
    # load_from_store()
Python
0.000037
6cb9a09ee92f3be6a5d807e9c5af41bac4796435
Remove loading of env file in development
{{cookiecutter.repo_name}}/config/settings/base.py
{{cookiecutter.repo_name}}/config/settings/base.py
# -*- coding: utf-8 -*-
"""
Django settings for {{ cookiecutter.project_name }}.

For more information on this file, see
https://docs.djangoproject.com/en/stable/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/stable/ref/settings/
"""

import environ

# Django-environ for using 12-factor environment variables.
# http://12factor.net/)
env = environ.Env()

# Build paths inside the project like this: str(BASE_DIR.path('directory'))
BASE_DIR = environ.Path(__file__) - 3

# Secret key from environment variables
# https://docs.djangoproject.com/en/stable/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='this_is_a_secret')

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',

    # Third party app

    # Own apps
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            str(BASE_DIR.path('templates')),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.static',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'

# Database
# https://docs.djangoproject.com/en/stable/ref/settings/#databases

# Get databases from DATABASE_URL.
# https://django-environ.readthedocs.org/en/latest/
DATABASES = {
    'default': env.db('DATABASE_URL', default='postgres:///{{ cookiecutter.repo_name }}'),
}

# Internationalization
# https://docs.djangoproject.com/en/stable/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'  # Example: Europe/Oslo

USE_I18N = False

USE_L10N = True

USE_TZ = True

# Managers
# https://docs.djangoproject.com/en/stable/ref/settings/#managers
ADMINS = (
    ("""{{ cookiecutter.author_name }}""", '{{ cookiecutter.email }}'),
)

MANAGERS = ADMINS

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/stable/howto/static-files/
STATIC_URL = '/static/'

STATICFILES_DIRS = (
    str(BASE_DIR.path('static')),
)

STATIC_ROOT = 'staticfiles'
# -*- coding: utf-8 -*-
"""
Django settings for {{ cookiecutter.project_name }}.

For more information on this file, see
https://docs.djangoproject.com/en/stable/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/stable/ref/settings/
"""

from os.path import dirname, join, exists

import environ

# Django-environ for using 12-factor environment variables.
# http://12factor.net/)
env = environ.Env()

# Load environment files from file in development
env_file = join(dirname(__file__), 'development.env')
if exists(env_file):
    environ.Env.read_env(str(env_file))

# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))

# Secret key from environment variables
# https://docs.djangoproject.com/en/stable/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',

    # Third party app

    # Own apps
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.static',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'

# Database
# https://docs.djangoproject.com/en/stable/ref/settings/#databases

# Get databases from DATABASE_URL.
# https://django-environ.readthedocs.org/en/latest/
DATABASES = {
    'default': env.db(),
}

# Internationalization
# https://docs.djangoproject.com/en/stable/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'  # Example: Europe/Oslo

USE_I18N = False

USE_L10N = True

USE_TZ = True

# Managers
# https://docs.djangoproject.com/en/stable/ref/settings/#managers
ADMINS = (
    ("""{{ cookiecutter.author_name }}""", '{{ cookiecutter.email }}'),
)

MANAGERS = ADMINS

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/stable/howto/static-files/
STATIC_URL = '/static/'

STATICFILES_DIRS = [join(BASE_DIR, 'static')]

STATIC_ROOT = 'staticfiles'
Python
0
c90fce44f30398fef0c20ec08f761ae19951308a
Delete unused pipeline settings
{{cookiecutter.repo_name}}/src/settings/project.py
{{cookiecutter.repo_name}}/src/settings/project.py
# -*- coding: utf-8 -*-
"""
Project settings for {{cookiecutter.project_name}}

Author : {{cookiecutter.author_name}} <{{cookiecutter.email}}>
"""
from defaults import *
from getenv import env

INSTALLED_APPS += (
    'applications.front',
)

GRAPPELLI_ADMIN_TITLE = "Admin"
# -*- coding: utf-8 -*-
"""
Project settings for {{cookiecutter.project_name}}

Author : {{cookiecutter.author_name}} <{{cookiecutter.email}}>
"""
from defaults import *
from getenv import env

INSTALLED_APPS += (
    'applications.front',
)

GRAPPELLI_ADMIN_TITLE = "Admin"

PIPELINE_CSS = {
    'stylesheets': {
        'source_filenames': (
        ),
        'output_filename': 'stylesheets.css',
        'extra_context': {
            'media': 'screen,projection',
        },
    },
}

PIPELINE_JS = {
    'scripts': {
        'source_filenames': (
        ),
        'output_filename': 'scripts.js',
    }
}
Python
0.000001
c83e8d7d83b9395b5b0428dcac2909b8d6762fe4
make Tool work without invoke scripts again
hublib/rappture/tool.py
hublib/rappture/tool.py
from __future__ import print_function
from .node import Node
import numpy as np
from lxml import etree as ET
import os
import subprocess
import sys
from .rappture import RapXML


class Tool(RapXML):

    def __init__(self, tool):
        """
        tool can be any of the following:
            - Path to a tool.xml file.
            - Name of a published tool.  The current version will be run.
        """
        dirname, xml = os.path.split(tool)
        if dirname == "":
            if xml != "tool.xml":
                # must be tool name
                dirname = "/apps/%s/current" % xml
                xml = dirname + "/rappture/tool.xml"
            else:
                dirname = os.getcwd()
        else:
            xml = tool
            dirname = os.path.abspath(dirname)
        xml = os.path.abspath(xml)
        if not os.path.isfile(xml):
            raise ValueError("tool must be a toolname or path to a tool.xml file.")

        sessdir = os.environ['SESSIONDIR']
        invoke_file = os.path.join(dirname, 'middleware', 'invoke')
        if os.path.isfile(invoke_file):
            self.invoke_file = invoke_file
        else:
            self.invoke_file = os.path.join(sessdir, 'invoke_%s' % os.getpid())
            with open(self.invoke_file, 'w') as f:
                print('#!/bin/sh', file=f)
                print('/usr/bin/invoke_app -T %s -C rappture' % dirname, file=f)
            subprocess.call('chmod +x %s' % self.invoke_file, shell=True)

        self.tmp_name = os.path.join(sessdir, 'tool_driver_%s.xml' % os.getpid())
        self.run_name = ""
        self.toolparameters_name = os.path.join(sessdir, 'driver_%s.hz' % os.getpid())
        self.rappturestatus_name = os.path.join(sessdir, 'rappture.status')
        self.dirname = dirname
        self.tool = xml
        RapXML.__init__(self, xml)

    def run(self, verbose=True):
        # print("Writing", self.tmp_name)

        with open(self.tmp_name, 'w') as f:
            f.write(str(self.xml(pretty=False, header=True)))

        with open(self.toolparameters_name, 'w') as f:
            f.write("file(execute):%s" % (self.tmp_name))

        cmd = "TOOL_PARAMETERS=%s %s" % (self.toolparameters_name, self.invoke_file)
        if verbose:
            print("cmd=", cmd)

        cwd = os.getcwd()
        os.chdir(os.environ['SESSIONDIR'])

        try:
            ret = subprocess.call(cmd, shell=True)
            if ret:
                print('Error: "%s"' % cmd, file=sys.stderr)
                if ret < 0:
                    print("Terminated by signal", -ret, file=sys.stderr)
                else:
                    print("Returncode", ret, file=sys.stderr)
        except OSError as e:
            print('Error: "%s"' % cmd, file=sys.stderr)
            print("Failed:", e, file=sys.stderr)
            sys.exit(1)

        with(open(self.rappturestatus_name, 'r')) as f:
            statusData = f.readlines()

        for record in statusData:
            if 'output saved in' in record:
                self.run_name = record.strip().split()[-1]
                break

        if self.run_name:
            self.tree = ET.parse(self.run_name)

        os.chdir(cwd)
from __future__ import print_function
from .node import Node
import numpy as np
from lxml import etree as ET
import os
import subprocess
import sys
from .rappture import RapXML


class Tool(RapXML):

    def __init__(self, tool):
        """
        tool can be any of the following:

        - Path to a tool.xml file.
        - Name of a published tool.  The current version will be run.
        """
        dirname, xml = os.path.split(tool)
        if dirname == "":
            if xml != "tool.xml":
                # must be tool name
                dirname = "/apps/%s/current" % xml
                xml = dirname + "/rappture/tool.xml"
            else:
                dirname = os.getcwd()
        else:
            xml = tool
            dirname = os.path.abspath(os.path.join(dirname, '..'))

        xml = os.path.abspath(xml)
        if not os.path.isfile(xml):
            raise ValueError("tool must be a toolname or path to a tool.xml file.")

        invoke_file = os.path.join(dirname, 'middleware', 'invoke')
        if os.path.isfile(invoke_file):
            self.invoke_file = invoke_file

        sessdir = os.environ['SESSIONDIR']
        self.tmp_name = os.path.join(sessdir, 'tool_driver_%s.xml' % os.getpid())
        self.run_name = ""
        self.toolparameters_name = os.path.join(sessdir, 'driver_%s.hz' % os.getpid())
        self.rappturestatus_name = os.path.join(sessdir, 'rappture.status')
        self.fname = xml
        self.tree = ET.parse(xml)
        self.path = ''

    def run(self, verbose=True):
        # print("Writing", self.tmp_name)
        with open(self.tmp_name, 'w') as f:
            f.write(str(self.xml(pretty=False, header=True)))

        with open(self.toolparameters_name, 'w') as f:
            f.write("file(execute):%s" % (self.tmp_name))

        cmd = "TOOL_PARAMETERS=%s %s" % (self.toolparameters_name, self.invoke_file)
        if verbose:
            print("cmd=", cmd)

        cwd = os.getcwd()
        os.chdir(os.environ['SESSIONDIR'])
        try:
            ret = subprocess.call(cmd, shell=True)
            if ret:
                print('Error: "%s"' % cmd, file=sys.stderr)
                if ret < 0:
                    print("Terminated by signal", -ret, file=sys.stderr)
                else:
                    print("Returncode", ret, file=sys.stderr)
        except OSError as e:
            print('Error: "%s"' % cmd, file=sys.stderr)
            print("Failed:", e, file=sys.stderr)
            sys.exit(1)

        with(open(self.rappturestatus_name, 'r')) as f:
            statusData = f.readlines()

        for record in statusData:
            if 'output saved in' in record:
                self.run_name = record.strip().split()[-1]
                break

        if self.run_name:
            self.tree = ET.parse(self.run_name)

        os.chdir(cwd)
Python
0
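The fix in this record restores a fallback for tools that ship no middleware/invoke script: Tool.__init__ now writes a throw-away shell wrapper into the session directory and marks it executable. An isolated sketch of that fallback follows; the invoke_app flags mirror the record, but the helper name and paths are invented for illustration.

import os
import stat

def ensure_invoke_script(tool_dir, session_dir):
    # Prefer the script packaged with the tool, if one exists.
    invoke = os.path.join(tool_dir, 'middleware', 'invoke')
    if os.path.isfile(invoke):
        return invoke
    # Otherwise generate a per-process wrapper in the session directory.
    fallback = os.path.join(session_dir, 'invoke_%s' % os.getpid())
    with open(fallback, 'w') as f:
        f.write('#!/bin/sh\n')
        f.write('/usr/bin/invoke_app -T %s -C rappture\n' % tool_dir)
    # Same effect as `chmod +x`, without spawning a shell.
    mode = os.stat(fallback).st_mode
    os.chmod(fallback, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    return fallback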
c06ceb1c3f06bb08e9adb84d82d33325e54ec507
Update accordion.py
cmsplugin_cascade/bootstrap4/accordion.py
cmsplugin_cascade/bootstrap4/accordion.py
from django.forms import widgets, BooleanField, CharField
from django.forms.fields import IntegerField
from django.utils.translation import ungettext_lazy, ugettext_lazy as _
from django import VERSION as DJANGO_VERSION
if DJANGO_VERSION < (2, 0):
    from django.utils.text import Truncator, mark_safe
else:
    from django.utils.safestring import Truncator, mark_safe
from django.utils.html import escape
from entangled.forms import EntangledModelFormMixin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer
from cmsplugin_cascade.widgets import NumberInputWidget
from .plugin_base import BootstrapPluginBase


class AccordionFormMixin(ManageChildrenFormMixin, EntangledModelFormMixin):
    num_children = IntegerField(
        min_value=1,
        initial=1,
        widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em !important;'}),
        label=_("Groups"),
        help_text=_("Number of groups for this accordion."),
    )

    close_others = BooleanField(
        label=_("Close others"),
        initial=True,
        required=False,
        help_text=_("Open only one card at a time.")
    )

    first_is_open = BooleanField(
        label=_("First open"),
        initial=True,
        required=False,
        help_text=_("Start with the first card open.")
    )

    class Meta:
        untangled_fields = ['num_children']
        entangled_fields = {'glossary': ['close_others', 'first_is_open']}


class BootstrapAccordionPlugin(TransparentWrapper, BootstrapPluginBase):
    name = _("Accordion")
    default_css_class = 'accordion'
    require_parent = True
    parent_classes = ['BootstrapColumnPlugin']
    direct_child_classes = ['BootstrapAccordionGroupPlugin']
    allow_children = True
    form = AccordionFormMixin
    render_template = 'cascade/bootstrap4/{}accordion.html'

    @classmethod
    def get_identifier(cls, obj):
        num_cards = obj.get_num_children()
        content = ungettext_lazy('with {0} card', 'with {0} cards', num_cards).format(num_cards)
        return mark_safe(content)

    def render(self, context, instance, placeholder):
        context = self.super(BootstrapAccordionPlugin, self).render(context, instance, placeholder)
        context.update({
            'close_others': instance.glossary.get('close_others', True),
            'first_is_open': instance.glossary.get('first_is_open', True),
        })
        return context

    def save_model(self, request, obj, form, change):
        wanted_children = int(form.cleaned_data.get('num_children'))
        super().save_model(request, obj, form, change)
        self.extend_children(obj, wanted_children, BootstrapAccordionGroupPlugin)

plugin_pool.register_plugin(BootstrapAccordionPlugin)


class AccordionGroupFormMixin(EntangledModelFormMixin):
    heading = CharField(
        label=_("Heading"),
        widget=widgets.TextInput(attrs={'size': 80}),
    )

    body_padding = BooleanField(
        label=_("Body with padding"),
        initial=True,
        required=False,
        help_text=_("Add standard padding to card body."),
    )

    class Meta:
        entangled_fields = {'glossary': ['heading', 'body_padding']}

    def clean_heading(self):
        return escape(self.cleaned_data['heading'])


class BootstrapAccordionGroupPlugin(TransparentContainer, BootstrapPluginBase):
    name = _("Accordion Group")
    direct_parent_classes = parent_classes = ['BootstrapAccordionPlugin']
    render_template = 'cascade/generic/naked.html'
    require_parent = True
    form = AccordionGroupFormMixin
    alien_child_classes = True

    @classmethod
    def get_identifier(cls, instance):
        heading = instance.glossary.get('heading', '')
        return Truncator(heading).words(3, truncate=' ...')

    def render(self, context, instance, placeholder):
        context = self.super(BootstrapAccordionGroupPlugin, self).render(context, instance, placeholder)
        context.update({
            'heading': mark_safe(instance.glossary.get('heading', '')),
            'no_body_padding': not instance.glossary.get('body_padding', True),
        })
        return context

plugin_pool.register_plugin(BootstrapAccordionGroupPlugin)
from django.forms import widgets, BooleanField, CharField
from django.forms.fields import IntegerField
from django.utils.translation import ungettext_lazy, ugettext_lazy as _
from django.utils.text import Truncator, mark_safe
from django.utils.html import escape
from entangled.forms import EntangledModelFormMixin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer
from cmsplugin_cascade.widgets import NumberInputWidget
from .plugin_base import BootstrapPluginBase


class AccordionFormMixin(ManageChildrenFormMixin, EntangledModelFormMixin):
    num_children = IntegerField(
        min_value=1,
        initial=1,
        widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em !important;'}),
        label=_("Groups"),
        help_text=_("Number of groups for this accordion."),
    )

    close_others = BooleanField(
        label=_("Close others"),
        initial=True,
        required=False,
        help_text=_("Open only one card at a time.")
    )

    first_is_open = BooleanField(
        label=_("First open"),
        initial=True,
        required=False,
        help_text=_("Start with the first card open.")
    )

    class Meta:
        untangled_fields = ['num_children']
        entangled_fields = {'glossary': ['close_others', 'first_is_open']}


class BootstrapAccordionPlugin(TransparentWrapper, BootstrapPluginBase):
    name = _("Accordion")
    default_css_class = 'accordion'
    require_parent = True
    parent_classes = ['BootstrapColumnPlugin']
    direct_child_classes = ['BootstrapAccordionGroupPlugin']
    allow_children = True
    form = AccordionFormMixin
    render_template = 'cascade/bootstrap4/{}accordion.html'

    @classmethod
    def get_identifier(cls, obj):
        num_cards = obj.get_num_children()
        content = ungettext_lazy('with {0} card', 'with {0} cards', num_cards).format(num_cards)
        return mark_safe(content)

    def render(self, context, instance, placeholder):
        context = self.super(BootstrapAccordionPlugin, self).render(context, instance, placeholder)
        context.update({
            'close_others': instance.glossary.get('close_others', True),
            'first_is_open': instance.glossary.get('first_is_open', True),
        })
        return context

    def save_model(self, request, obj, form, change):
        wanted_children = int(form.cleaned_data.get('num_children'))
        super().save_model(request, obj, form, change)
        self.extend_children(obj, wanted_children, BootstrapAccordionGroupPlugin)

plugin_pool.register_plugin(BootstrapAccordionPlugin)


class AccordionGroupFormMixin(EntangledModelFormMixin):
    heading = CharField(
        label=_("Heading"),
        widget=widgets.TextInput(attrs={'size': 80}),
    )

    body_padding = BooleanField(
        label=_("Body with padding"),
        initial=True,
        required=False,
        help_text=_("Add standard padding to card body."),
    )

    class Meta:
        entangled_fields = {'glossary': ['heading', 'body_padding']}

    def clean_heading(self):
        return escape(self.cleaned_data['heading'])


class BootstrapAccordionGroupPlugin(TransparentContainer, BootstrapPluginBase):
    name = _("Accordion Group")
    direct_parent_classes = parent_classes = ['BootstrapAccordionPlugin']
    render_template = 'cascade/generic/naked.html'
    require_parent = True
    form = AccordionGroupFormMixin
    alien_child_classes = True

    @classmethod
    def get_identifier(cls, instance):
        heading = instance.glossary.get('heading', '')
        return Truncator(heading).words(3, truncate=' ...')

    def render(self, context, instance, placeholder):
        context = self.super(BootstrapAccordionGroupPlugin, self).render(context, instance, placeholder)
        context.update({
            'heading': mark_safe(instance.glossary.get('heading', '')),
            'no_body_padding': not instance.glossary.get('body_padding', True),
        })
        return context

plugin_pool.register_plugin(BootstrapAccordionGroupPlugin)
Python
0.000001
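The only change in this last record is gating the Truncator/mark_safe import on the installed Django version. The general shape of that pattern is sketched below, using the ugettext_lazy/gettext_lazy rename as a concrete example; that rename is not part of the commit above, it just illustrates the idiom.

from django import VERSION as DJANGO_VERSION

if DJANGO_VERSION < (3, 0):
    # ugettext_lazy was deprecated in Django 3.0 and removed in 4.0,
    # so keep it only on the old code path.
    from django.utils.translation import ugettext_lazy as _
else:
    from django.utils.translation import gettext_lazy as _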